| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12–1.05M | stringlengths 5–86 | stringlengths 4–191 | stringclasses 1 value | stringclasses 15 values | int32 12–1.05M | listlengths 1–23 | stringlengths 64–64 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009-2015 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
# This is required for setup.py to be able to import this module.
import __builtin__
if not hasattr(__builtin__, '_'):
def _(string):
return string
try:
import argparse
except ImportError:
from rednotebook.external import argparse
version = '1.10.2'
author = 'Jendrik Seipp'
author_mail = 'jendrikseipp@web.de'
url = 'http://rednotebook.sourceforge.net'
answers_url = 'https://answers.launchpad.net/rednotebook'
translation_url = 'https://translations.launchpad.net/rednotebook/'
bug_url = 'https://bugs.launchpad.net/rednotebook/+filebug'
developers = [
'%(author)s <%(author_mail)s>' % locals(),
'',
'Contributors:',
'Alistair Marshall <thatscottishengineer@gmail.com>']
comments = '''\
RedNotebook is a graphical journal to keep track of notes and
thoughts. It includes calendar navigation, customizable
templates, export functionality and word clouds. You can also
format, tag and search your entries.
'''
license_text = '''\
Copyright (c) 2009-2015 Jendrik Seipp
RedNotebook is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
RedNotebook is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with RedNotebook; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
journal_path_help = '''\
(optional) Specify the directory storing the journal data.
The journal argument can be one of the following:
- An absolute path (e.g. /home/username/myjournal)
- A relative path (e.g. ../dir/myjournal)
- The name of a directory under $HOME/.rednotebook/ (e.g. myjournal)
If the journal argument is omitted then the last session's journal
path will be used. At the first program start, this defaults to
"$HOME/.rednotebook/data".
'''
def get_commandline_parser():
parser = argparse.ArgumentParser(
description=comments,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'--version', action='version', version='RedNotebook %s' % version)
parser.add_argument(
'--date', dest='start_date',
help='load specified date (format: YYYY-MM-DD)')
parser.add_argument('journal', nargs='?', help=journal_path_help)
return parser
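# Illustration (not part of the original module) of how the parser above behaves;
# the argument values are made up:
#     parser = get_commandline_parser()
#     args = parser.parse_args(['--date', '2015-06-01', 'myjournal'])
#     # args.start_date == '2015-06-01', args.journal == 'myjournal'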
tags = _('Tags')
todo = _('Todo')
done = _('Done')
rtm = _('Remember the milk')
dishes = _('Wash the dishes')
greeting = _('Hello!')
intro = _('Some example text has been added to help you start and '
'you can erase it whenever you like.')
# Translators: "Help" -> noun
help_par = _(
'The example text and more documentation is available under '
'"Help" -> "Contents".')
# Translators: noun
preview = _('Preview')
preview1 = _(
'There are two modes in RedNotebook, the __edit__ mode and the '
'__preview__ mode.')
preview2 = _('Click on Edit above to see the difference.')
preview_par = ' '.join([preview1, preview2])
tags1 = _('Tagging is easy.')
tags2 = _('Just use #hashtags like on twitter.')
tags_par = ' '.join([tags1, tags2])
example_entry = _(
'Today I went to the //pet shop// and bought a **tiger**. '
'Then we went to the --pool-- park and had a nice time playing '
'ultimate frisbee. Afterwards we watched "__Life of Brian__".')
templates = _('Templates')
temp1 = _('RedNotebook supports templates.')
temp2 = _('Click on the arrow next to the "Template" button to see some options.')
temp3 = _('''You can have one template for every day
of the week and unlimited arbitrarily named templates.''')
temp_par = ' '.join([temp1, temp2, temp3])
# Translators: both are verbs
save = _('Save and Export')
save1 = _(
'Everything you enter will be saved automatically at regular '
'intervals and when you exit the program.')
save2 = _('To avoid data loss you should backup your journal regularly.')
save3 = _('"Backup" in the "Journal" menu saves all your entered data in a zip file.')
save4 = _('In the "Journal" menu you also find the "Export" button.')
save5 = _('Click on "Export" and export your diary to Plain Text, PDF, HTML or Latex.')
save_par = ' '.join([save1, save2, save3, save4, save5])
error1 = _('If you encounter any errors, please drop me a note so I can fix them.')
error2 = _('Any feedback is appreciated.')
error_par = ' '.join([error1, error2])
goodbye_par = _('Have a nice day!')
completeWelcomeText = '''\
%(greeting)s %(intro)s %(help_par)s
=== %(preview)s ===
%(preview_par)s
=== %(tags)s ===
%(tags_par)s
=== %(save)s ===
%(save_par)s
%(error_par)s
%(goodbye_par)s''' % globals()
welcome_day = {'text': completeWelcomeText}
multiple_entries_text = _('''\
=== Multiple entries ===
You can add multiple entries to a single day by \
using different journals (one named "Work", the other "Family"), \
separating your entries with different titles (=== Work ===, === Family ===) \
and using horizontal separator lines (20 “=”s).''')
multiple_entries_example = _('''\
=== Work ===
Here goes the first entry. It is about #work.
====================
=== Family ===
Here comes the entry about my #family.''')
multiple_entries_day = {
'text':
multiple_entries_text + '\n\n' +
20 * '=' + '\n\n' +
multiple_entries_example}
example_content = [welcome_day, multiple_entries_day]
commandline_help = get_commandline_parser().format_help()
help_text = '''
== Layout ==
%(preview1)s
== Text ==
The main text field is the container for your normal diary entries like this one:
%(example_entry)s
== Format ==
As you see, the text can be formatted **bold**, //italic//, --struck
through-- and __underlined__. As a convenience there is also the
"Format" button, with which you can format the main text and tags.
A blank line starts a new **paragraph**, two backslashes \\\\ result in
a **newline**.
To see the result, click on the "Preview" button. You can also see how
this text was formatted by looking at its [source source.txt].
**Lists** can be created by using the following syntax, if you use "+"
instead of "-" you can create a **numbered list**:
```
- First Item
- Indented Item
- Do not forget two blank lines after a list
```
== Hashtags ==
%(tags_par)s
== Advanced tagging ==
Until #hashtags were introduced, you could only tag a day with the tag
panel on the right side, which is now hidden by default. Drag the slider
to the left to see it.
It provides an advanced tagging mechanism that lets you add tags with
subtags like Movies->James Bond. Although you can't have spaces in
hashtags, you can achieve a similar effect with hashtags alone by adding
#Movies and #James_Bond to the day's text.
Tags and subtags can be formatted **bold**, //italic//, etc.
== Images, Files and Links ==
RedNotebook lets you insert images, files and links into your entries.
To do so, select the appropriate option in the "Insert" pull-down menu
above the main text field. The text will be inserted at the current
cursor position. Note that currently, the things you insert are only
linked to, but not copied into your journal directory.
With the insert button you cannot insert **links to directories** on
your computer. Those can be inserted manually however (``[Home
""file:///home/""]`` becomes [Home ""file:///home/""]).
== %(templates)s ==
%(temp_par)s
The files 1.txt to 7.txt in the template directory correspond to the
templates for each day of the week. The current weekday's template will
be filled into the text area when you click on "Template". You can open
the template files from inside RedNotebook by opening the menu next to
the "Template" button.
== Search ==
On the left you find the search box. Double-clicking on a day in the
search results lets you jump to it.
You can search for text or dates (e.g. 2014, 2014-01, 2014-01-19).
== Clouds ==[clouds]
The most frequently used words will appear in the word cloud on the
left. Its contents are only refreshed when RedNotebook starts and when
the journal is saved.
If a word appears in the cloud that you don't want to see there,
right-click and select to hide it. Alternatively, you can open the
Preferences dialog and add the word to the cloud blacklist. Short words
with less than five letters can be white-listed there as well. [Regular
expressions http://docs.python.org/library/re.html] are allowed in the
lists. If you want to hide words with special characters, you can
escape them with a backslash: 3\\.50\\?
You can **hide the word cloud** by adding the regular expression .* to
the blacklist. This will filter out all words.
== Spellcheck ==
RedNotebook supports spellchecking your entries. On Linux this feature
needs the package ``python-gtkspell``. The feature can be turned on and
off by toggling the item under the "Edit" menu.
Since gtkspell 2.0.15, you can select the spellchecking language by
right-clicking on the main text area (in edit mode) and choosing it
from the submenu "Languages".
=== Adding custom dictionaries under Windows ===
We use the dictionaries available from the [openoffice extension
download site http://extensions.services.openoffice.org/dictionaries].
You need to download the appropriate language extension file(s) (files
are openoffice extensions *.oxt, which are just zip files that contain
additional data). Once you have downloaded a dictionary extension file,
you can rename it so it has a .zip extension and then extract the *.dic
and *.aff files in it to <RedNotebook Dir>\\share\\enchant\\myspell\\.
If RedNotebook is running, you need to restart it for new dictionaries
to be recognized.
== Options ==
Make sure you check out the customizable options in the preferences
dialog. You can open this dialog by clicking on the entry in the "Edit"
menu.
== Save ==
%(save1)s %(save2)s %(save3)s
== Export ==
%(save4)s %(save5)s
Since version 0.9.2 you can also directly export your journal to PDF.
If the option does not show up in the export assistant, you need to
install pywebkitgtk version 1.1.5 or later (the package is sometimes
called ``python-webkit``).
**Latex caveats**
Make sure to type all links with the full path including the protocol:
- http://www.wikipedia.org or http://wikipedia.org
(--wikipedia.org--, --"""www.wikipedia.org"""--)
- file:///home/sam/myfile.txt (--/home/sam/myfile.txt--)
== Synchronize across multiple computers ==[sync]
Syncing RedNotebook with a remote server is easy. You can either use a
cloud service like Dropbox or save your journal to your own server.
=== Dropbox ===
If you are registered for [Dropbox http://www.dropbox.com], you can
just save your journal in a subfolder of the respective synchronized
folder in your home directory.
=== Directly save to remote FTP or SSH server ===
With Linux you can have your journal directory on a remote server. To
use the feature you have to connect your computer to the remote server.
This is most easily done in Nautilus by clicking on "File" -> "Connect
to Server". Be sure to add a bookmark for the server. This way you can
see your server in Nautilus at all times on the left side. The next
time you open RedNotebook you will find your server in the "New",
"Open" and "Save As" dialogs. There you can select a new folder on the
server for your journal.
=== External sync with remote server ===
If you have your own server, you might want to try [Conduit
http://www.conduit-project.org] or [Unison
http://www.cis.upenn.edu/~bcpierce/unison]. To sync or backup your
journal you have to sync your journal folder (default is
"$HOME/.rednotebook/data/") with a folder on your server.
Obviously you have to be connected to the internet to use that feature.
Be sure to backup your data regularly if you plan to save your content
remotely. There are always more pitfalls when an internet connection is
involved.
=== Dual boot ===
Using RedNotebook from multiple operating systems on the same computer
is also possible. Save your journal with "Journal->Save As" in a
directory all systems can access. Then on the other systems you can
open the journal with "Journal->Open".
Optionally, you can also **share your settings** and templates. The
relevant setting is found in the file "rednotebook/files/default.cfg".
There you can set the value of ``userDir`` to the path where you want
to share your settings between the systems.
== Portable mode ==
RedNotebook can be run in portable mode. In this mode, the template
directory, the configuration and the log file are saved in the
application directory instead of in the home directory. Additionally,
the path to the last opened journal is remembered relative to the
application directory.
To use RedNotebook on a flash drive on Windows, run the installer and
select a directory on your USB drive as the installation directory.
You probably don't need the "Start Menu Group" and Desktop icons in
portable mode.
To **activate portable mode**, change into the files/ directory and in
the default.cfg file set portable=1.
== Network drive ==
Unfortunately, you cannot add links to files on network shares directly
with the file selection dialog (this is due to a bug in GTK 2, it is
fixed in GTK 3, but RedNotebook still uses GTK 2 [bug on launchpad
""https://bugs.launchpad.net/ubuntu/+source/gtk+2.0/+bug/304345""]).
However, it is possible to enter links directly, for example ``[U:
""file:///U:/""]`` to reference the mapped drive letter [U
""file:///U:/""].
== Convert Latex output to PDF ==
In recent RedNotebook versions you can export your journal directly to
PDF, so this section may be obsolete. However, some people may prefer
to export their journal to Latex first and convert it to PDF later.
Here is how you do it:
=== Linux ===
For the conversion on Linux you need some extra packages:
texlive-latex-base and texlive-latex-recommended. Maybe you also need
texlive-latex-extra. Those contain the pdflatex program and are
available in the repositories of most Linux distros.
You can convert the .tex file by typing the following text in a command
line:
``pdflatex your-rednotebook-export.tex``
If you run into any problems during the conversion, the easiest way to
solve them is to install a latex editor (e.g. [Kile
http://kile.sourceforge.net]) and do the conversion with it. That way
you can see the errors right away and get rid of them by editing the
file.
=== Windows ===
You can open an exported Latex file with Texniccenter and convert it to
PDF with MikTex. Visit www.texniccenter.org and www.miktex.org for the
programs and instructions. Basically, you have to download both
programs, open the .tex file with Texniccenter and select "Build
Output" from the "Output" menu. The program will then create the PDF in
the same directory.
== Keyboard shortcuts ==
|| General | |
| Show help | <Ctrl> + H |
| Find | <Ctrl> + F |
| Export | <Ctrl> + E |
| Spellcheck | F7 |
| Fullscreen | F11 |
| New tag | <Ctrl> + N |
|| Navigation | |
| Go back one day | <Ctrl> + PageUp |
| Go forward one day | <Ctrl> + PageDown |
| Go to today | <Alt> + Home (Pos1) |
|| Insert | |
| Insert link | <Ctrl> + L |
| Insert date/time | <Ctrl> + D |
|| Format | |
| Bold | <Ctrl> + B |
| Italic | <Ctrl> + I |
| Monospace | <Ctrl> + M |
| Underline | <Ctrl> + U |
| Strikethrough | <Ctrl> + K |
| Remove format | <Ctrl> + R |
You can find more shortcuts in the menus in the main menu bar.
== Encryption ==
You can use e.g. [TrueCrypt http://www.truecrypt.org] to encrypt your
journal. The general idea is to create and mount an encrypted folder
with TrueCrypt and put your journal files in there.
In many Linux distributions it has become pretty easy to encrypt your
entire home partition. I would recommend doing that to anyone who
wishes to protect her/his diary and all other personal files. This
method is especially useful for laptop users, because their computers
are more likely to be stolen. If you encrypt your home partition all
RedNotebook data will be encrypted, too.
== Tips ==
%(multiple_entries_text)s
=== Todo list ===
You can also use RedNotebook as a todo list. One advantage is that you
never have to explicitly state the date when you added the todo item:
you just add it on one day and it remains there until you delete it.
Here is how it works:
- Make sure the tag panel on the right is visible, if not drag the
slider to the left.
- On the right click on "Add Tag"
- Fill "%(todo)s" and "Remember the milk" in the fields and hit "OK"
- In the cloud on the left you can now click on "%(todo)s" and see all
your todo items
- This list can be sorted by day or by todo item if you click on "Date"
or "Text" in the header
- To tick off a todo item you can strike it out by adding "--" around
the item.
- To mark an item as important, add "**" around it.
So --%(rtm)s-- becomes struck through and **%(dishes)s** becomes bold.
Once you've finished an item, you could also change its tag name from
"%(todo)s" to "%(done)s".
=== Week numbers ===
If you'd like to see the week numbers in the calendar, you can set the
value of weekNumbers to 1 in the configuration file. This file normally
resides at $HOME/.rednotebook/configuration.cfg.
=== Language ===
If you want to change RedNotebook's language, setting the environment
variable LANG (Linux) or LANGUAGE (Windows) to a different language
code should be sufficient. Language codes have e.g. the format "de_DE"
or "de_DE.UTF-8" (German). To set the language to English you can also
set the code to "C". Before you change the language make sure you have
the required language packs installed. Otherwise an error will be
shown.
On **Linux**, start a terminal and run ``export LANG=de_DE.utf8``. Then in the
same terminal, run ``rednotebook``. The language change will be gone
however once you close the terminal.
On **Windows**, set or create a LANGUAGE environment variable with the
desired code:
+ Right-click My Computer and click Properties.
+ In the System Properties window, click on the Advanced tab
(Windows XP) or go to Advanced System Settings (Windows 7).
+ In the Advanced section, click the Environment Variables button.
+ Click the New button and insert LANGUAGE at the top and e.g. de or
de_DE or de_DE.UTF-8 (use your [language code
""http://en.wikipedia.org/wiki/ISO_639-1""]).
=== Titles ===
You can insert titles into your post by adding "="s around your title
text. = My Title = is the biggest heading, ====== My Title ====== is
the smallest heading. A title line can only contain the title, nothing
else.
Numbered titles can be created by using "+" instead of "=". ""+ My
Title +"" produces a title like "1.", ++++++ My Title ++++++ produces a
title like 0.0.0.0.0.1
=== Insert HTML or Latex code ===
To insert custom code into your entries surround the code with single
quotes. Use 2 single quotes for inline insertions and 3 single quotes
if you want to insert a whole paragraph. For paragraphs be sure to put
the single quotes on their own line.
|| Text | Output |
| ``''<font color="red">Red</font>''`` | ''<font color="red">Red</font>'' |
| ``''$a^2$''`` | ''$a^2$'' (''a<sup>2</sup>'' in Latex) |
This feature can be used to insert e.g. Latex formulas:
```
\'''
$$\sum_{i=1}^{n} i = \frac{n \cdot (n+1)}{2}$$
\'''
```
will produce a nice looking formula in the Latex export.
=== Verbatim text (Preserve format) ===
To insert preformatted text preserving newlines and spaces, you can
use the backquotes (`). Use 2 backquotes for inline insertions and 3
backquotes if you want to insert a whole paragraph.
For paragraphs be sure to put the backquotes on their own line.
Two examples (have a look at the [source source.txt] to see how it's
done):
To install rednotebook use ``sudo apt-get install rednotebook``.
```
class Robot(object):
def greet(self):
print 'Hello World'
robot = Robot()
robot.greet()
```
=== Unparsed text ===
Formatting commands inside two pairs of "" are not interpreted (""**not
bold**"").
=== Comments ===
Comments can be inserted after percent signs (**%%**). They will not be
shown in the preview and the exports. The %% has to be the first
character on the line.
=== List of all entries ===
To get a list of your entries search for "-". You can sort the
resulting list chronologically by pressing the "Date" button.
== Command line options ==
```
%(commandline_help)s
```
== Data format ==
The content of a RedNotebook journal is saved in a directory with many
files, not just one file. The directory name is used as a name for the
journal.
In the directory there are several files all conforming to the naming
scheme <year>-<month>.txt (e.g. 2010-05.txt). Obviously these files
correspond to months (e.g. May 2010).
Each month file contains plain text for the days of that month.
The text is actually [YAML www.yaml.org] markup. Without the
(unnecessary) python directives the files look like this:
```
24: {text: "This is a normal text entry."}
25:
Ideas: {"Invent Anti-Hangover machine": null}
text: "This is another text entry, shown in the main text area."
```
As you can see, the data format uses a dictionary (hashmap) for storing
the information. The outer dictionary has the day numbers as keys and
the day content as values. The day values consist of another
dictionary. It can have a key "text" whose value will be inserted in
the main content area. Additionally there can be multiple other keys
that stand for the categories that belong to that day. Each category
contains a dictionary mapping category entries to the null value.
In summary the data format is a hierarchy of dictionaries. This way the
format can be easily extended if the need for that arises.
All textual content can be formatted with [txt2tags
http://txt2tags.org] markup.
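The month files can therefore be read back by any YAML parser. Here is a
rough sketch (using the third-party PyYAML library, not part of
RedNotebook) that assumes a month file containing only the plain markup
shown above:
```
import yaml

month_file = open('2010-05.txt')
month = yaml.safe_load(month_file)   # dict: day number -> day content
month_file.close()

print month[24]['text']    # "This is a normal text entry."
print sorted(month.keys()) # day numbers that have entries
```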
== Questions ==
If you have any questions or comments, feel free to post them on the
mailing list or contact me directly.
== Bugs ==
There is no software without bugs, so if you encounter one please drop
me a note. This way RedNotebook can get better, not only for you, but
for all users.
Bug reports should go [here https://bugs.launchpad.net/rednotebook],
but if you don't know how to use that site, a simple mail is equally
fine.
''' % globals()
desktop_file = '''\
[Desktop Entry]
Version=1.0
Name=RedNotebook
GenericName=Journal
Comment=Daily journal with calendar, templates and keyword searching
Exec=rednotebook
Icon=rednotebook
Terminal=false
Type=Application
Categories=Office;
StartupNotify=true
'''
| repo_name: Isendir/rednotebook | path: rednotebook/info.py | language: Python | license: gpl-2.0 | size: 24,256 | keyword: ["VisIt"] | text_hash: 36a963746485577bde3dbe67923718a6f453e0f98a03526ffc6e9ec92d3021e1 |
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
from setuptools.extern import six
from setuptools.extern.six.moves import http_client, urllib
from pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
errors = 'strict' if six.PY2 else 'surrogateescape'
return s.encode('utf-8', errors)
class upload_docs(upload):
# override the default repository as upload_docs isn't
# supported by Warehouse (and won't be).
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
if 'pypi.python.org' in self.repository:
log.warn("Upload_docs command is deprecated. Use RTD instead.")
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
tmpl = "no files found in upload directory '%s'"
raise DistutilsOptionError(tmpl % self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
@staticmethod
def _build_part(item, sep_boundary):
key, values = item
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if isinstance(value, tuple):
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = _encode(value)
yield sep_boundary
yield _encode(title)
yield b"\n\n"
yield value
if value and value[-1:] == b'\r':
yield b'\n' # write an extra newline (lurve Macs)
@classmethod
def _build_multipart(cls, data):
"""
Build up the MIME payload for the POST data
"""
boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b'\n--' + boundary
end_boundary = sep_boundary + b'--'
end_items = end_boundary, b"\n",
builder = functools.partial(
cls._build_part,
sep_boundary=sep_boundary,
)
part_groups = map(builder, data.items())
parts = itertools.chain.from_iterable(part_groups)
body_items = itertools.chain(parts, end_items)
content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
return b''.join(body_items), content_type
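# Rough usage sketch (not from the original source), using the same fields
# that upload_file() sends below:
#     body, ctype = upload_docs._build_multipart(
#         {':action': 'doc_upload', 'content': ('docs.zip', b'...')})
#     # ctype == 'multipart/form-data; boundary=--------------GHSK...'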
def upload_file(self, filename):
with open(filename, 'rb') as f:
content = f.read()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = _encode(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if not six.PY2:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
body, ct = self._build_multipart(data)
msg = "Submitting documentation to %s" % (self.repository)
self.announce(msg, log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urllib.parse.urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = http_client.HTTPConnection(netloc)
elif schema == 'https':
conn = http_client.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = ct
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
msg = 'Server response (%s): %s' % (r.status, r.reason)
self.announce(msg, log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
msg = 'Upload successful. Visit %s' % location
self.announce(msg, log.INFO)
else:
msg = 'Upload failed (%s): %s' % (r.status, r.reason)
self.announce(msg, log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
| repo_name: google/material-design-icons | path: update/venv/lib/python3.9/site-packages/setuptools/command/upload_docs.py | language: Python | license: apache-2.0 | size: 7,315 | keyword: ["VisIt"] | text_hash: 3b5dfb6cdf5db7fb042debdfe2fc2ec16464a201dfe1b3d773e8cd4715506a2c |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# mal
# ------------------------------------------------------------
# MyAnimeList information provider based on the Jikan v3 API.
# Used to fetch anime data for the addon's video library,
# infoLabels and also for Kodi.
#
# Part of the code has been taken from the existing TMDb and
# TVDb scrapers; credit to whom it is due.
#
# ------------------------------------------------------------
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from builtins import range
from builtins import object
import re
import time
import copy
from core import httptools
from core import scrapertools
from core.item import InfoLabels
from platformcode import config
from platformcode import logger
omal_global = None
def set_infoLabels(source, seek=True, include_hentai=False):
"""
Dependiendo del tipo de source obtiene y establece los datos extras
de animes o capítulos en los item.infoLabels
@param source: Variable que contiene la información para establecer infoLabels
@type source: list, item
@param seek: Si es True, busca datos adicionales en myanimelist.net-
En caso contrario, obtiene los datos del propio Item.
@type seek: bool
@param include_hentai: Determina si se incluirá hentai (contenido +18) en la búsqueda
@type include_hentai: bool
@return: un número o lista de números con el resultado de las llamadas a set_infoLabels_item
@rtype: int, list
"""
logger.info()
start_time = time.time()
if isinstance(source, list):
ret = set_infoLabels_itemlist(source, seek, include_hentai)
logger.debug("Se han obtenido los datos de {} enlaces en {} segundos".format(len(source), time.time() - start_time))
else:
ret = set_infoLabels_item(source, seek, include_hentai)
logger.debug("Se han obtenido los datos del enlace en {} segundos".format(time.time() - start_time))
return ret
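# Usage sketch (illustrative, not from the original file); assumes `item` is a
# core.item.Item whose contentSerieName / contentType fields are already set:
#     set_infoLabels(item)       # fills item.infoLabels from MyAnimeList
#     set_infoLabels(item_list)  # same for a list of items (throttled to 2 threads)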
def set_infoLabels_itemlist(item_list, seek=False, include_hentai=False):
"""
De manera concurrente, obtiene los datos de los items incluidos en la lista item_list.
La API no tiene un límite definido, pero se recomiendan 2 peticiones por IP por segundo
por lo que no se recomienda utillizar esta función para obtener datos generales de anime.
Esto es, obtener datos de 20 pelis/animes/ovas a la vez, debido a que los tiempos de espera
son equivalentes la mitad de items (ej. 20 items en 10 segundos) lo que alarga bastante los
tiempos de espera para listar.
@param item_list: listado de objetos Item que representan animes, temporadas o capítulos.
Las temporadas son tratadas como entradas/series individuales. El atributo infoLabels
de cada objeto Item sera modificado con los obtenidos de la web.
@type item_list: list
@param seek: Si es True hace una búsqueda en myanimelist.net para obtener los datos, en caso contrario
obtiene los datos del propio Item si existen.
@type seek: bool
@param include_hentai: Determina si se incluirá hentai (contenido +18) en la búsqueda (solo búsquedas por texto)
@type include_hentai: bool
@return: Una lista de números cuyo valor absoluto representa la cantidad de elementos incluidos en el
atributo infoLabels de cada Item. Este número sera positivo si se han obtenido los datos desde
myanimelist satisfactoriamente y negativo en caso contrario.
@rtype: list
"""
logger.info()
import threading
semaforo = threading.Semaphore(2)
lock = threading.Lock()
r_list = list()
i = 0
l_hilo = list()
def sub_thread(_item, _i, _seek, _include_hentai):
semaforo.acquire()
ret = set_infoLabels_item(_item, _seek, _include_hentai, lock)
semaforo.release()
r_list.append((_i, _item, ret))
for item in item_list:
t = threading.Thread(target=sub_thread, args=(item, i, seek, include_hentai))
t.start()
i += 1
l_hilo.append(t)
# esperar q todos los hilos terminen
for x in l_hilo:
x.join()
# Ordenar lista de resultados por orden de llamada para mantener el mismo orden q item_list
r_list.sort(key=lambda i: i[0])
# Reconstruir y devolver la lista solo con los resultados de las llamadas individuales
return [ii[2] for ii in r_list]
def set_infoLabels_item(item, seek=True, include_hentai=False, lock=None):
"""
Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula.
@param item: Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera modificado
incluyendo los datos extras localizados.
@type item: Item
@param seek: Si es True hace una búsqueda en myanimelist.net para obtener los datos, en caso contrario
obtiene los datos del propio Item si existen.
@type seek: bool
@param include_hentai: Determina si se incluirá hentai (contenido +18) en la búsqueda (solo búsquedas por texto)
@type include_hentai: bool
@param lock: Para adquisición de threads al llamarse desde set_infoLabels_itemlist.
@return: Una lista de números cuyo valor absoluto representa la cantidad de elementos incluidos en el
atributo infoLabels de cada Item. Este número sera positivo si se han obtenido los datos desde
myanimelist satisfactoriamente y negativo en caso contrario.
@rtype: list
"""
logger.info()
#===========================================================================#
# IMPORTANT NOTE: contentType must be one of the types supported by
# MyAnimeList for infoLabels to be assigned correctly here.
#
# These are: "ova", "ona", "special", "tv", "movie".
#
# It can also be called without a valid contentType (or with none at all),
# but in those cases the search is done without a contentType, which can
# lead to somewhat imprecise results. This particularly affects OVA/ONA,
# which can be either series or movies.
#
# Note that contentType will be reassigned to movie/tv depending on what
# is scraped from MAL.
#===========================================================================#
global omal_global
tipos_busqueda_validos = ["ova", "ona", "special", "movie", "tv"]
if item.contentType in tipos_busqueda_validos:
tipo_busqueda = item.contentType
elif item.contentType in ["tvshow"]:
tipo_busqueda = "tv"
else:
tipo_busqueda = ""
if tipo_busqueda in ["tv"] or item.contentSerieName:
texto_buscado = item.contentSerieName
elif item.contentTitle:
texto_buscado = item.contentTitle
else:
texto_buscado = item.title
def __leer_datos(omal_aux):
item.infoLabels = omal_aux.get_infoLabels(item.infoLabels)
if item.infoLabels['thumbnail']:
item.thumbnail = item.infoLabels['thumbnail']
if item.infoLabels['fanart']:
item.fanart = item.infoLabels['fanart']
logger.info("seek: "+str(seek))
if seek:
# Estamos buscando datos de episodio o temporada
if 'season' in item.infoLabels.keys():
try:
numtemporada = int(item.infoLabels['season'])
except ValueError:
logger.debug("El numero de temporada no es valido")
return -1 * len(item.infoLabels)
# Bloqueamos el thread
if lock:
lock.acquire()
# Si aún no tenemos datos sobre el anime, buscamos
if not omal_global or (item.infoLabels['mal_id'] and str(omal_global.result.get("mal_id")) != item.infoLabels['mal_id']) \
or (omal_global.texto_buscado and omal_global.texto_buscado != item.infoLabels['tvshowtitle']):
if item.infoLabels.get('mal_id'):
omal_global = MAL(mal_id=item.infoLabels['mal_id'])
else:
omal_global = MAL(texto_buscado=texto_buscado, tipo=tipo_busqueda, year=item.infoLabels['year'], include_hentai=include_hentai)
__leer_datos(omal_global)
# Si hay nº de episodio en el item, estamos buscando + info. de episodios
if item.infoLabels.get('episode'):
try:
episode = int(item.infoLabels['episode'])
except ValueError:
logger.debug("El número de episodio (%s) no es valido" % repr(item.infoLabels['episode']))
return -1 * len(item.infoLabels)
# Tenemos número de temporada y episodio válidos
# Buscamos los datos del episodio
item.infoLabels['mediatype'] = 'episode'
episodio = omal_global.get_episodio(numtemporada, episode)
if episodio:
# Actualizar datos
__leer_datos(omal_global)
item.infoLabels['title'] = episodio['episodio_titulo']
if episodio['episodio_air_date']:
item.infoLabels['aired'] = episodio['episodio_air_date']
return len(item.infoLabels)
# Sino, estamos buscando datos de temporada. (Se pasa lo del anime porque datos de temporada pues no hay 🤷♂️)
else:
item.infoLabels['mediatype'] = 'season'
temporada = omal_global.get_temporada(numtemporada)
# Función heredada (tmdb)
# Si tenemos datos de temporada, los asignamos a infoLabels
if temporada:
__leer_datos(omal_global)
item.infoLabels['title'] = temporada['name']
if temporada['air_date']:
item.infoLabels['aired'] = temporada['air_date']
return len(item.infoLabels)
# Liberamos el thread
if lock and lock.locked():
lock.release()
# Estamos buscando datos generales de un anime
else:
# Bloqueamos el thread
if lock:
lock.acquire()
omal = copy.copy(omal_global)
# Búsqueda por ID de MyAnimeList
if item.infoLabels.get('mal_id'):
omal = MAL(mal_id=item.infoLabels['mal_id'])
# No hay ID de MAL; buscamos por título
if not item.infoLabels.get('mal_id'):
# Búsqueda por tipo (si hay)
if tipo_busqueda:
omal = MAL(texto_buscado=texto_buscado, tipo=tipo_busqueda, year=item.infoLabels['year'], include_hentai=include_hentai)
# Búsqueda genérica (adivinemos)
else:
omal = MAL(texto_buscado=texto_buscado, year=item.infoLabels.get('year', ''), include_hentai=include_hentai)
if lock and lock.locked():
lock.release()
# Si hay resultado de búsqueda válido (hay ID de MAL), procesamos y retornamos de éxito
if omal is not None and omal.get_id():
__leer_datos(omal)
return len(item.infoLabels)
# Liberamos el thread
if lock and lock.locked():
lock.release()
# La búsqueda falló en alguna parte. Esto no es común en MAL, verificar por si acaso.
# item.contentType = item.infoLabels['mediatype']
return -1 * len(item.infoLabels)
def find_and_set_infoLabels(item):
logger.info()
global omal_global
mal_result = None
tipos_busqueda_validos = ["ova", "ona", "special", "movie", "tv"]
if item.contentType in tipos_busqueda_validos:
tipo_busqueda = item.contentType
elif item.contentType in ["tvshow"]:
tipo_busqueda = "tv"
else:
tipo_busqueda = ""
if tipo_busqueda in ["movie", "special"] or item.contentTitle:
tipo_contenido = config.get_localized_string(70283)
title = item.contentTitle
elif tipo_busqueda in ["tv"] or item.contentSerieName:
tipo_contenido = config.get_localized_string(60245)
title = item.contentSerieName
else:
tipo_contenido = ""
title = item.title
# Si el titulo incluye el (año) se lo quitamos
year = scrapertools.find_single_match(title, "^.+?\s*(\(\d{4}\))$")
if year:
title = title.replace(year, "").strip()
item.infoLabels['year'] = year[1:-1]
# Si no tenemos ID de MAL, buscamos por texto
if not item.infoLabels.get("mal_id"):
omal_global = MAL(texto_buscado=title, tipo=tipo_busqueda, year=item.infoLabels['year'])
# Si hay ID de MAL pero no se ha buscado o el ID de MAL no coincide con el del resultado, buscamos por ID
elif not omal_global or str(omal_global.result.get("mal_id")) != item.infoLabels['mal_id']:
omal_global = MAL(mal_id=item.infoLabels['mal_id'])
results = omal_global.get_results_list()
# Si hay más de un resultado, preguntamos cuál es el correcto
# Esta acción ocurrirá siempre que no se provea un mal_id (por el contenido relacionado que devuelve)
if len(results) > 1:
from platformcode import platformtools
mal_result = platformtools.show_video_info(results, item=item, caption=config.get_localized_string(60247) % (title, tipo_contenido))
# Si solo hay un resultado, lo seleccionamos
elif len(results) > 0:
mal_result = results[0]
# Comprobaciones
if isinstance(item.infoLabels, InfoLabels):
infoLabels = item.infoLabels
else:
infoLabels = InfoLabels()
if mal_result:
infoLabels['mal_id'] = mal_result['mal_id']
item.infoLabels = infoLabels
set_infoLabels_item(item)
return True
else:
item.infoLabels = infoLabels
return False
# def get_nfo(item):
# """
# Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi,
# @param item: elemento que contiene los datos necesarios para generar la info
# @type item: Item
# @rtype: str
# @return:
# """
# if "season" in item.infoLabels and "episode" in item.infoLabels:
# info_nfo = "http://thetvdb.com/?tab=episode&seriesid=%s&seasonid=%s&id=%s\n" \
# % (item.infoLabels['tvdb_id'], item.season_id, item.episode_id)
# else:
# info_nfo = ', '.join(item.infoLabels['url_scraper']) + "\n"
# return info_nfo
def completar_codigos(item):
"""
Si es necesario comprueba si existen identificadores externos y si no existen los busca
@param item: tipo item
@type item: Item
"""
# if not item.infoLabels['tmdb_id']:
# listsources = [(item.infoLabels['tvdb_id'], "tvdb_id")]
# if item.infoLabels['imdb_id']:
# listsources.append((item.infoLabels['imdb_id'], "imdb_id"))
# from core.tmdb import Tmdb
# ob = Tmdb()
# for external_id, external_source in listsources:
# ob.search_by_id(id=external_id, source=external_source, tipo='tv')
# item.infoLabels['tmdb_id'] = ob.get_id()
# if item.infoLabels['tmdb_id']:
# url_scraper = "https://www.themoviedb.org/tv/%s" % item.infoLabels['tmdb_id']
# item.infoLabels['url_scraper'].append(url_scraper)
# break
pass
# ---------------------------------------------------------------------------------------------------------------
# class MAL:
# Anime scraper for the addon, based on the https://jikan.moe/ API (unofficial MyAnimeList API)
# version 0.1:
# - Initial release
#
#
# Usage:
# Constructor:
# MAL(texto_buscado)
# Parameters:
# texto_buscado: (str) Text or part of the text to search for
# (optional) tipo: ("ova", "ona", "special", "tv", "movie", "music") The content type. Defaults to "tv"
# (optional) year: (str) Year to search within (takes priority over fecha_inicial).
# (optional) fecha_inicial: (str) Search with this start date, in yyyy-mm-dd format.
# (optional) fecha_final: (str) Search with this end date, in yyyy-mm-dd format.
# (optional) estado: ("airing", "completed", "to_be_aired") Airing status.
# (optional) page: (int) When a search returns many results, they are organized in pages.
# Any page can be loaded, although by default it is always the first one.
# (optional) include_hentai: (bool) Whether adult (+18, hentai) content is included in the results. Only applies to text searches.
# (optional) orden: ("title", "start_date", "end_date", "type", "id", "episodes", "score") Ordering of the results (if there is more than 1).
# (optional) direccion: ("asc", "desc") Direction (ascending or descending) of the results (if there is more than 1).
#
# Return:
# Returns a MAL object with the first page of the results of searching for 'texto_buscado'
# on myanimelist.net. The more optional parameters are included, the more precise the search will be.
# In addition, the object is initialized with the first result of the first page of results.
#
# MAL(mal_id=ID)
# Parameters:
# ID: (int or str) Identifier of an anime on MyAnimeList
# Return:
# This call returns a MAL object with the result associated with the corresponding
# MyAnimeList identifier, or an empty result if it does not exist
#
#
# Main methods:
# get_id(): Returns a str with the MAL identifier associated with the anime, or an empty string if nothing is loaded
# get_temporada(temporada): Returns a dictionary with data specific to the season.
# get_episodio(temporada, capitulo): Returns a dictionary with data specific to the episode.
# get_generos(): Returns a str with the list of genres associated with the anime.
#
#
# Other methods:
# load_resultado(resultado, page): When the search returns several results, we can select which specific
# result to load and from which page.
#
# Limitations:
# It is recommended not to exceed 2 requests per second (1 every 0.5s) to avoid being blocked
# Information about the API: https://jikan.docs.apiary.io
# -------------------------------------------------------------------------------------------------------------------
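# Usage sketch (illustrative, not from the original file):
#     mal = MAL(texto_buscado='Cowboy Bebop', tipo='tv')
#     if mal.get_id():
#         labels = mal.get_infoLabels()  # InfoLabels dict with title, plot, rating, ...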
class MAL(object):
def __init__(self, **kwargs):
# Variables iniciales
self.page = kwargs.get('page', 1)
self.orden = kwargs.get('orden', '')
self.direccion_orden = kwargs.get('direccion', '')
self.results = []
self.result = {}
self.list_episodes = {}
self.episodes = {}
self.temporada = {}
self.busqueda_id = str(kwargs.get('mal_id', ''))
self.busqueda_texto = re.sub('\[\\\?(B|I|COLOR)\s?[^\]]*\]', '', kwargs.get('texto_buscado', '')).strip() # Limpiamos el texto
self.busqueda_tipo = kwargs.get('tipo', '')
self.busqueda_fecha_inicio = scrapertools.find_single_match(kwargs.get('fecha_inicial', ''), '\d{4}-[0-1][0-9]-\d{2}') # Validamos la fecha estilo yyyy-mm-dd
self.busqueda_fecha_fin = scrapertools.find_single_match(kwargs.get('fecha_final', ''), '\d{4}-[0-1][0-9]-\d{2}') # Validamos la fecha estilo yyyy-mm-dd
self.busqueda_hentai = kwargs.get("include_hentai", False)
if kwargs.get('year', ''):
self.busqueda_fecha_inicio = "{}-01-01".format(kwargs.get('year', ''))
self.direccion_orden ='desc'
self.busqueda_estado = kwargs.get('estado', '')
# Si hay, buscamos por id de MAL
if self.busqueda_id:
self.__by_id()
# Sino, buscamos por texto
elif self.busqueda_texto:
self.__search(page=self.page)
# Si no hay resultados de la búsqueda...
if not self.result:
if self.busqueda_id:
msg = config.get_localized_string(70266) % ("{} en MyAnimeList".format(self.busqueda_id))
else:
msg = config.get_localized_string(70266) % ("'{}' en MyAnimeList".format(self.busqueda_texto))
logger.debug(msg)
@staticmethod
def get_json(url):
try:
result = httptools.downloadpage(url, cookies=False, ignore_response_code=True, hide_infobox=True)
res_headers = result.headers
dict_data = result.json
# Pasamos solo los resultados sin paginación
if "results" in dict_data and isinstance(dict_data.get("results"), list) or isinstance(dict_data.get("results"), dict):
dict_data = dict_data["results"]
# Pasamos solo los episodios sin tags adicionales
elif "episodes" in dict_data and isinstance(dict_data.get("episodes"), list) or isinstance(dict_data.get("episodes"), dict):
dict_data = dict_data["episodes"]
# Pasamos tal cual el dict e informamos de error en el log
elif "error" in dict_data:
logger.debug("\nError de MAL: {} {}".format(dict_data["error"], dict_data["message"]))
except Exception as ex:
# Hubo un error al obtener los datos, hay que ver si hubo cambios
message = "An exception of type {} occured. Arguments:\n{}".format(type(ex).__name__, repr(ex.args))
logger.error("Error en: {}".format(message))
dict_data = {}
return dict_data
def __search(self, index_results=0, page=1):
"""
Busca una anime dados ciertos parámetros.
@param name: nombre a buscar
@type name: str
@param status: estado de emisión
@type status: str
@param type: tipo de contenido (ej. ova, ona, special, tv, movie, music)
@type type: str
@param start_date: fecha de primera emisión al formato yyyy-mm-dd
@type start_date: str
@param end_date: fecha de última emisión al formato yyyy-mm-dd
@type end_date: str
@param mal_id: id de MyAnimeList (si se conoce)
@type mal_id: str
"""
logger.info()
result = {}
# Buscamos según la información proporcionada
if self.busqueda_texto:
url = 'https://api.jikan.moe/v3/search/anime?q={}'.format(self.busqueda_texto)
# Desactivado da mejores resultados
# if self.busqueda_tipo == "movie":
# url += '&type={}'.format(self.busqueda_tipo)
if self.busqueda_hentai:
url += '&rated=rx'
if self.busqueda_estado:
url += '&status={}'.format(self.busqueda_estado)
# Dan problemas en ocasiones
# if self.busqueda_fecha_inicio:
# url += '&start_date={}'.format(self.busqueda_fecha_inicio)
# if self.busqueda_fecha_fin:
# url += '&end_date={}'.format(self.busqueda_fecha_fin)
# if self.orden:
# url += '&order_by={}'.format(self.orden)
# if self.direccion_orden:
# url += '&sort={}'.format(self.direccion_orden)
if page > 1:
url += '&page={}'.format(page)
logger.debug("[Mal.py] Buscando {}:\n{}".format(self.busqueda_texto, url))
results = self.get_json(url)
if isinstance(results, list) or not (isinstance(results, dict) and results.get("error")):
if index_results >= len(results):
# Se ha solicitado un índice de resultado mayor de los que se obtuvieron
logger.error("La busqueda de '{}' dio {} resultados para la pagina {}\nImposible mostrar el resultado numero {}".format(
self.busqueda_texto, len(results), page, index_results))
return 0
# Devolvemos el número de resultados de esta página
self.results = results
self.result = self.results[index_results]
self.busqueda_id = self.result["mal_id"]
return len(self.results)
else:
# Hubo errores al consultar, verificar la consulta
msg = "La busqueda de '{}' no dio resultados para la pagina {}".format(self.busqueda_texto, page)
logger.error(msg)
return 0
def __by_id(self, mal_id=None):
"""
Busca una anime según su ID de MyAnimeList.
@param mal_id: id de MyAnimeList
@type mal_id: str
"""
logger.info()
if not mal_id and self.busqueda_id:
mal_id = self.busqueda_id
# Si hay id,solicitamos los datos asociados directamente
if mal_id:
url = 'https://api.jikan.moe/v3/anime/{}'.format(mal_id)
buscando = "mal_id: {}".format(mal_id)
logger.debug("[Mal.py] Buscando {}:\n{}".format(buscando, url))
result = self.get_json(url)
# Se obtuvo el resultado sin errores
if result and (isinstance(result, list) or isinstance(result, dict)) and not "error" in result:
self.results = [result]
self.total_results = 1
self.total_pages = 1
self.result = result
# El ID NO existe o hubo errores al procesar
else:
msg = "La busqueda de {} no dio resultados.".format(mal_id)
logger.debug(msg)
def get_id(self):
"""
:return:
Devuelve el identificador de MyAnimeList o una cadena vacia en caso de que no hubiese
nada cargado. Se puede utilizar este método para saber si una búsqueda ha dado resultado.
:rtype: str
"""
logger.info()
return str(self.result.get('mal_id', ''))
def get_results_list(self, num_result=25):
logger.info()
result_list = []
# Limitamos los resultados si son más del máximo establecido
if num_result <= 0:
num_result = self.total_results
num_result = min([num_result, len(self.results)])
for res in self.results:
result = res
result.update({"type": self.get_contentType(res),
"title": res.get("title", ""),
"original_title": res.get("title_japanese", ""),
"date": self.get_airdate(res),
"plot": res.get("synopsis", ""),
"thumbnail": res.get("image_url", "")})
result_list.append(result)
return result_list
def get_contentType(self, result=None):
"""
Obtiene el contentType de un anime basado en ciertos criterios
"""
if not result:
result = self.result
if result["type"] == "TV" or result["episodes"] > 1:
contentType = "tv"
else:
contentType = "movie"
return contentType
def get_airdate(self, origen=None):
"""
Obtiene la fecha de lanzamiento según la información del resultado
"""
if not origen:
origen = self.result
if origen.get("start_date"):
date = scrapertools.find_multiple_matches(origen.get("start_date"), "(\d{4}).(\d{2}).(\d{2})")
if len(date) > 0:
date = date[0]
air_date = "{}/{}/{}".format(date[0], date[1], date[2])
elif origen.get("airing") and not isinstance(origen.get("airing"), bool):
try:
start_date = origen["airing"].get('prop', {}).get('from', {})
air_date = '{}/{}/{}'.format(start_date.get('year'), start_date.get('month'), start_date.get('day'))
except:
logger.error("airing: {}".format(origen['airing']))
air_date = ""
else:
air_date = ""
return air_date
def get_generos(self, origen=None):
"""
Función de relleno que acomoda los géneros. Se podría localizar/traducir después...
"""
generos = ""
if origen is None:
origen = self.result
if "genres" in origen:
", ".join(i["name"] for i in origen["genres"])
return generos
def get_temporada(self, numtemporada=1):
"""
Devuelve una temporada concreta. Función de "placebo" porque MAL no clasifica temporadas (pone todos los episodios "de jalón")
NOTA: MAL clasifica temporadas diferentes como elementos diferentes, por lo que si hay algún anime que se categoriza
como el mismo pero 2da temporada, habrá problemas si no se busca con el nombre de temporada (resultados inexactos)
Devuelve un dict con el mismo nº de temporada y obtiene los episodios del anime (para uso con get_episodio)
El parámetro numtemporada se ignora y se mantiene por compatibilidad (aunque se valida por si acaso)
"""
logger.info()
# Si no hay id o no es serie, devolvemos un dict vacío
if not self.busqueda_id or self.busqueda_tipo == "movie":
return {}
numtemporada = int(numtemporada)
# if numtemporada > 1:
# if not self.result:
# self.__by_id()
# self.busqueda_texto = "{} season {}".format(self.result["title"], numtemporada)
# self.__search()
# Si aún no se han obtenido los episodios, los obtenemos
if not self.temporada.get(numtemporada, {}):
air_date = self.get_airdate(self.result)
self.temporada[numtemporada] = {"season": numtemporada, "name": config.get_localized_string(60027) % numtemporada, "air_date": air_date}
url = 'https://api.jikan.moe/v3/anime/{}/episodes'.format(self.busqueda_id)
buscando = "id_MAL: {} temporada: {}\nURL: {}".format(self.busqueda_id, numtemporada, url)
logger.debug("[Mal.py] Buscando {}".format(buscando))
episodios = self.get_json(url)
if episodios.get("error"):
self.temporada[numtemporada].update(episodios)
else:
self.temporada[numtemporada]["episodes"] = episodios
return {"season": numtemporada}
def get_episodio(self, numtemporada=1, capitulo=1):
"""
Parámetros:
numtemporada(no utilizar): (int) Número de temporada. Por defecto 1.
capitulo: (int) Número de capítulo. Por defecto 1.
Return: (dic)
Devuelve un dicionario con los siguientes datos:
"episodio_titulo", "episodio_titulo_original", "episodio_air_date"
"""
# Si no hay id o no es serie, salimos
if not self.result["mal_id"] or self.busqueda_tipo != "tv":
return {}
# Validamos nº de capítulo
try:
capitulo = int(capitulo)
except ValueError:
logger.debug("El número de episodio no es válido")
return {}
# Obtenemos la temporada
temporada = self.get_temporada(numtemporada)
if not temporada:
# No se pudo obtener la temporada, salimos
return {}
# Fetch the episode data
ret_dic = {}
if capitulo != -1:
episodio = temporada["episodes"][capitulo - 1]
ret_dic["episodio_titulo"] = episodio.get("title", "")
ret_dic["episodio_titulo_original"] = episodio.get("title_japanese", "")
# The API we use provides no episode synopses
# ret_dic["episodio_sinopsis"] = episodio["synopsis"]
date = scrapertools.find_single_match(episodio["aired"], "(\d{4})-(\d{2})-(\d{2})")
ret_dic["episodio_air_date"] = date[2] + "/" + date[1] + "/" + date[0]
return ret_dic
def get_infoLabels(self, infoLabels=None, origen=None):
"""
:param infoLabels: Informacion extra del anime
:type infoLabels: Dict
:param origen: Diccionario desde donde se obtienen los infoLabels, por defecto self.result
:type origen: Dict
:return: Devuelve la información extra obtenida del objeto actual. Si se paso el parametro infoLabels, el valor
devuelto sera el leído como parámetro debidamente actualizado.
:rtype: Dict
"""
logger.info()
if not "request_hash" in self.result:
self.__by_id()
if infoLabels:
ret_infoLabels = InfoLabels(infoLabels)
else:
ret_infoLabels = InfoLabels()
if not origen:
origen = self.result
items = list(origen.items())
# Información de temporada/episodio
if isinstance(ret_infoLabels.get('season'), int):
if self.temporada.get(ret_infoLabels['season']):
# Si hay datos cargados de la temporada indicada
episodio = -1
if ret_infoLabels.get('episode'):
episodio = ret_infoLabels['episode']
items.extend(list(self.get_episodio(ret_infoLabels['season'], episodio).items()))
for key, value in items:
if not value:
continue
elif isinstance(value, str):
value = re.sub(r"\n|\r|\t", "", value)
# Skip literal "None" strings returned by the API
if value == "None":
continue
if key == 'synopsis':
ret_infoLabels['plot'] = value
elif key == 'type':
ret_infoLabels['mediatype'] = self.get_contentType(origen)
ret_infoLabels['type'] = value
if ret_infoLabels['mediatype'] == "movie":
ret_infoLabels.pop("tvshowtitle", "")
elif key == 'duration':
duration = 0
time = scrapertools.find_single_match(value, r"(?:(\d+).hr.+?|)(\d+).min")
if time[0]:
duration += int(time[0]) * 60
duration += int(time[1])
ret_infoLabels['duration'] = int(duration * 60)
elif key in ['aired', "start_date"]:
ret_infoLabels['year'] = self.get_airdate(origen).split("/")[0]
ret_infoLabels['release_date'] = self.get_airdate(origen)
ret_infoLabels['aired'] = ret_infoLabels['release_date']
ret_infoLabels['premiered'] = ret_infoLabels['release_date']
elif key == 'image_url':
ret_infoLabels['thumbnail'] = value
elif key == 'background' and value:
ret_infoLabels['fanart'] = value
elif key == 'mal_id':
ret_infoLabels['code'] = value
ret_infoLabels['mal_id'] = value
ret_infoLabels['id'] = value
elif key == 'genres':
ret_infoLabels['genre'] = self.get_generos(origen)
elif key == 'name' or key == 'title':
ret_infoLabels['title'] = value
elif key == 'studios':
ret_infoLabels['studio'] = ", ".join(i['name'] for i in value)
elif key == 'trailer_url':
ret_infoLabels['trailer'] = value
elif key in ['title_japanese']:
ret_infoLabels['originaltitle'] = value
elif key == 'score':
ret_infoLabels['rating'] = float(value)
elif key == 'scored_by':
ret_infoLabels['votes'] = value
elif isinstance(value, str) or isinstance(value, int) or isinstance(value, float):
ret_infoLabels[key] = value
else:
# logger.debug("Atributos no añadidos: " + key +'= '+ str(v))
pass
if ret_infoLabels["mediatype"] == "movie" and "episodes" in ret_infoLabels:
ret_infoLabels.pop("episodes", "")
return ret_infoLabels
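# Illustrative usage sketch (added for clarity; not part of the original module).
# `otmal` stands for an already-constructed scraper object from this module whose
# MAL id has been resolved; the concrete season/episode numbers are assumptions:
#     otmal.get_temporada(1)            # caches season 1 and its episode list
#     ep = otmal.get_episodio(1, 3)     # {"episodio_titulo": ..., "episodio_air_date": "DD/MM/YYYY"}
#     labels = otmal.get_infoLabels()   # InfoLabels dict: 'title', 'plot', 'rating', 'mediatype', ...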
|
alfa-addon/addon
|
plugin.video.alfa/core/mal.py
|
Python
|
gpl-3.0
| 37,049
|
[
"MOE"
] |
af104003b940046a43ba3df4c59a8c96c915b81616227645811d99dca12e1cdf
|
""" Expose the model metadata module as a datatype module also,
allowing it to live in galaxy.model means the model module doesn't
have any dependencies on th datatypes module. This module will need
to remain here for datatypes living in the tool shed so we might as
well keep and use this interface from the datatypes module.
"""
from galaxy.model.metadata import (
Statement,
MetadataElement,
MetadataCollection,
MetadataSpecCollection,
MetadataParameter,
MetadataElementSpec,
SelectParameter,
DBKeyParameter,
RangeParameter,
ColumnParameter,
ColumnTypesParameter,
ListParameter,
DictParameter,
PythonObjectParameter,
FileParameter,
MetadataTempFile,
JobExternalOutputMetadataWrapper,
)
__all__ = [
"Statement",
"MetadataElement",
"MetadataCollection",
"MetadataSpecCollection",
"MetadataParameter",
"MetadataElementSpec",
"SelectParameter",
"DBKeyParameter",
"RangeParameter",
"ColumnParameter",
"ColumnTypesParameter",
"ListParameter",
"DictParameter",
"PythonObjectParameter",
"FileParameter",
"MetadataTempFile",
"JobExternalOutputMetadataWrapper",
]
|
icaoberg/cellorganizer-galaxy-tools
|
datatypes/metadata.py
|
Python
|
gpl-3.0
| 1,195
|
[
"Galaxy"
] |
65abdf42773a2b50ceb984ced62a37188cb07d9a4793982942bbe2d7b0542f89
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.particle_systems import *
class L(Layer):
def __init__(self):
super( L, self).__init__()
# p = Fireworks()
# p = Explosion()
# p = Fire()
# p = Flower()
p = Smoke()
# p = Sun()
# p = Spiral()
# p = Meteor()
# p = Galaxy()
p.position = (320,100)
self.add( p )
if __name__ == "__main__":
director.init( resizable=True )
main_scene = cocos.scene.Scene()
main_scene.add( ColorLayer(0,0,0,255), z=0 )
main_scene.add( L(), z=1 )
director.run( main_scene )
|
adamwiggins/cocos2d
|
test/test_particle_smoke.py
|
Python
|
bsd-3-clause
| 858
|
[
"Galaxy"
] |
d8bb0c28f35d6d28ec738c1329815128ae0803f51155d88cfa2f78ef3c818ed3
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/with_padding_1.2s/')
from data_padding_hshv_1dot2s import Fmat_original_hshv
from data_padding_hslv_1dot2s import Fmat_original_hslv
from data_padding_lshv_1dot2s import Fmat_original_lshv
from data_padding_lslv_1dot2s import Fmat_original_lslv
# Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((20,1)))
sigma = np.matrix(np.zeros((20,1)))
DIVS = m/20
while (index < 20):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((20,n)))
DIVS = m/20
for i in range(n):
index = 0
while (index < 20):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
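# Illustrative sketch (added for clarity; not part of the original script). It shows,
# for a single class, the train/score pattern repeated four times below, using only
# names defined in this file: feature_to_mu_sigma plus the F, A and pi set up in the
# __main__ block, so this is meant to be read rather than imported. `train_mat` and
# `test_col` are assumptions: any (m, n) training matrix and one (m, 1) test column.
def example_train_and_score(train_mat, test_col):
    # estimate 20 per-state Gaussian parameters from the training data
    mu, sigma = feature_to_mu_sigma(train_mat)
    B = [[float(mu[i]), float(sigma[i])] for i in range(20)]
    # build one left-to-right Gaussian HMM from the shared transition matrix A and priors pi
    model = ghmm.HMMFromMatrices(F, ghmm.GaussianDistribution(F), A, B, pi)
    # Baum-Welch training on the column-wise sequences
    train_seqs = (np.array(train_mat).T).tolist()
    model.baumWelch(ghmm.SequenceSet(F, train_seqs))
    # Viterbi log-likelihood of the flattened test column under this model
    flat_test = np.array(sum((np.array(test_col).T).tolist(), []))
    path, loglik = model.viterbi(ghmm.EmissionSequence(F, flat_test.tolist()))
    return loglik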
if __name__ == '__main__':
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.05] * 20
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_hshv,sigma_rf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:121,0:15], Fmat_original_lshv[0:121,0:15], Fmat_original_lslv[0:121,0:15]))))
mu_rm_hshv,sigma_rm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:121,15:30], Fmat_original_lshv[0:121,15:30], Fmat_original_lslv[0:121,15:30]))))
mu_sf_hshv,sigma_sf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:121,30:45], Fmat_original_lshv[0:121,30:45], Fmat_original_lslv[0:121,30:45]))))
mu_sm_hshv,sigma_sm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:121,45:60], Fmat_original_lshv[0:121,45:60], Fmat_original_lslv[0:121,45:60]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = np.zeros((20,2))
B_rm_hshv = np.zeros((20,2))
B_sf_hshv = np.zeros((20,2))
B_sm_hshv = np.zeros((20,2))
for num_states in range(20):
B_rf_hshv[num_states,0] = mu_rf_hshv[num_states]
B_rf_hshv[num_states,1] = sigma_rf_hshv[num_states]
B_rm_hshv[num_states,0] = mu_rm_hshv[num_states]
B_rm_hshv[num_states,1] = sigma_rm_hshv[num_states]
B_sf_hshv[num_states,0] = mu_sf_hshv[num_states]
B_sf_hshv[num_states,1] = sigma_sf_hshv[num_states]
B_sm_hshv[num_states,0] = mu_sm_hshv[num_states]
B_sm_hshv[num_states,1] = sigma_sm_hshv[num_states]
B_rf_hshv = B_rf_hshv.tolist()
B_rm_hshv = B_rm_hshv.tolist()
B_sf_hshv = B_sf_hshv.tolist()
B_sm_hshv = B_sm_hshv.tolist()
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:121,0:15], Fmat_original_lshv[0:121,0:15], Fmat_original_lslv[0:121,0:15])))
total_seq_rm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:121,15:30], Fmat_original_lshv[0:121,15:30], Fmat_original_lslv[0:121,15:30])))
total_seq_sf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:121,30:45], Fmat_original_lshv[0:121,30:45], Fmat_original_lslv[0:121,30:45])))
total_seq_sm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:121,45:60], Fmat_original_lshv[0:121,45:60], Fmat_original_lslv[0:121,45:60])))
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
total_seq_obj_hshv = Fmat_original_hshv[0:121,:]
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
test_seq_obj_hshv = (np.array(total_seq_obj_hshv[0:121,k]).T).tolist()
new_test_seq_obj_hshv = np.array(sum(test_seq_obj_hshv,[]))
#print new_test_seq_obj_hshv
ts_obj_hshv = new_test_seq_obj_hshv
#print np.shape(ts_obj_hshv)
final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
# Find Viterbi Path
path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
if obj_hshv == path_rf_obj_hshv[1]:
rf_hshv[0,k] = 1
elif obj_hshv == path_rm_obj_hshv[1]:
rm_hshv[0,k] = 1
elif obj_hshv == path_sf_obj_hshv[1]:
sf_hshv[0,k] = 1
else:
sm_hshv[0,k] = 1
k = k+1
#print rf_hshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,45:60])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
mu_rf_hslv,sigma_rf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,0:15], Fmat_original_lshv[0:121,0:15], Fmat_original_lslv[0:121,0:15]))))
mu_rm_hslv,sigma_rm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,15:30], Fmat_original_lshv[0:121,15:30], Fmat_original_lslv[0:121,15:30]))))
mu_sf_hslv,sigma_sf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,30:45], Fmat_original_lshv[0:121,30:45], Fmat_original_lslv[0:121,30:45]))))
mu_sm_hslv,sigma_sm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,45:60], Fmat_original_lshv[0:121,45:60], Fmat_original_lslv[0:121,45:60]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = np.zeros((20,2))
B_rm_hslv = np.zeros((20,2))
B_sf_hslv = np.zeros((20,2))
B_sm_hslv = np.zeros((20,2))
for num_states in range(20):
B_rf_hslv[num_states,0] = mu_rf_hslv[num_states]
B_rf_hslv[num_states,1] = sigma_rf_hslv[num_states]
B_rm_hslv[num_states,0] = mu_rm_hslv[num_states]
B_rm_hslv[num_states,1] = sigma_rm_hslv[num_states]
B_sf_hslv[num_states,0] = mu_sf_hslv[num_states]
B_sf_hslv[num_states,1] = sigma_sf_hslv[num_states]
B_sm_hslv[num_states,0] = mu_sm_hslv[num_states]
B_sm_hslv[num_states,1] = sigma_sm_hslv[num_states]
B_rf_hslv = B_rf_hslv.tolist()
B_rm_hslv = B_rm_hslv.tolist()
B_sf_hslv = B_sf_hslv.tolist()
B_sm_hslv = B_sm_hslv.tolist()
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,0:15], Fmat_original_lshv[0:121,0:15], Fmat_original_lslv[0:121,0:15])))
total_seq_rm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,15:30], Fmat_original_lshv[0:121,15:30], Fmat_original_lslv[0:121,15:30])))
total_seq_sf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,30:45], Fmat_original_lshv[0:121,30:45], Fmat_original_lslv[0:121,30:45])))
total_seq_sm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,45:60], Fmat_original_lshv[0:121,45:60], Fmat_original_lslv[0:121,45:60])))
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
total_seq_obj_hslv = Fmat_original_hslv[0:121,:]
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
test_seq_obj_hslv = (np.array(total_seq_obj_hslv[0:121,k]).T).tolist()
new_test_seq_obj_hslv = np.array(sum(test_seq_obj_hslv,[]))
#print new_test_seq_obj_hslv
ts_obj_hslv = new_test_seq_obj_hslv
#print np.shape(ts_obj_hslv)
final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
# Find Viterbi Path
path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
if obj_hslv == path_rf_obj_hslv[1]:
rf_hslv[0,k] = 1
elif obj_hslv == path_rm_obj_hslv[1]:
rm_hslv[0,k] = 1
elif obj_hslv == path_sf_obj_hslv[1]:
sf_hslv[0,k] = 1
else:
sm_hslv[0,k] = 1
k = k+1
#print rf_hslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:60])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
mu_rf_lshv,sigma_rf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,0:15], Fmat_original_hslv[0:121,0:15], Fmat_original_lslv[0:121,0:15]))))
mu_rm_lshv,sigma_rm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,15:30], Fmat_original_hslv[0:121,15:30], Fmat_original_lslv[0:121,15:30]))))
mu_sf_lshv,sigma_sf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,30:45], Fmat_original_hslv[0:121,30:45], Fmat_original_lslv[0:121,30:45]))))
mu_sm_lshv,sigma_sm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,45:60], Fmat_original_hslv[0:121,45:60], Fmat_original_lslv[0:121,45:60]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = np.zeros((20,2))
B_rm_lshv = np.zeros((20,2))
B_sf_lshv = np.zeros((20,2))
B_sm_lshv = np.zeros((20,2))
for num_states in range(20):
B_rf_lshv[num_states,0] = mu_rf_lshv[num_states]
B_rf_lshv[num_states,1] = sigma_rf_lshv[num_states]
B_rm_lshv[num_states,0] = mu_rm_lshv[num_states]
B_rm_lshv[num_states,1] = sigma_rm_lshv[num_states]
B_sf_lshv[num_states,0] = mu_sf_lshv[num_states]
B_sf_lshv[num_states,1] = sigma_sf_lshv[num_states]
B_sm_lshv[num_states,0] = mu_sm_lshv[num_states]
B_sm_lshv[num_states,1] = sigma_sm_lshv[num_states]
B_rf_lshv = B_rf_lshv.tolist()
B_rm_lshv = B_rm_lshv.tolist()
B_sf_lshv = B_sf_lshv.tolist()
B_sm_lshv = B_sm_lshv.tolist()
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,0:15], Fmat_original_hslv[0:121,0:15], Fmat_original_lslv[0:121,0:15])))
total_seq_rm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,15:30], Fmat_original_hslv[0:121,15:30], Fmat_original_lslv[0:121,15:30])))
total_seq_sf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,30:45], Fmat_original_hslv[0:121,30:45], Fmat_original_lslv[0:121,30:45])))
total_seq_sm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,45:60], Fmat_original_hslv[0:121,45:60], Fmat_original_lslv[0:121,45:60])))
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
total_seq_obj_lshv = Fmat_original_lshv[0:121,:]
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
test_seq_obj_lshv = (np.array(total_seq_obj_lshv[0:121,k]).T).tolist()
new_test_seq_obj_lshv = np.array(sum(test_seq_obj_lshv,[]))
#print new_test_seq_obj_lshv
ts_obj_lshv = new_test_seq_obj_lshv
#print np.shape(ts_obj_lshv)
final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
# Find Viterbi Path
path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
if obj_lshv == path_rf_obj_lshv[1]:
rf_lshv[0,k] = 1
elif obj_lshv == path_rm_obj_lshv[1]:
rm_lshv[0,k] = 1
elif obj_lshv == path_sf_obj_lshv[1]:
sf_lshv[0,k] = 1
else:
sm_lshv[0,k] = 1
k = k+1
#print rf_lshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,45:60])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
mu_rf_lslv,sigma_rf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,0:15], Fmat_original_hslv[0:121,0:15], Fmat_original_lshv[0:121,0:15]))))
mu_rm_lslv,sigma_rm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,15:30], Fmat_original_hslv[0:121,15:30], Fmat_original_lshv[0:121,15:30]))))
mu_sf_lslv,sigma_sf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,30:45], Fmat_original_hslv[0:121,30:45], Fmat_original_lshv[0:121,30:45]))))
mu_sm_lslv,sigma_sm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:121,45:60], Fmat_original_hslv[0:121,45:60], Fmat_original_lshv[0:121,45:60]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = np.zeros((20,2))
B_rm_lslv = np.zeros((20,2))
B_sf_lslv = np.zeros((20,2))
B_sm_lslv = np.zeros((20,2))
for num_states in range(20):
B_rf_lslv[num_states,0] = mu_rf_lslv[num_states]
B_rf_lslv[num_states,1] = sigma_rf_lslv[num_states]
B_rm_lslv[num_states,0] = mu_rm_lslv[num_states]
B_rm_lslv[num_states,1] = sigma_rm_lslv[num_states]
B_sf_lslv[num_states,0] = mu_sf_lslv[num_states]
B_sf_lslv[num_states,1] = sigma_sf_lslv[num_states]
B_sm_lslv[num_states,0] = mu_sm_lslv[num_states]
B_sm_lslv[num_states,1] = sigma_sm_lslv[num_states]
B_rf_lslv = B_rf_lslv.tolist()
B_rm_lslv = B_rm_lslv.tolist()
B_sf_lslv = B_sf_lslv.tolist()
B_sm_lslv = B_sm_lslv.tolist()
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,0:15], Fmat_original_hslv[0:121,0:15], Fmat_original_lshv[0:121,0:15])))
total_seq_rm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,15:30], Fmat_original_hslv[0:121,15:30], Fmat_original_lshv[0:121,15:30])))
total_seq_sf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,30:45], Fmat_original_hslv[0:121,30:45], Fmat_original_lshv[0:121,30:45])))
total_seq_sm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:121,45:60], Fmat_original_hslv[0:121,45:60], Fmat_original_lshv[0:121,45:60])))
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
total_seq_obj_lslv = Fmat_original_lslv[0:121,:]
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
test_seq_obj_lslv = (np.array(total_seq_obj_lslv[0:121,k]).T).tolist()
new_test_seq_obj_lslv = np.array(sum(test_seq_obj_lslv,[]))
#print new_test_seq_obj_lslv
ts_obj_lslv = new_test_seq_obj_lslv
#print np.shape(ts_obj_lslv)
final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
# Find Viterbi Path
path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
if obj_lslv == path_rf_obj_lslv[1]:
rf_lslv[0,k] = 1
elif obj_lslv == path_rm_obj_lslv[1]:
rm_lslv[0,k] = 1
elif obj_lslv == path_sf_obj_lslv[1]:
sf_lslv[0,k] = 1
else:
sm_lslv[0,k] = 1
k = k+1
#print rf_lslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,45:60])
#print cmat
############################################################################################################################################
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.savefig('results_force_20_states.png')
pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with padding 1.2s/hmm_crossvalidation_force_20_states.py
|
Python
|
mit
| 29,306
|
[
"Mayavi"
] |
938014607dec3e6d3580edaba92e3d16c9f67cb320066a4e23afbdfe29cad17d
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-get-jdl
# Author : Stuart Paterson
########################################################################
"""
Retrieve the current JDL of a DIRAC job
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
original = False
Script.registerSwitch( 'O', 'Original', 'Gets the original JDL' )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
for switch in Script.getUnprocessedSwitches():
if switch[0] == 'Original' or switch[0] == 'O':
original = True
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []
for job in args:
result = dirac.getJobJDL( job, original = original, printOutput = True )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
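# Illustrative invocations (the job ID 12345 is a made-up example), matching the
# usage message registered above:
#   dirac-wms-job-get-jdl 12345               # current JDL of job 12345
#   dirac-wms-job-get-jdl --Original 12345    # original JDL as submitted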
|
coberger/DIRAC
|
Interfaces/scripts/dirac-wms-job-get-jdl.py
|
Python
|
gpl-3.0
| 1,394
|
[
"DIRAC"
] |
915da359d39d3424d17ff135cc52267eb9e26a66f096ab8dfdba03533ab4c460
|
## Copyright (C) 2005-2006 Graham I Cummins
## This program is free software; you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation; either version 2 of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT ANY
## WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
## PARTICULAR PURPOSE. See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along with
## this program; if not, write to the Free Software Foundation, Inc., 59 Temple
## Place, Suite 330, Boston, MA 02111-1307 USA
##
import os
from mien.math.sigtools import *
from string import join
import random, mien.parsers
from mien.wx.base import wx, BaseGui
from mien.nmpml.data import newData
from mien.parsers.nmpml import forceGetPath
def nameHash(objs):
d = {}
for o in objs:
d[str(o)]=o
return d
def renameSection(sec, newname):
print "renaming", str(sec), "=>", newname
op = sec.upath()
cell=sec.container
kids=[cell._sections[s] for s in cell.getChildren(sec.name())]
for k in kids:
print str(k), " Parent => ", newname
k.setAttrib("Parent",newname)
sec.setAttrib("Name",newname)
cell.refresh()
doc = sec.xpath(True)[0]
np=sec.upath()
refs=doc.getElements("ElementReference")
for r in refs:
s=r.attrib("Target")
if s and s.startswith(op):
print "%s references the renamed object. Adjusting target path" % str(r)
r.setAttrib("Target", s.replace(op, np))
SYNPROP={
'AfferentClass01':{"Length":1200, "Direction":225, "Cercus":'L', 'Latency':4.0},
'AfferentClass02':{"Length":1200, "Direction":315, "Cercus":'L', 'Latency':4.0},
'AfferentClass03':{"Length":1200, "Direction":45 , "Cercus":'L', 'Latency':4.0},
'AfferentClass04':{"Length":1200, "Direction":135, "Cercus":'L', 'Latency':4.0},
'AfferentClass05':{"Length":800, "Direction":225, "Cercus":'L' , 'Latency':4.0},
'AfferentClass06':{"Length":800, "Direction":315, "Cercus":'L' , 'Latency':4.0},
'AfferentClass07':{"Length":800, "Direction":45 , "Cercus":'L' , 'Latency':4.0},
'AfferentClass08':{"Length":800, "Direction":135, "Cercus":'L' , 'Latency':4.0},
'AfferentClass09':{"Length":1200, "Direction":135, "Cercus":'R', 'Latency':4.0},
'AfferentClass10':{"Length":1200, "Direction":45 , "Cercus":'R', 'Latency':4.0},
'AfferentClass11':{"Length":1200, "Direction":315, "Cercus":'R', 'Latency':4.0},
'AfferentClass12':{"Length":1200, "Direction":225, "Cercus":'R', 'Latency':4.0},
'AfferentClass13':{"Length":800, "Direction":135, "Cercus":'R' , 'Latency':4.0},
'AfferentClass14':{"Length":800, "Direction":45 , "Cercus":'R' , 'Latency':4.0},
'AfferentClass15':{"Length":800, "Direction":315, "Cercus":'R' , 'Latency':4.0},
'AfferentClass16':{"Length":800, "Direction":225, "Cercus":'R' , 'Latency':4.0},
'AfferentClass17':{"Length":1200, "Direction":45, "Cercus":'L' , 'Latency':8.0},
'AfferentClass18':{"Length":1200, "Direction":315 , "Cercus":'R' , 'Latency':8.0},
}
def getSynapseProperties(type, Mean=False, String=False):
lenstddev=100
dirstddev=10
d={}
d.update(SYNPROP[type])
if not Mean:
d['Direction']=round(normal(d['Direction'], dirstddev))
d['Direction'] = d['Direction']%360
d['Length'] = round(normal(d['Length'], lenstddev))
if String:
ln="S"
if d['Length']>600:
ln="M"
if d['Length']>1000:
ln="L"
return "%s%i%s" % (d['Cercus'], d['Direction'], ln)
else:
return d
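# Illustrative note (added for clarity; not part of the original module): with
# Mean=True the class parameters are used as-is, so for example
#   getSynapseProperties('AfferentClass01', Mean=True, String=True)
# returns "L225L" (Cercus 'L', Direction 225, and Length 1200 > 1000 mapping to 'L');
# with Mean=False, Direction and Length are jittered by the normal() draws above.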
def synapseFree(section, cell):
l=[section]
if not section.getElements("Synapse"):
for k in cell.getChildren(section.name()):
l.extend(synapseFree(cell._sections[k], cell))
return l
def getSynapseClasses(atr):
l=array([225, 315, 45, 135])
clid=argmin(abs(l-float(atr["Direction"])))
if float(atr["Length"]) < 1000:
clid+=4
if atr["Cercus"]=="R":
clid+=8
return clid
class CellEditor(BaseGui):
def __init__(self, gui, cell):
self.gui=gui
self.cell=cell
self.selectedSections = self.cell._sections.values()
BaseGui.__init__(self, gui, title="Edit Cell %s" % cell.name(), menus=["File", "Selection", "Synapse", "Mechanism","Measurements", "Morphology"], pycommand=True,height=4)
commands=[["File", "Quit", lambda x: self.Destroy()],
["Selection", "Select Sections", self.makeSel],
['Selection', 'Select Spatial Region', self.selectLocation],
["Selection", "Export Selection", self.exportSel],
["Selection", "Import Selection", self.importSel],
["Selection", "Clear Selection", self.killSel],
["Synapse", "Synapse Info", self.synapseInfo],
["Synapse", "Make Masks", self.makeMasks],
["Synapse", "Mask Selected Sections", self.makeUniformMask],
["Synapse", "Make Synapses", self.makeSyn],
["Synapse", "Remove Synapses", self.delSyn],
["Synapse", "Scramble Synapses", self.randomSyn],
["Synapse", "Edit Synapses", self.editSyn],
["Mechanism", "Set Mechanism Density", self.setProperty],
["Mechanism", "Copy Mechanisms", self.dupMech],
["Mechanism", "Insert Mechanism", self.addMech],
["Mechanism", "Remove Mechanism", self.delMech],
["Mechanism", "Strip All Mechanisms", self.nukeMech],
["Mechanism", "Set passive properties", self.setPass],
["Measurements", "Input Impedance", self.getRin],
["Morphology", "Random Connectivity", self.fuzzBall],
["Morphology", "Cut tips", self.antiFuzzBall],
["Morphology", "Uniform Sections", self.splitCell],
["Morphology", "Human Readable Names", self.assignNames],
["Morphology", "Simple Names", self.simpleNames],
["Morphology", "Load Morphology", self.getCellMorph]]
self.fillMenus(commands)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(wx.StaticText(self.main, -1, "%s" % str(self.cell)), 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.info=wx.StaticText(self.main, -1, "00000 Sections, 00000 Synapses, 00 Channel Types")
sizer.Add(self.info, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
self.nsel=wx.StaticText(self.main, -1,"%i (All) Sections Selected" % len(self.selectedSections))
sizer.Add(self.nsel, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
self.main.SetSizer(sizer)
self.main.SetAutoLayout(True)
sizer.Fit(self)
self.setInfo()
self.SetSize(wx.Size(600,300))
def selectLocation(self, event=None):
d=self.askParam([{'Name':'X min','Value':-1000000.0},
{'Name':'X max','Value':1000000.0},
{'Name':'Y min','Value':-1000000.0},
{'Name':'Y max','Value':1000000.0},
{'Name':'z min','Value':-1000000.0},
{'Name':'z max','Value':1000000.0}
])
if not d:
return
hits=[]
for sec in self.cell._sections.values():
pts=sec.getPoints()
if pts[:,0].min()<d[0]:
continue
if pts[:,0].max()>d[1]:
continue
if pts[:,1].min()<d[2]:
continue
if pts[:,1].max()>d[3]:
continue
if pts[:,2].min()<d[4]:
continue
if pts[:,2].max()>d[5]:
continue
hits.append(sec)
self.selectedSections=hits
self.setInfo()
def makeSel(self, event=None):
cell=self.cell
sections = cell._sections.keys()
d = [{"Name":"Name Contains",
"Value":"ANY PATTERN"}]
reg = cell.getElements("NamedRegion")
for r in reg:
d.append({"Name":"Region %s" % r.name(),
"Type":"List",
"Value":["Ignore", "And", "Or", "And Not", "Or Not"]})
d=self.askParam(d)
if not d:
return
if d[0]!='ANY PATTERN':
sections = filter(lambda x: d[0] in x.name(), sections)
d = d[1:]
first = 1
allsections=set(sections)
for i, v in enumerate(d):
if v == 'Ignore':
continue
s = set(reg[i].getSectionNames())
if v.endswith("Not"):
s=allsections-s
if first:
first = 0
sections = s.copy()
else:
if v.startswith("Or"):
sections=sections.union(s) #[c for c in sections]+[c for c in s if not c in sections]
else:
sections = sections.intersection(s)
sections=[cell._sections[s] for s in sections]
self.selectedSections=sections
self.setInfo()
def setInfo(self):
nsec=len(self.cell._sections.values())
syn=len(self.cell.getElements("Synapse"))
chan=[]
for s in self.cell._sections.values():
ell=s.getElements(["Channel", "RangeVar"])
for e in ell:
n="%s:%s" % (e.__tag__, e.name())
if not n in chan:
chan.append(n)
chan=len(chan)
self.info.SetLabel("%i Sections, %i Synapses, %i Channel Types" % (nsec, syn, chan))
self.nsel.SetLabel("%i Sections Selected" % len(self.selectedSections))
def exportSel(self, event):
self.gui.contextMenuSelect=[]
try:
self.gui.objecttree.UnselectAll()
except:
pass
for si in self.selectedSections:
self.gui.objecttree.SelectItem(si._guiinfo["treeid"])
self.gui.objecttree.EnsureVisible(si._guiinfo["treeid"])
self.gui.contextMenuSelect.append(si._guiinfo["treeid"])
self.report("Exported %i selected sections to nmpml. They may not all appear highlighted depending on the Wx library version" % len(self.selectedSections) )
def importSel(self, event=None):
self.selectedSections= [self.gui.objecttree.GetPyData(s) for s in self.gui.contextMenuSelect]
self.report("imported %i section selection" % len(self.selectedSections))
self.setInfo()
def killSel(self, event):
self.selectedSections = []
self.setInfo()
def synapseInfo(self, event):
cell = self.cell
syn = cell.getElements("Synapse")
classes ={}
for s in syn:
c= getSynapseClasses(s.attributes)
cds=getSynapseProperties(c, True, True)
if not classes.has_key(cds):
classes[cds]=0
classes[cds]+=1
self.report( str(classes) )
def sectionMask(self, inverse=False):
inds=self.cell.sec_draw_indexes([s.name() for s in set(self.selectedSections)])
inds=array(inds).astype(int32)
points = self.cell.get_drawing_coords()
m=zeros(points.shape[0], float32)
put(m, inds, 1.0)
if inverse:
m=logical_not(m)
m=reshape(m, (-1, 2))[:,0]
if not any(m):
self.report("Selection is empty. Will not cerate Mask")
return None
return m
def makeUniformMask(self, event):
d=self.askParam([{"Name":"Density",
"Value":.1},
{"Name":"Name",
"Value":"AfferentClassXX"},
{"Name":"Which Sections",
"Type":"List",
"Value":["All", "Only selected sections",
"Only non-selected sections"]}])
if not d:
return
points = self.cell.get_drawing_coords()
nc=reshape(points, (-1, 8)).shape[0]
maskdat=ones(nc, float32)*d[0]
if d[2]!="All":
m=self.sectionMask(("non" in d[2]))
if m==None:
return
maskdat=maskdat*m
d = newData(reshape(maskdat, (-1,1)), {'Name':d[1],'SampleType':'mask'})
self.cell.newElement(d)
self.gui.update_all(object=d, event="Create")
def makeMasks(self, event):
c = self.cell
points = reshape(c.get_drawing_coords(), (-1, 8))
diams = (points[:,3]+points[:,7])/2
of = open('modeltemplate.txt', 'w')
for p in points:
of.write("%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n" % tuple(p))
of.close()
check=os.system("matlab -nojvm -nosplash -r \"maskalldata('modeltemplate.txt'); quit\"")
if check:
self.report("system call to matlab maskalldata failed")
return
lines=open("masked_modeltemplate.txt").readlines()
tags = array(map(lambda l:map(float, l.split()),lines))
filt = diams < 8.5
tags = tags*filt[:,NewAxis]
for i in range(tags.shape[1]):
n = "AfferentClass%02d" % (i+1,)
d = newData(tags[:,i:i+1], {'Name':n,'SampleType':'mask'})
c.newElement(d)
self.gui.update_all(object=c, event="Rebuild")
def makeSyn(self, event=None):
cell = self.cell
masks=cell.getElements("Data", {"SampleType":"mask"}, depth=1)
if not masks:
self.report("No masks")
return
md=dict([(n.name(), n) for n in masks])
masks=md.keys()
masks.sort()
d=self.askParam([{"Name":"How Many",
"Value":10},
{"Name":"Type",
"Value":"GSyn"},
{"Name":"Exclude Sections",
"Type":"List",
"Value":["No", "Only in selected sections",
"Never in selectied sections"]},
{"Name": "Which Masks",
"Type":"Select",
"Value":masks}])
if not d:
return
if d[-1]:
masks=[md[k] for k in d[-1]]
else:
masks=md.values()
if not masks:
self.report("No masks")
return
howmany=d[0]
modeltype = d[1]
tags = hstack([m.getData() for m in masks])
if d[2]!="No":
m=self.sectionMask(d[2].startswith("Never"))
if m==None:
return
print tags.shape, m.shape
tags=tags*reshape(m, (-1,1))
nclasses=tags.shape[1]
synapse_prob=cumsum(ravel(tags))
rands=uniform(0,synapse_prob[-1], howmany)
cents = []
types = []
for i in range(rands.shape[0]):
p=rands[i]
ind=nonzero1d(synapse_prob>=p)[0]
c,t = divmod(ind,nclasses)
n=masks[t].name()
atr= getSynapseProperties(n)
try:
sec, loc = cell.nthCenter(c)
sec = cell.getSection(sec)
print "Adding synapse in %s" % str(sec)
except:
print "Center %i not found. nthCenter sez %s" % (c, str(exc_info()[1]))
continue
atr.update({"Type":modeltype, "Id":i, "Afferent":
n, 'Name':"%s%s" % (n, modeltype),
'Point':str(sec.ptAtRel(loc))})
self.gui.makeElem("Synapse", atr, sec, update=False)
self.gui.update_all(object=cell, event="Rebuild")
self.report("Generated Synapses")
self.setInfo()
def delSyn(self, event=None):
syns = self.cell.getElements("Synapse")
if not syns:
return
tn=list(set([foo.attrib('Afferent') for foo in syns]))
tn.sort()
if len(tn)>1:
d=self.askParam([{'Name':'Delete which inputs?',
'Type':'Select',
'Value':tn}])
if not d:
return
if d[0]:
syns=[s for s in syns if str(s.attrib('Afferent')) in d[0]]
for s in syns:
s.sever()
self.gui.update_all(object=self.cell, event="Rebuild")
self.setInfo()
def randomSyn(self, event):
secs = self.selectedSections
if not secs:
return
syns = []
for s in secs:
syns.extend(s.getElements("Synapse"))
self.report("moving %i syns" % len(syns))
for s in syns:
newo = random.choice(secs)
if newo!=s.container:
newo.newElement(s)
newi = random.randint(0, newo.getPoints().shape[0]-1)
s.setAttrib("Point", newi)
def editSyn(self, event=None):
syns = self.cell.getElements("Synapse")
self.report("%i total synapses" % len(syns))
SynapsePars = ["Type"]
spd = []
for p in SynapsePars:
spd.append({"Name":p,
"Type":str,
"Optional":1})
d = self.askParam(spd)
if not d:
return
for s in syns:
for i, p in enumerate(SynapsePars):
s.setAttrib(p, d[i])
def fuzzBall(self, event):
secs = self.selectedSections
if not secs:
return
d=self.askParam([{"Name":"Root Section",
"Value":"section[4]"}])
if not d:
return
root=self.cell._sections[d[0]]
newroot=[x for x in secs if not x.parent()]
if newroot:
self.report("New root assigned")
root.setAttrib("Parent", None)
else:
self.report("Keeping old root. Selected section will be parent for new sections")
tp = root.getPoints()[-1][:3]
for s in secs:
s.setAttrib("Parent",root.name())
apoints = s.getPoints()
trans = tp - apoints[0,:3]
apoints[:,:3] = apoints[:,:3]+trans
s.setPoints(apoints)
self.cell.refresh()
self.report("fuzz complete")
def antiFuzzBall(self, event):
r=self.cell.root()
keep=synapseFree(self.cell._sections[r], self.cell)
syns = self.cell.getElements("Synapse")
for s in syns:
if s.container in keep:
continue
newo=s.container
while not newo in keep:
newo=self.cell._sections[newo.parent()]
newo.newElement(s)
newi = newo.getPoints().shape[0]-1
s.setAttrib("Point", newi)
secs=self.cell._sections.values()
for s in secs:
if not s in keep:
self.gui.update_all(object=s, event="Delete")
self.cell.refresh()
self.setInfo()
def splitCell(self, event=None):
mlen=min([x.stats()[0] for x in self.cell._sections.values()])
d=self.askParam([{"Name":"Target Length",
"Value":mlen}])
if not d:
return
self.cell.uniformSectionLength(d[0])
self.report("Done. Now have %i sections" % len(self.cell._sections.keys()))
self.setInfo()
def getCellMorph(self, event):
cell = self.cell
dlg=wx.FileDialog(self.gui, message="Select file", style=wx.OPEN)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
fname=dlg.GetPath()
else:
self.report("Canceled File Load.")
return
doc = mien.parsers.fileIO.read(fname)
c = doc.getElements("Cell")
if len(c)==0:
self.report("No cells in this document")
return
elif len(c)>1:
wc=self.askUsr("Which Cell", map(str, c))
if not wc:
return
c=[cell for cell in c if str(cell)==wc]
c=c[0]
ts=cell._sections.values()[0]
cell.elements=[e for e in cell.elements if e.__tag__!='Section']
for sec in c._sections.values():
sec.elements=[]
for k in ["Ra"]:
if ts.attrib(k):
sec.setAttrib(k,ts.attrib(k))
for e in ts.elements:
if e.__tag__ in ["Synapse"]:
continue
try:
newe = e.__class__(e.attributes)
sec.newElement(newe)
except:
self.report("Warning: could not duplicate element %s" % str(e))
sec.refresh()
cell.elements.append(sec)
cell.refresh()
self.gui.update_all(object=cell, event="Move")
self.setInfo()
def nukeMech(self, evt):
for s in self.selectedSections:
s.elements=[]
self.gui.update_all(object=self.cell, event="rebuild")
def dupMech(self, event):
d=self.askParam([{"Name":"Duplicate Which Section?","Value":self.cell.root()}])
if not d:
return
rs=self.cell._sections[d[0]]
for s in self.selectedSections:
if s==rs:
continue
s.setAttrib('Ra',rs.attrib('Ra'))
for c in s.getElements(['Channel', 'RangeVar']):
self.gui.update_all(object=c, event="Delete")
for c in rs.getElements(['Channel', 'RangeVar']):
newobj = c.clone()
s.newElement(newobj)
self.gui.update_all(object=newobj, event="Create")
self.report("Set Mechanisms")
def assignNames(self, event):
roots={}
names={}
newstarts=0
for r in self.cell.getElements("NamedRegion"):
root=r.getOrder()[0]
roots[root.name()]=r.name()
for secname in self.cell.branch():
if roots.has_key(secname):
names[secname]=roots[secname]+"_1"
else:
parent=self.cell._sections[secname].parent()
if not parent:
nn="root_1"
elif names.has_key(parent):
kids=self.cell.getChildren(parent)
bn=names[parent]
pnp=bn.split("_")
pi=int(pnp[-1])
bn=join(pnp[:-1], "_")
nkids=0
for n in kids:
if n==secname:
continue
if not names.has_key(n):
continue
if names[n].startswith(bn):
nkids+=1
if nkids==0:
nn=bn+"_"+str(pi+1)
else:
nn=names[parent]+"_%i" % nkids
else:
nn="sec%i_1" % newstarts
newstarts+=1
if nn in names.values():
print "name %s is a duplicate" % nn
print nkids
return
names[secname]=nn
for on in names.keys():
renameSection(self.cell._sections[on], names[on])
self.gui.update_all(object=self.cell, event='move')
def simpleNames(self, event):
names={}
for ind, name in enumerate(self.cell.branch()):
names[name]="sec[%i]" % ind
for on in names.keys():
sec=self.cell._sections[on]
sec.setName(names[on])
self.gui.update_all(object=self.cell, event='Rebuild')
def setProperty(self, event):
setProperty(self.gui, self.selectedSections)
def addMech(self, event):
addChan(self.gui, self.selectedSections)
def delMech(self, event):
delChan(self.gui, self.selectedSections)
def setPass(self, event):
pass
def getRin(self, event):
if len(self.selectedSections)==1:
target = self.selectedSections[0]
else:
d = self.askParam([{"Name":"Which Section", "Type":str}])
if not d:
return
target = self.cell.getSection(d[0])
doc = self.gui.document
stim = forceGetPath(doc, "/Stimulus:ModelBuilderStimulus")
for e in stim.getElements(depth=1):
e.sever()
ic = forceGetPath(doc, "/Stimulus:ModelBuilderStimulus/IClamp:inject")
exp.setAttrib("Start", 5)
exp.setAttrib("Amp", 5)
exp.setAttrib("Stop", 10)
exp.setAttrib("Id", 1)
ref = forceGetPath(doc, "/Stimulus:ModelBuilderStimulus/IClamp:inject/ElementReference:Section")
ref.setAttrib('Target', target.upath())
ref.setAttrib('Data', 0.5)
exp=forceGetPath(doc, "/Experiment:ModelBuilderExperiment")
exp.setAttrib('secondorder', 2)
exp.setAttrib('Simulator', 'Neuron')
exp.setAttrib('celsius', 20)
exp.setAttrib('time', 20)
exp.setAttrib('dt', 0.05)
for e in exp.getElements(depth=1):
e.sever()
ref = forceGetPath(doc, "/Experiment:ModelBuilderExperiment/ElementReference:Cell")
ref.setAttrib('Target', self.cell.upath())
ref = forceGetPath(doc, "/Experiment:ModelBuilderExperiment/ElementReference:Stimulus")
ref.setAttrib("Target", "/Stimulus:ModelBuilderStimulus")
rec=forceGetPath(doc, "/Experiment:ModelBuilderExperiment/Recording:v")
rec.setAttrib("Variable", 'v')
rec.setAttrib("DataType", "d")
rec.setAttrib("SamplesPerSecond", 10000.0)
ref = forceGetPath(doc, "/Experiment:ModelBuilderExperiment/Recording:v/ElementReference:Section")
ref.setAttrib('Target', target.upath())
ref.setAttrib('Data', 0.5)
ME={}
def areSections(l):
if set(l)==set(['Section']):
return True
return False
def cellEdit(gui, elems):
cell=elems[0]
c=CellEditor(gui, cell)
def newRegion(gui, secs):
gui.report("Making region containing %i sections" % len(secs))
atr = gui.getElemAttribs("NamedRegion")
reg = gui.makeElem("NamedRegion", atr, secs[0].container)
for i, s in enumerate(secs):
gui.makeElem('ElementReference', {"Name":"el%i" % i, "Target":s.upath()}, reg, update=False)
gui.update_all(object=reg, event="Rebuild")
def regSel(gui,elems):
reg=elems[0]
selectedSections = reg.getSections()
gui.objecttree.UnselectAll()
gui.contextMenuSelect=[]
for si in selectedSections:
gui.objecttree.EnsureVisible(si._guiinfo["treeid"])
gui.contextMenuSelect.append(si._guiinfo["treeid"])
#gui.gui.objecttree.ToggleItemSelection(si._guiinfo["treeid"])
gui.objecttree.SelectItem(si._guiinfo["treeid"])
gui.report("%i sections selected (but they may not all be highlighted!)" % len(selectedSections))
def setProperty(gui, secs):
gui.report("Operating on %i sections" % len(secs))
mechanisms=["Ra"]
for s in secs:
ell=s.getElements(["Channel", "RangeVar"])
for e in ell:
n="%s:%s" % (e.__tag__, e.name())
if not n in mechanisms:
mechanisms.append(n)
d = gui.askParam([{"Name":"Mechanism",
"Type":"List",
"Value":mechanisms},
{"Name":"Value",
"Value":0.0}])
if not d:
return
value=str(d[1])
d=d[0].split(':')
if len(d)>1:
tag, name=d[:2]
else:
tag=d[0]
name=None
if tag=="Ra":
for s in secs:
s.setAttrib("Ra",value)
else:
propnames={"RangeVar":"Values",
"Channel":"Density"}
for s in secs:
var = s.getElements(tag, name)
if not var:
continue
var=var[0]
var.setAttrib(propnames[tag], value)
gui.report("Set value in %i sections" % len(secs))
def addChan(gui, secs):
gui.report("Operating on %i sections" % len(secs))
d = gui.askParam([{"Name":'Tag',
"Type":"Choice",
"Value":{"Channel":[{"Name":"Ion",
"Type":"List",
"Value": ["Na", "K", "Ca", "Cl", "Leak"]},
{"Name":"Reversal",
"Type":str}],
"RangeVar":[]}},
{"Name":"Name",
"Type":str},
{"Name":"Density",
"Type":str}]
)
if not d:
return
for s in secs:
if d[0][0]=="Channel":
gui.makeElem("Channel", {"Name":d[1], "Ion":d[0][1], "Density":d[2], "Reversal":d[0][2]}, s, update=False)
else:
gui.makeElem("RangeVar", {"Name":d[1], "Values":d[2]}, s, update=False)
if secs:
gui.update_all(object=secs[0].container, event="Rebuild")
gui.report("Added mechanism in %i sections" % len(secs))
def delChan(gui,secs):
gui.report("Operating on %i sections" % len(secs))
mechanisms=[]
for s in secs:
ell=s.getElements(["Channel", "RangeVar"])
for e in ell:
n="%s:%s" % (e.__tag__, e.name())
if not n in mechanisms:
mechanisms.append(n)
d = gui.askParam([{"Name":"Name",
"Type":"List",
"Value":mechanisms}])
if not d:
return
kill = 0
d=d[0].split(':')
tag, name=d[:2]
for s in secs:
var = s.getElements(tag, name)
if not var:
continue
for c in var:
gui.update_all(object=c, event="Delete")
kill+=1
gui.report("Deleted %i mechanisms" % kill)
def maskPoint(gui, elems):
mask = elems[0]
cell=mask.container
d=gui.askParam([{"Name":"Section",
"Type":"List",
"Value":cell._sections.keys()},
{"Name":"Location",
"Value":0.5}])
if not d:
return
loc=(cell._sections[d[0]], d[1])
gui.report(str(mask[loc]))
def maskAssign(gui, elems):
mask = elems[0]
mask.assign()
gui.report("set values")
MECM={"Set Mechanism Density":(setProperty, "Section"),
'Remove Mechanism':(delChan, "Section"),
"Insert Mechanism":(addChan, "Section"),
#"Launch Cell Editor":(cellEdit, "Cell"),
"Evaluate at point":(maskPoint, "Mask"),
"Assign density":(maskAssign, "Mask"),
"Select Sections":(regSel, "NamedRegion"),
"Make Region":(newRegion, areSections)
}
|
gic888/MIEN
|
interface/modelbuilder.py
|
Python
|
gpl-2.0
| 25,254
|
[
"NEURON"
] |
53acf7358e9989a4f70df9fdcf1ced4d58ecb35b59cfa21ea97f67f84b236ead
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts Assert statements to their corresponding TF calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.pyct import templates
class AssertsTransformer(converter.Base):
"""Transforms Print nodes to Call so they can be handled as functions."""
def visit_Assert(self, node):
self.generic_visit(node)
# Note: The lone tf.Assert call will be wrapped with control_dependencies
# by side_effect_guards.
template = """
tf.Assert(test, (msg,))
"""
if node.msg is None:
return templates.replace(
template, test=node.test, msg=gast.Str('Assertion error'))
elif isinstance(node.msg, gast.Str):
return templates.replace(template, test=node.test, msg=node.msg)
else:
raise NotImplementedError('can only convert string messages for now.')
def transform(node, ctx):
return AssertsTransformer(ctx).visit(node)
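# Illustrative sketch (not part of the original converter): the template in
# visit_Assert above rewrites a plain Python assert into a tf.Assert call.
# Roughly,
#
#     assert x > 0, 'x must be positive'
#
# becomes the equivalent of
#
#     tf.Assert(x > 0, ('x must be positive',))
#
# An assert without a message gets the generic 'Assertion error' string, and
# non-string messages raise NotImplementedError, as coded above.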
|
drpngx/tensorflow
|
tensorflow/contrib/autograph/converters/asserts.py
|
Python
|
apache-2.0
| 1,737
|
[
"VisIt"
] |
4fc56ed591a5fb4899a8d994622e65a0af2346396cff5d63e4bfbc869420ea93
|
# -*- coding: utf-8 -*-
"""Testing functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
from contextlib import contextmanager
from functools import partial, wraps
import os
import inspect
from io import StringIO
from shutil import rmtree
import sys
import tempfile
import traceback
from unittest import SkipTest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from ._logging import warn, ClosingStringIO
from .numerics import object_diff
from ..fixes import _compare_version
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
class _TempDir(str):
"""Create and auto-destroy temp dir.
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self): # noqa: D105
new = str.__new__(self, tempfile.mkdtemp(prefix='tmp_mne_tempdir_'))
return new
def __init__(self): # noqa: D102
self._path = self.__str__()
def __del__(self): # noqa: D105
rmtree(self._path, ignore_errors=True)
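# Illustrative sketch (assumed usage, not from this file): _TempDir behaves
# like a str holding the directory path, so it can be passed wherever a path
# is expected and is removed once the instance is garbage-collected, e.g.:
#
#     def test_something():
#         tempdir = _TempDir()
#         fname = os.path.join(tempdir, 'test_raw.fif')
#         ...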
def requires_nibabel():
"""Wrap to requires_module with a function call (fewer lines to change)."""
return partial(requires_module, name='nibabel')
def requires_dipy():
"""Check for dipy."""
import pytest
# for some strange reason on CIs we can get:
#
# can get weird ImportError: dlopen: cannot load any more object
# with static TLS
#
# so let's import everything in the decorator.
try:
from dipy.align import imaffine, imwarp, metrics, transforms # noqa, analysis:ignore
from dipy.align.reslice import reslice # noqa, analysis:ignore
from dipy.align.imaffine import AffineMap # noqa, analysis:ignore
from dipy.align.imwarp import DiffeomorphicMap # noqa, analysis:ignore
except Exception:
have = False
else:
have = True
return pytest.mark.skipif(not have, reason='Requires dipy >= 0.10.1')
def requires_version(library, min_version='0.0'):
"""Check for a library version."""
import pytest
reason = f'Requires {library}'
if min_version != '0.0':
        reason += f' version >= {min_version}'
return pytest.mark.skipif(not check_version(library, min_version),
reason=reason)
def requires_module(function, name, call=None):
"""Skip a test if package is not available (decorator)."""
import pytest
call = ('import %s' % name) if call is None else call
reason = 'Test %s skipped, requires %s.' % (function.__name__, name)
try:
        exec(call, globals(), locals())
except Exception as exc:
if len(str(exc)) > 0 and str(exc) != 'No module named %s' % name:
reason += ' Got exception (%s)' % (exc,)
skip = True
else:
skip = False
return pytest.mark.skipif(skip, reason=reason)(function)
_mne_call = """
if not has_mne_c():
raise ImportError
"""
_fs_call = """
if not has_freesurfer():
raise ImportError
"""
_n2ft_call = """
if 'NEUROMAG2FT_ROOT' not in os.environ:
raise ImportError
"""
requires_pandas = partial(requires_module, name='pandas')
requires_pylsl = partial(requires_module, name='pylsl')
requires_sklearn = partial(requires_module, name='sklearn')
requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
def requires_freesurfer(arg):
"""Require Freesurfer."""
if isinstance(arg, str):
# Calling as @requires_freesurfer('progname'): return decorator
# after checking for progname existence
call = """
from . import run_subprocess
run_subprocess([%r, '--version'])
""" % (arg,)
return partial(
requires_module, name='Freesurfer (%s)' % (arg,), call=call)
else:
# Calling directly as @requires_freesurfer: return decorated function
# and just check env var existence
return requires_module(arg, name='Freesurfer', call=_fs_call)
requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
call=_n2ft_call)
requires_vtk = partial(requires_module, name='vtk')
requires_good_network = partial(
requires_module, name='good network connection',
call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
' raise ImportError')
requires_nitime = partial(requires_module, name='nitime')
# just keep this in case downstream packages need it (no coverage hit here)
requires_h5py = partial(requires_module, name='h5py')
def requires_numpydoc(func):
"""Decorate tests that need numpydoc."""
return requires_version('numpydoc', '1.0')(func) # validate needs 1.0
def check_version(library, min_version):
r"""Check minimum library version required.
Parameters
----------
library : str
The library name to import. Must have a ``__version__`` property.
min_version : str
The minimum version string. Anything that matches
``'(\d+ | [a-z]+ | \.)'``. Can also be empty to skip version
check (just check for library presence).
Returns
-------
ok : bool
True if the library exists with at least the specified version.
"""
ok = True
try:
library = __import__(library)
except ImportError:
ok = False
else:
if min_version:
this_version = getattr(library, '__version__', '0.0').lstrip('v')
if _compare_version(this_version, '<', min_version):
ok = False
return ok
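# Illustrative sketch (hypothetical version numbers): check_version only
# inspects the ``__version__`` attribute of an importable module, so a typical
# guard looks like:
#
#     if not check_version('numpy', '1.20'):
#         raise SkipTest('numpy >= 1.20 is required for this test')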
def run_command_if_main():
"""Run a given command if it's __main__."""
local_vars = inspect.currentframe().f_back.f_locals
if local_vars.get('__name__', '') == '__main__':
local_vars['run']()
class ArgvSetter(object):
"""Temporarily set sys.argv."""
def __init__(self, args=(), disable_stdout=True,
disable_stderr=True): # noqa: D102
self.argv = list(('python',) + args)
self.stdout = ClosingStringIO() if disable_stdout else sys.stdout
self.stderr = ClosingStringIO() if disable_stderr else sys.stderr
def __enter__(self): # noqa: D105
self.orig_argv = sys.argv
sys.argv = self.argv
self.orig_stdout = sys.stdout
sys.stdout = self.stdout
self.orig_stderr = sys.stderr
sys.stderr = self.stderr
return self
def __exit__(self, *args): # noqa: D105
sys.argv = self.orig_argv
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
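# Illustrative sketch (hypothetical command and arguments): ArgvSetter swaps
# sys.argv and, by default, captures stdout/stderr, which is useful when
# testing command-line entry points:
#
#     with ArgvSetter(('--verbose',)) as setter:
#         run_some_cli()                      # hypothetical CLI entry point
#     captured = setter.stdout.getvalue()     # assumes ClosingStringIO API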
class SilenceStdout(object):
"""Silence stdout."""
def __init__(self, close=True):
self.close = close
def __enter__(self): # noqa: D105
self.stdout = sys.stdout
sys.stdout = StringIO()
return sys.stdout
def __exit__(self, *args): # noqa: D105
if self.close:
sys.stdout.close()
sys.stdout = self.stdout
def has_nibabel():
"""Determine if nibabel is installed.
Returns
-------
has : bool
True if the user has nibabel.
"""
try:
import nibabel # noqa
except ImportError:
return False
else:
return True
def has_mne_c():
"""Check for MNE-C."""
return 'MNE_ROOT' in os.environ
def has_freesurfer():
"""Check for Freesurfer."""
return 'FREESURFER_HOME' in os.environ
def buggy_mkl_svd(function):
"""Decorate tests that make calls to SVD and intermittently fail."""
@wraps(function)
def dec(*args, **kwargs):
try:
return function(*args, **kwargs)
except np.linalg.LinAlgError as exp:
if 'SVD did not converge' in str(exp):
msg = 'Intel MKL SVD convergence error detected, skipping test'
warn(msg)
raise SkipTest(msg)
raise
return dec
def assert_and_remove_boundary_annot(annotations, n=1):
"""Assert that there are boundary annotations and remove them."""
from ..io.base import BaseRaw
if isinstance(annotations, BaseRaw): # allow either input
annotations = annotations.annotations
for key in ('EDGE', 'BAD'):
idx = np.where(annotations.description == '%s boundary' % key)[0]
assert len(idx) == n
annotations.delete(idx)
def assert_object_equal(a, b):
"""Assert two objects are equal."""
d = object_diff(a, b)
assert d == '', d
def _raw_annot(meas_date, orig_time):
from .. import Annotations, create_info
from ..annotations import _handle_meas_date
from ..io import RawArray
info = create_info(ch_names=10, sfreq=10.)
raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
if meas_date is not None:
meas_date = _handle_meas_date(meas_date)
with raw.info._unlock(check_after=True):
raw.info['meas_date'] = meas_date
annot = Annotations([.5], [.2], ['dummy'], orig_time)
raw.set_annotations(annotations=annot)
return raw
def _get_data(x, ch_idx):
"""Get the (n_ch, n_times) data array."""
from ..evoked import Evoked
from ..io import BaseRaw
if isinstance(x, BaseRaw):
return x[ch_idx][0]
elif isinstance(x, Evoked):
return x.data[ch_idx]
def _check_snr(actual, desired, picks, min_tol, med_tol, msg, kind='MEG'):
"""Check the SNR of a set of channels."""
actual_data = _get_data(actual, picks)
desired_data = _get_data(desired, picks)
bench_rms = np.sqrt(np.mean(desired_data * desired_data, axis=1))
error = actual_data - desired_data
error_rms = np.sqrt(np.mean(error * error, axis=1))
np.clip(error_rms, 1e-60, np.inf, out=error_rms) # avoid division by zero
snrs = bench_rms / error_rms
# min tol
snr = snrs.min()
bad_count = (snrs < min_tol).sum()
msg = ' (%s)' % msg if msg != '' else msg
assert bad_count == 0, ('SNR (worst %0.2f) < %0.2f for %s/%s '
'channels%s' % (snr, min_tol, bad_count,
len(picks), msg))
# median tol
snr = np.median(snrs)
assert snr >= med_tol, ('%s SNR median %0.2f < %0.2f%s'
% (kind, snr, med_tol, msg))
def assert_meg_snr(actual, desired, min_tol, med_tol=500., chpi_med_tol=500.,
msg=None):
"""Assert channel SNR of a certain level.
Mostly useful for operations like Maxwell filtering that modify
MEG channels while leaving EEG and others intact.
"""
from ..io.pick import pick_types
    picks = pick_types(actual.info, meg=True, exclude=[])
picks_desired = pick_types(desired.info, meg=True, exclude=[])
assert_array_equal(picks, picks_desired, err_msg='MEG pick mismatch')
chpis = pick_types(actual.info, meg=False, chpi=True, exclude=[])
chpis_desired = pick_types(desired.info, meg=False, chpi=True, exclude=[])
if chpi_med_tol is not None:
assert_array_equal(chpis, chpis_desired, err_msg='cHPI pick mismatch')
others = np.setdiff1d(np.arange(len(actual.ch_names)),
np.concatenate([picks, chpis]))
others_desired = np.setdiff1d(np.arange(len(desired.ch_names)),
np.concatenate([picks_desired,
chpis_desired]))
assert_array_equal(others, others_desired, err_msg='Other pick mismatch')
if len(others) > 0: # if non-MEG channels present
assert_allclose(_get_data(actual, others),
_get_data(desired, others), atol=1e-11, rtol=1e-5,
err_msg='non-MEG channel mismatch')
_check_snr(actual, desired, picks, min_tol, med_tol, msg, kind='MEG')
if chpi_med_tol is not None and len(chpis) > 0:
_check_snr(actual, desired, chpis, 0., chpi_med_tol, msg, kind='cHPI')
def assert_snr(actual, desired, tol):
"""Assert actual and desired arrays are within some SNR tolerance."""
from scipy import linalg
with np.errstate(divide='ignore'): # allow infinite
snr = (linalg.norm(desired, ord='fro') /
linalg.norm(desired - actual, ord='fro'))
assert snr >= tol, '%f < %f' % (snr, tol)
def assert_stcs_equal(stc1, stc2):
"""Check that two STC are equal."""
assert_allclose(stc1.times, stc2.times)
assert_allclose(stc1.data, stc2.data)
assert_array_equal(stc1.vertices[0], stc2.vertices[0])
assert_array_equal(stc1.vertices[1], stc2.vertices[1])
assert_allclose(stc1.tmin, stc2.tmin)
assert_allclose(stc1.tstep, stc2.tstep)
def _dig_sort_key(dig):
"""Sort dig keys."""
return (dig['kind'], dig['ident'])
def assert_dig_allclose(info_py, info_bin, limit=None):
"""Assert dig allclose."""
from ..bem import fit_sphere_to_headshape
from ..io.constants import FIFF
from ..io.meas_info import Info
from ..channels.montage import DigMontage
# test dig positions
dig_py, dig_bin = info_py, info_bin
if isinstance(dig_py, Info):
assert isinstance(dig_bin, Info)
dig_py, dig_bin = dig_py['dig'], dig_bin['dig']
else:
assert isinstance(dig_bin, DigMontage)
assert isinstance(dig_py, DigMontage)
dig_py, dig_bin = dig_py.dig, dig_bin.dig
info_py = info_bin = None
assert isinstance(dig_py, list)
assert isinstance(dig_bin, list)
dig_py = sorted(dig_py, key=_dig_sort_key)
dig_bin = sorted(dig_bin, key=_dig_sort_key)
assert len(dig_py) == len(dig_bin)
for ii, (d_py, d_bin) in enumerate(zip(dig_py[:limit], dig_bin[:limit])):
for key in ('ident', 'kind', 'coord_frame'):
assert d_py[key] == d_bin[key], key
assert_allclose(d_py['r'], d_bin['r'], rtol=1e-5, atol=1e-5,
err_msg='Failure on %s:\n%s\n%s'
% (ii, d_py['r'], d_bin['r']))
if any(d['kind'] == FIFF.FIFFV_POINT_EXTRA for d in dig_py) and \
info_py is not None:
r_bin, o_head_bin, o_dev_bin = fit_sphere_to_headshape(
info_bin, units='m', verbose='error')
r_py, o_head_py, o_dev_py = fit_sphere_to_headshape(
info_py, units='m', verbose='error')
assert_allclose(r_py, r_bin, atol=1e-6)
assert_allclose(o_dev_py, o_dev_bin, rtol=1e-5, atol=1e-6)
assert_allclose(o_head_py, o_head_bin, rtol=1e-5, atol=1e-6)
@contextmanager
def modified_env(**d):
"""Use a modified os.environ with temporarily replaced key/value pairs.
Parameters
----------
**kwargs : dict
The key/value pairs of environment variables to replace.
"""
warn('modified_env is deprecated and will be removed in 1.1. In tests, '
'use monkeypatch from pytest instead. In subprocess calls, pass '
'modified environments directly.', DeprecationWarning)
orig_env = dict()
for key, val in d.items():
orig_env[key] = os.getenv(key)
if val is not None:
assert isinstance(val, str)
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
try:
yield
finally:
for key, val in orig_env.items():
if val is not None:
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
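# Illustrative sketch: although deprecated above in favour of pytest's
# monkeypatch, the intended usage of modified_env was simply:
#
#     with modified_env(MNE_SKIP_NETWORK_TESTS='1', SOME_VAR=None):
#         ...  # runs with the first variable set and the second removed
#
# SOME_VAR is only an illustrative name.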
def _click_ch_name(fig, ch_index=0, button=1):
"""Click on a channel name in a raw/epochs/ICA browse-style plot."""
from ..viz.utils import _fake_click
fig.canvas.draw()
text = fig.mne.ax_main.get_yticklabels()[ch_index]
bbox = text.get_window_extent()
x = bbox.intervalx.mean()
y = bbox.intervaly.mean()
_fake_click(fig, fig.mne.ax_main, (x, y), xform='pix',
button=button)
|
mne-tools/mne-python
|
mne/utils/_testing.py
|
Python
|
bsd-3-clause
| 16,406
|
[
"VTK"
] |
6a0e77c90bb0eaa4eff36f0814049388f637d98032c89034464823ecf8154d90
|
""" Definitions of a standard set of pilot commands
Each commands is represented by a class inheriting CommandBase class.
The command class constructor takes PilotParams object which is a data
structure which keeps common parameters across all the pilot commands.
The constructor must call the superclass constructor with the PilotParams
object and the command name as arguments, e.g. ::
class InstallDIRAC( CommandBase ):
def __init__( self, pilotParams ):
CommandBase.__init__(self, pilotParams, 'Install')
...
The command class must implement execute() method for the actual command
execution.
"""
__RCSID__ = "$Id$"
import sys
import os
import stat
import socket
from pilotTools import CommandBase, retrieveUrlTimeout
class GetPilotVersion(CommandBase):
""" Used to get the pilot version that needs to be installed.
If passed as a parameter, uses that one. If not passed, it looks for alternatives.
      This ensures that a version is always obtained, even on non-standard Grid resources.
"""
def execute(self):
""" Standard method for pilot commands
"""
if self.pp.releaseVersion:
self.log.info("Pilot version requested as pilot script option. Nothing to do.")
else:
try:
import json
except ImportError:
self.log.error('No json module available, exiting ...')
sys.exit(2)
self.log.info("Pilot version not requested as pilot script option, going to find it")
result = retrieveUrlTimeout(self.pp.pilotCFGFileLocation + '/' + self.pp.pilotCFGFile,
self.pp.pilotCFGFile,
self.log,
timeout=120)
if not result:
self.log.error("Failed to get pilot version, exiting ...")
sys.exit(1)
fp = open(self.pp.pilotCFGFile + '-local', 'r')
pilotCFGFileContent = json.load(fp)
fp.close()
pilotVersions = [str(pv) for pv in pilotCFGFileContent[self.pp.setup]['Version']]
self.log.debug("Pilot versions found: %s" % ', '.join(pilotVersions))
self.log.info("Setting pilot version to %s" % pilotVersions[0])
self.pp.releaseVersion = pilotVersions[0]
class CheckWorkerNode(CommandBase):
""" Executes some basic checks
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(CheckWorkerNode, self).__init__(pilotParams)
def execute(self):
""" Get host and local user info, and other basic checks, e.g. space available
"""
self.log.info('Uname = %s' % " ".join(os.uname()))
self.log.info('Host Name = %s' % socket.gethostname())
self.log.info('Host FQDN = %s' % socket.getfqdn())
self.log.info('WorkingDir = %s' % self.pp.workingDir) # this could be different than rootPath
fileName = '/etc/redhat-release'
if os.path.exists(fileName):
f = open(fileName, 'r')
self.log.info('RedHat Release = %s' % f.read().strip())
f.close()
fileName = '/etc/lsb-release'
if os.path.isfile(fileName):
f = open(fileName, 'r')
self.log.info('Linux release:\n%s' % f.read().strip())
f.close()
fileName = '/proc/cpuinfo'
if os.path.exists(fileName):
f = open(fileName, 'r')
cpu = f.readlines()
f.close()
nCPU = 0
for line in cpu:
if line.find('cpu MHz') == 0:
nCPU += 1
freq = line.split()[3]
elif line.find('model name') == 0:
CPUmodel = line.split(': ')[1].strip()
self.log.info('CPU (model) = %s' % CPUmodel)
self.log.info('CPU (MHz) = %s x %s' % (nCPU, freq))
fileName = '/proc/meminfo'
if os.path.exists(fileName):
f = open(fileName, 'r')
mem = f.readlines()
f.close()
freeMem = 0
for line in mem:
if line.find('MemTotal:') == 0:
totalMem = int(line.split()[1])
if line.find('MemFree:') == 0:
freeMem += int(line.split()[1])
if line.find('Cached:') == 0:
freeMem += int(line.split()[1])
self.log.info('Memory (kB) = %s' % totalMem)
self.log.info('FreeMem. (kB) = %s' % freeMem)
##########################################################################
# Disk space check
# fs = os.statvfs( rootPath )
fs = os.statvfs(self.pp.workingDir)
# bsize; /* file system block size */
# frsize; /* fragment size */
# blocks; /* size of fs in f_frsize units */
# bfree; /* # free blocks */
# bavail; /* # free blocks for non-root */
# files; /* # inodes */
# ffree; /* # free inodes */
# favail; /* # free inodes for non-root */
# flag; /* mount flags */
# namemax; /* maximum filename length */
diskSpace = fs[4] * fs[0] / 1024 / 1024
self.log.info('DiskSpace (MB) = %s' % diskSpace)
if diskSpace < self.pp.minDiskSpace:
self.log.error('%s MB < %s MB, not enough local disk space available, exiting'
% (diskSpace, self.pp.minDiskSpace))
sys.exit(1)
class InstallDIRAC(CommandBase):
""" Basically, this is used to call dirac-install with the passed parameters.
      It requires the dirac-install script to be in the same directory.
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(InstallDIRAC, self).__init__(pilotParams)
self.installOpts = []
self.pp.rootPath = self.pp.pilotRootPath
self.installScriptName = 'dirac-install.py'
self.installScript = ''
def _setInstallOptions(self):
""" Setup installation parameters
"""
for o, v in self.pp.optList:
if o in ('-b', '--build'):
self.installOpts.append('-b')
elif o == '-d' or o == '--debug':
self.installOpts.append('-d')
elif o == '-e' or o == '--extraPackages':
self.installOpts.append('-e "%s"' % v)
elif o == '-g' or o == '--grid':
self.pp.gridVersion = v
elif o == '-i' or o == '--python':
self.pp.pythonVersion = v
elif o in ('-l', '--project'):
self.installOpts.append("-l '%s'" % v)
elif o == '-p' or o == '--platform':
self.pp.platform = v
elif o == '-u' or o == '--url':
self.installOpts.append('-u "%s"' % v)
elif o in ('-P', '--path'):
self.installOpts.append('-P "%s"' % v)
self.pp.rootPath = v
elif o in ('-V', '--installation'):
self.installOpts.append('-V "%s"' % v)
elif o == '-t' or o == '--server':
self.installOpts.append('-t "server"')
if self.pp.gridVersion:
self.installOpts.append("-g '%s'" % self.pp.gridVersion)
if self.pp.pythonVersion:
self.installOpts.append("-i '%s'" % self.pp.pythonVersion)
if self.pp.platform:
self.installOpts.append('-p "%s"' % self.pp.platform)
# The release version to install is a requirement
self.installOpts.append('-r "%s"' % self.pp.releaseVersion)
self.log.debug('INSTALL OPTIONS [%s]' % ', '.join(map(str, self.installOpts)))
def _locateInstallationScript(self):
""" Locate installation script
"""
installScript = ''
for path in (self.pp.pilotRootPath, self.pp.originalRootPath, self.pp.rootPath):
installScript = os.path.join(path, self.installScriptName)
if os.path.isfile(installScript):
break
self.installScript = installScript
if not os.path.isfile(installScript):
self.log.error("%s requires %s to exist in one of: %s, %s, %s" % (self.pp.pilotScriptName,
self.installScriptName,
self.pp.pilotRootPath,
self.pp.originalRootPath,
self.pp.rootPath))
sys.exit(1)
try:
# change permission of the script
os.chmod(self.installScript, stat.S_IRWXU)
except OSError:
pass
def _installDIRAC(self):
""" Install DIRAC or its extension, then parse the environment file created, and use it for subsequent calls
"""
# Installing
installCmd = "%s %s" % (self.installScript, " ".join(self.installOpts))
self.log.debug("Installing with: %s" % installCmd)
# At this point self.pp.installEnv may coincide with os.environ
# If extensions want to pass in a modified environment, it's easy to set self.pp.installEnv in an extended command
retCode, output = self.executeAndGetOutput(installCmd, self.pp.installEnv)
self.log.info(output, header=False)
if retCode:
self.log.error("Could not make a proper DIRAC installation [ERROR %d]" % retCode)
self.exitWithError(retCode)
self.log.info("%s completed successfully" % self.installScriptName)
# Parsing the bashrc then adding its content to the installEnv
# at this point self.pp.installEnv may still coincide with os.environ
retCode, output = self.executeAndGetOutput('bash -c "source bashrc && env"', self.pp.installEnv)
if retCode:
self.log.error("Could not parse the bashrc file [ERROR %d]" % retCode)
self.exitWithError(retCode)
for line in output.split('\n'):
try:
var, value = [vx.strip() for vx in line.split('=', 1)]
if var == '_' or 'SSH' in var or '{' in value or '}' in value: # Avoiding useless/confusing stuff
continue
self.pp.installEnv[var] = value
except (IndexError, ValueError):
continue
# At this point self.pp.installEnv should contain all content of bashrc, sourced "on top" of (maybe) os.environ
self.pp.diracInstalled = True
def execute(self):
""" What is called all the time
"""
self._setInstallOptions()
self._locateInstallationScript()
self._installDIRAC()
class ReplaceDIRACCode(CommandBase):
""" This command will replace DIRAC code with the one taken from a different location.
This command is mostly for testing purposes, and should NOT be added in default configurations.
It uses generic -o option for specifying a zip location (like an archive file from github).
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(ReplaceDIRACCode, self).__init__(pilotParams)
def execute(self):
""" Download/unzip an archive file
"""
from io import BytesIO
from urllib2 import urlopen
from zipfile import ZipFile
zipresp = urlopen(self.pp.genericOption)
zfile = ZipFile(BytesIO(zipresp.read()))
os.mkdir(os.getcwd() + os.path.sep + 'AlternativeCode')
zfile.extractall(os.getcwd() + os.path.sep + 'AlternativeCode')
zfile.close()
zipresp.close()
os.rename(os.getcwd() + os.path.sep + 'AlternativeCode' + os.path.sep + os.listdir('./AlternativeCode')[0],
os.getcwd() + os.path.sep + 'AlternativeCode' + os.path.sep + 'DIRAC')
    self.pp.installEnv['PYTHONPATH'] = os.getcwd() + os.path.sep + 'AlternativeCode' + os.path.sep + 'DIRAC' + ':' \
        + self.pp.installEnv['PYTHONPATH']
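# Illustrative note (hypothetical URL): with ReplaceDIRACCode enabled, the
# pilot would be passed something like
#
#     -o https://github.com/DIRACGrid/DIRAC/archive/some-branch.zip
#
# so that self.pp.genericOption points at a zip archive whose first top-level
# directory is renamed to AlternativeCode/DIRAC and prepended to PYTHONPATH.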
class ConfigureBasics(CommandBase):
""" This command completes DIRAC installation, e.g. calls dirac-configure to:
- download, by default, the CAs
- creates a standard or custom (defined by self.pp.localConfigFile) cfg file
to be used where all the pilot configuration is to be set, e.g.:
- adds to it basic info like the version
- adds to it the security configuration
If there is more than one command calling dirac-configure, this one should be always the first one called.
.. note:: Further commands should always call dirac-configure using the options -FDMH
.. note:: If custom cfg file is created further commands should call dirac-configure with
"-O %s %s" % ( self.pp.localConfigFile, self.pp.localConfigFile )
From here on, we have to pay attention to the paths. Specifically, we need to know where to look for
- executables (scripts)
- DIRAC python code
If the pilot has installed DIRAC (and extensions) in the traditional way, so using the dirac-install.py script,
simply the current directory is used, and:
- scripts will be in $CWD/scripts.
- DIRAC python code will be all sitting in $CWD
- the local dirac.cfg file will be found in $CWD/etc
For a more general case of non-traditional installations, we should use the PATH and PYTHONPATH as set by the
installation phase. Executables and code will be searched there.
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(ConfigureBasics, self).__init__(pilotParams)
self.cfg = []
def execute(self):
""" What is called all the times.
VOs may want to replace/extend the _getBasicsCFG and _getSecurityCFG functions
"""
self._getBasicsCFG()
self._getSecurityCFG()
if self.pp.debugFlag:
self.cfg.append('-ddd')
if self.pp.localConfigFile:
self.cfg.append('-O %s' % self.pp.localConfigFile)
configureCmd = "%s %s" % (self.pp.configureScript, " ".join(self.cfg))
retCode, _configureOutData = self.executeAndGetOutput(configureCmd, self.pp.installEnv)
if retCode:
self.log.error("Could not configure DIRAC basics [ERROR %d]" % retCode)
self.exitWithError(retCode)
def _getBasicsCFG(self):
""" basics (needed!)
"""
self.cfg.append('-S "%s"' % self.pp.setup)
if self.pp.configServer:
self.cfg.append('-C "%s"' % self.pp.configServer)
if self.pp.releaseProject:
self.cfg.append('-e "%s"' % self.pp.releaseProject)
self.cfg.append('-o /LocalSite/ReleaseProject=%s' % self.pp.releaseProject)
if self.pp.gateway:
self.cfg.append('-W "%s"' % self.pp.gateway)
if self.pp.userGroup:
self.cfg.append('-o /AgentJobRequirements/OwnerGroup="%s"' % self.pp.userGroup)
if self.pp.userDN:
self.cfg.append('-o /AgentJobRequirements/OwnerDN="%s"' % self.pp.userDN)
self.cfg.append('-o /LocalSite/ReleaseVersion=%s' % self.pp.releaseVersion)
def _getSecurityCFG(self):
""" Nothing specific by default, but need to know host cert and key location in case they are needed
"""
if self.pp.useServerCertificate:
self.cfg.append('--UseServerCertificate')
self.cfg.append("-o /DIRAC/Security/CertFile=%s/hostcert.pem" % self.pp.certsLocation)
self.cfg.append("-o /DIRAC/Security/KeyFile=%s/hostkey.pem" % self.pp.certsLocation)
class CheckCECapabilities(CommandBase):
""" Used to get CE tags and other relevant parameters
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(CheckCECapabilities, self).__init__(pilotParams)
# this variable contains the options that are passed to dirac-configure, and that will fill the local dirac.cfg file
self.cfg = []
def execute(self):
""" Main execution method
"""
if self.pp.useServerCertificate:
self.cfg.append('-o /DIRAC/Security/UseServerCertificate=yes')
if self.pp.localConfigFile:
self.cfg.append(self.pp.localConfigFile) # this file is as input
# Get the resource description as defined in its configuration
checkCmd = 'dirac-resource-get-parameters -S %s -N %s -Q %s %s' % (self.pp.site,
self.pp.ceName,
self.pp.queueName,
" ".join(self.cfg))
retCode, resourceDict = self.executeAndGetOutput(checkCmd, self.pp.installEnv)
if retCode:
self.log.error("Could not get resource parameters [ERROR %d]" % retCode)
self.exitWithError(retCode)
try:
import json
resourceDict = json.loads(resourceDict)
except ValueError:
self.log.error("The pilot command output is not json compatible.")
sys.exit(1)
self.pp.queueParameters = resourceDict
self.cfg = []
# Pick up all the relevant resource parameters that will be used in the job matching
if "WholeNode" in resourceDict:
self.pp.tags.append('WholeNode')
# If MaxNumberOfProcessors not defined check for NumberOfProcessors
if self.pp.maxNumberOfProcessors == 0:
self.pp.maxNumberOfProcessors = int(resourceDict.get("MaxNumberOfProcessors", resourceDict.get("NumberOfProcessors", 0)))
# Tags must be added to already defined tags if any
if resourceDict.get('Tag'):
self.pp.tags += resourceDict['Tag']
self.pp.tags = list(set(self.pp.tags))
if self.pp.tags:
self.cfg.append('-o "/Resources/Computing/CEDefaults/Tag=%s"' % ','.join((str(x) for x in self.pp.tags)))
# RequiredTags are similar to tags.
if resourceDict.get('RequiredTag'):
self.pp.reqtags += resourceDict['RequiredTag']
self.pp.reqtags = list(set(self.pp.reqtags))
if self.pp.reqtags:
self.cfg.append('-o "/Resources/Computing/CEDefaults/RequiredTag=%s"' %
','.join((str(x) for x in self.pp.reqtags)))
# LocalCE type for singularity
if resourceDict.get('Container') in ["Singularity", "singularity"]:
self.cfg.append('-o "/LocalSite/LocalCE=Singularity"')
# LocalCE for Container options
if resourceDict.get('ContainerBin'):
self.cfg.append('-o "/LocalSite/ContainerBin=%s"' % resourceDict['ContainerBin'])
if resourceDict.get('ContainerRoot'):
self.cfg.append('-o "/LocalSite/ContainerRoot=%s"' % resourceDict['ContainerRoot'])
if resourceDict.get('ContainerBind'):
self.cfg.append('-o "/LocalSite/ContainerBind=%s"' % resourceDict['ContainerBind'])
if resourceDict.get('ContainerOptions'):
self.cfg.append('-o "/LocalSite/ContainerOptions=%s"' % resourceDict['ContainerOptions'])
if resourceDict.get('ContainerExtraOpts'):
self.cfg.append('-o "/LocalSite/ContainerExtraOpts=%s"' % resourceDict['ContainerExtraOpts'])
# If there is anything to be added to the local configuration, let's do it
if self.cfg:
self.cfg.append('-FDMH')
if self.debugFlag:
self.cfg.append('-ddd')
if self.pp.localConfigFile:
self.cfg.append('-O %s' % self.pp.localConfigFile) # this file is as output
self.cfg.append(self.pp.localConfigFile) # this file is as input
configureCmd = "%s %s" % (self.pp.configureScript, " ".join(self.cfg))
retCode, _configureOutData = self.executeAndGetOutput(configureCmd, self.pp.installEnv)
if retCode:
self.log.error("Could not configure DIRAC [ERROR %d]" % retCode)
self.exitWithError(retCode)
else:
self.log.debug('No Tags defined for this Queue')
class CheckWNCapabilities(CommandBase):
""" Used to get capabilities specific to the Worker Node. This command must be called
after the CheckCECapabilities command
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(CheckWNCapabilities, self).__init__(pilotParams)
self.cfg = []
def execute(self):
""" Discover NumberOfProcessors and RAM
"""
if self.pp.useServerCertificate:
self.cfg.append('-o /DIRAC/Security/UseServerCertificate=yes')
if self.pp.localConfigFile:
self.cfg.append(self.pp.localConfigFile) # this file is as input
# Get the worker node parameters
checkCmd = 'dirac-wms-get-wn-parameters -S %s -N %s -Q %s %s' % (self.pp.site,
self.pp.ceName,
self.pp.queueName,
" ".join(self.cfg))
retCode, result = self.executeAndGetOutput(checkCmd, self.pp.installEnv)
if retCode:
self.log.error( "Could not get resource parameters [ERROR %d]" % retCode )
self.exitWithError( retCode )
numberOfProcessors = 1
try:
      result = result.split(' ')
      numberOfProcessorsOnWN = int(result[0])
      maxRAM = int(result[1])
except ValueError:
self.log.error("Wrong Command output %s" % result)
sys.exit(1)
self.cfg = []
# If NumberOfProcessors or MaxRAM are defined in the resource configuration, these
# values are preferred
if "WholeNode" in self.pp.tags:
numberOfProcessors = numberOfProcessorsOnWN
if self.pp.maxNumberOfProcessors > 0:
numberOfProcessors = min(numberOfProcessorsOnWN, self.pp.maxNumberOfProcessors)
if not numberOfProcessors:
self.log.warn("Could not retrieve number of processors, assuming 1")
numberOfProcessors = 1
self.cfg.append(
'-o "/Resources/Computing/CEDefaults/NumberOfProcessors=%d"' % int(numberOfProcessors))
maxRAM = self.pp.queueParameters.get('MaxRAM', maxRAM)
if maxRAM:
try:
self.cfg.append(
'-o "/Resources/Computing/CEDefaults/MaxRAM=%d"' % int(maxRAM))
except ValueError:
self.log.warn("MaxRAM is not an integer, will not fill it")
else:
self.log.warn(
"Could not retrieve MaxRAM, this parameter won't be filled")
if self.cfg:
self.cfg.append('-FDMH')
if self.debugFlag:
self.cfg.append('-ddd')
if self.pp.localConfigFile:
self.cfg.append('-O %s' % self.pp.localConfigFile) # this file is as output
self.cfg.append(self.pp.localConfigFile) # this file is as input
configureCmd = "%s %s" % (self.pp.configureScript, " ".join(self.cfg))
retCode, _configureOutData = self.executeAndGetOutput(configureCmd, self.pp.installEnv)
if retCode:
self.log.error("Could not configure DIRAC [ERROR %d]" % retCode)
self.exitWithError(retCode)
class ConfigureSite(CommandBase):
""" Command to configure DIRAC sites using the pilot options
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(ConfigureSite, self).__init__(pilotParams)
# this variable contains the options that are passed to dirac-configure, and that will fill the local dirac.cfg file
self.cfg = []
self.boincUserID = ''
self.boincHostID = ''
self.boincHostPlatform = ''
self.boincHostName = ''
def execute(self):
""" Setup configuration parameters
"""
self.__setFlavour()
self.cfg.append('-o /LocalSite/GridMiddleware=%s' % self.pp.flavour)
self.cfg.append('-n "%s"' % self.pp.site)
self.cfg.append('-S "%s"' % self.pp.setup)
if not self.pp.ceName or not self.pp.queueName:
self.__getCEName()
self.cfg.append('-N "%s"' % self.pp.ceName)
self.cfg.append('-o /LocalSite/GridCE=%s' % self.pp.ceName)
self.cfg.append('-o /LocalSite/CEQueue=%s' % self.pp.queueName)
if self.pp.ceType:
self.cfg.append('-o /LocalSite/LocalCE=%s' % self.pp.ceType)
for o, v in self.pp.optList:
if o == '-o' or o == '--option':
self.cfg.append( '-o "%s"' % v )
if self.pp.pilotReference != 'Unknown':
self.cfg.append('-o /LocalSite/PilotReference=%s' % self.pp.pilotReference)
# add options for BOINc
# FIXME: this should not be part of the standard configuration
if self.boincUserID:
self.cfg.append('-o /LocalSite/BoincUserID=%s' % self.boincUserID)
if self.boincHostID:
self.cfg.append('-o /LocalSite/BoincHostID=%s' % self.boincHostID)
if self.boincHostPlatform:
self.cfg.append('-o /LocalSite/BoincHostPlatform=%s' % self.boincHostPlatform)
if self.boincHostName:
self.cfg.append('-o /LocalSite/BoincHostName=%s' % self.boincHostName)
if self.pp.useServerCertificate:
self.cfg.append('--UseServerCertificate')
self.cfg.append("-o /DIRAC/Security/CertFile=%s/hostcert.pem" % self.pp.certsLocation)
self.cfg.append("-o /DIRAC/Security/KeyFile=%s/hostkey.pem" % self.pp.certsLocation)
# these are needed as this is not the first time we call dirac-configure
self.cfg.append('-FDMH')
if self.pp.localConfigFile:
self.cfg.append('-O %s' % self.pp.localConfigFile)
self.cfg.append(self.pp.localConfigFile)
if self.debugFlag:
self.cfg.append('-ddd')
configureCmd = "%s %s" % (self.pp.configureScript, " ".join(self.cfg))
retCode, _configureOutData = self.executeAndGetOutput(configureCmd, self.pp.installEnv)
if retCode:
self.log.error("Could not configure DIRAC [ERROR %d]" % retCode)
self.exitWithError(retCode)
def __setFlavour(self):
pilotRef = 'Unknown'
# Pilot reference is specified at submission
if self.pp.pilotReference:
self.pp.flavour = 'DIRAC'
pilotRef = self.pp.pilotReference
# Take the reference from the Torque batch system
if 'PBS_JOBID' in os.environ:
self.pp.flavour = 'SSHTorque'
pilotRef = 'sshtorque://' + self.pp.ceName + '/' + os.environ['PBS_JOBID'].split('.')[0]
# Take the reference from the OAR batch system
if 'OAR_JOBID' in os.environ:
self.pp.flavour = 'SSHOAR'
pilotRef = 'sshoar://' + self.pp.ceName + '/' + os.environ['OAR_JOBID']
# Grid Engine
if 'JOB_ID' in os.environ and 'SGE_TASK_ID' in os.environ:
self.pp.flavour = 'SSHGE'
pilotRef = 'sshge://' + self.pp.ceName + '/' + os.environ['JOB_ID']
# Generic JOB_ID
elif 'JOB_ID' in os.environ:
self.pp.flavour = 'Generic'
pilotRef = 'generic://' + self.pp.ceName + '/' + os.environ['JOB_ID']
# Condor
if 'CONDOR_JOBID' in os.environ:
self.pp.flavour = 'SSHCondor'
pilotRef = 'sshcondor://' + self.pp.ceName + '/' + os.environ['CONDOR_JOBID']
# HTCondor
if 'HTCONDOR_JOBID' in os.environ:
self.pp.flavour = 'HTCondorCE'
pilotRef = 'htcondorce://' + self.pp.ceName + '/' + os.environ['HTCONDOR_JOBID']
# LSF
if 'LSB_BATCH_JID' in os.environ:
self.pp.flavour = 'SSHLSF'
pilotRef = 'sshlsf://' + self.pp.ceName + '/' + os.environ['LSB_BATCH_JID']
# SLURM batch system
if 'SLURM_JOBID' in os.environ:
self.pp.flavour = 'SSHSLURM'
pilotRef = 'sshslurm://' + self.pp.ceName + '/' + os.environ['SLURM_JOBID']
# This is the CREAM direct submission case
if 'CREAM_JOBID' in os.environ:
self.pp.flavour = 'CREAM'
pilotRef = os.environ['CREAM_JOBID']
if 'OSG_WN_TMP' in os.environ:
self.pp.flavour = 'OSG'
# GLOBUS Computing Elements
if 'GLOBUS_GRAM_JOB_CONTACT' in os.environ:
self.pp.flavour = 'GLOBUS'
pilotRef = os.environ['GLOBUS_GRAM_JOB_CONTACT']
# Direct SSH tunnel submission
if 'SSHCE_JOBID' in os.environ:
self.pp.flavour = 'SSH'
pilotRef = 'ssh://' + self.pp.ceName + '/' + os.environ['SSHCE_JOBID']
# ARC case
if 'GRID_GLOBAL_JOBID' in os.environ:
self.pp.flavour = 'ARC'
pilotRef = os.environ['GRID_GLOBAL_JOBID']
# VMDIRAC case
if 'VMDIRAC_VERSION' in os.environ:
self.pp.flavour = 'VMDIRAC'
pilotRef = 'vm://' + self.pp.ceName + '/' + os.environ['JOB_ID']
# This is for BOINC case
if 'BOINC_JOB_ID' in os.environ:
self.pp.flavour = 'BOINC'
pilotRef = os.environ['BOINC_JOB_ID']
if self.pp.flavour == 'BOINC':
if 'BOINC_USER_ID' in os.environ:
self.boincUserID = os.environ['BOINC_USER_ID']
if 'BOINC_HOST_ID' in os.environ:
self.boincHostID = os.environ['BOINC_HOST_ID']
if 'BOINC_HOST_PLATFORM' in os.environ:
self.boincHostPlatform = os.environ['BOINC_HOST_PLATFORM']
if 'BOINC_HOST_NAME' in os.environ:
self.boincHostName = os.environ['BOINC_HOST_NAME']
self.log.debug("Flavour: %s; pilot reference: %s " % (self.pp.flavour, pilotRef))
self.pp.pilotReference = pilotRef
def __getCEName(self):
""" Try to get the CE name
"""
# FIXME: this should not be part of the standard configuration (flavours discriminations should stay out)
if self.pp.flavour in ['LCG', 'OSG']:
retCode, CEName = self.executeAndGetOutput('glite-brokerinfo getCE',
self.pp.installEnv)
if retCode:
self.log.warn("Could not get CE name with 'glite-brokerinfo getCE' command [ERROR %d]" % retCode)
if 'OSG_JOB_CONTACT' in os.environ:
# OSG_JOB_CONTACT String specifying the endpoint to use within the job submission
# for reaching the site (e.g. manager.mycluster.edu/jobmanager-pbs )
CE = os.environ['OSG_JOB_CONTACT']
self.pp.ceName = CE.split('/')[0]
if len(CE.split('/')) > 1:
self.pp.queueName = CE.split('/')[1]
else:
self.log.error("CE Name %s not accepted" % CE)
self.exitWithError(retCode)
else:
self.log.error("Can't find ceName nor queue... have to fail!")
sys.exit(1)
else:
self.log.debug("Found CE %s" % CEName)
self.pp.ceName = CEName.split(':')[0]
if len(CEName.split('/')) > 1:
self.pp.queueName = CEName.split('/')[1]
# configureOpts.append( '-N "%s"' % cliParams.ceName )
elif self.pp.flavour == "CREAM":
if 'CE_ID' in os.environ:
self.log.debug("Found CE %s" % os.environ['CE_ID'])
self.pp.ceName = os.environ['CE_ID'].split(':')[0]
if os.environ['CE_ID'].count("/"):
self.pp.queueName = os.environ['CE_ID'].split('/')[1]
else:
self.log.error("Can't find queue name")
sys.exit(1)
else:
self.log.error("Can't find CE name")
sys.exit(1)
class ConfigureArchitecture(CommandBase):
""" This command simply calls dirac-platfom to determine the platform.
Separated from the ConfigureDIRAC command for easier extensibility.
"""
def execute(self):
""" This is a simple command to call the dirac-platform utility to get the platform, and add it to the configuration
The architecture script, as well as its options can be replaced in a pilot extension
"""
cfg = []
if self.pp.useServerCertificate:
cfg.append('-o /DIRAC/Security/UseServerCertificate=yes')
if self.pp.localConfigFile:
cfg.append(self.pp.localConfigFile) # this file is as input
architectureCmd = "%s %s" % (self.pp.architectureScript, " ".join(cfg))
retCode, localArchitecture = self.executeAndGetOutput(architectureCmd, self.pp.installEnv)
if retCode:
self.log.error("There was an error updating the platform [ERROR %d]" % retCode)
self.exitWithError(retCode)
self.log.debug("Architecture determined: %s" % localArchitecture)
# standard options
cfg = ['-FDMH'] # force update, skip CA checks, skip CA download, skip VOMS
if self.pp.useServerCertificate:
cfg.append('--UseServerCertificate')
if self.pp.localConfigFile:
cfg.append('-O %s' % self.pp.localConfigFile) # our target file for pilots
cfg.append(self.pp.localConfigFile) # this file is also an input
if self.pp.debugFlag:
cfg.append("-ddd")
# real options added here
localArchitecture = localArchitecture.strip()
cfg.append('-S "%s"' % self.pp.setup)
cfg.append('-o /LocalSite/Architecture=%s' % localArchitecture)
configureCmd = "%s %s" % (self.pp.configureScript, " ".join(cfg))
retCode, _configureOutData = self.executeAndGetOutput(configureCmd, self.pp.installEnv)
if retCode:
self.log.error("Configuration error [ERROR %d]" % retCode)
self.exitWithError(retCode)
return localArchitecture
class ConfigureCPURequirements(CommandBase):
""" This command determines the CPU requirements. Needs to be executed after ConfigureSite
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(ConfigureCPURequirements, self).__init__(pilotParams)
def execute(self):
""" Get job CPU requirement and queue normalization
"""
# Determining the CPU normalization factor and updating pilot.cfg with it
configFileArg = ''
if self.pp.useServerCertificate:
configFileArg = '-o /DIRAC/Security/UseServerCertificate=yes'
if self.pp.localConfigFile:
configFileArg = '%s -R %s %s' % (configFileArg, self.pp.localConfigFile, self.pp.localConfigFile)
retCode, cpuNormalizationFactorOutput = self.executeAndGetOutput(
'dirac-wms-cpu-normalization -U %s' % configFileArg, self.pp.installEnv)
if retCode:
self.log.error("Failed to determine cpu normalization [ERROR %d]" % retCode)
self.exitWithError(retCode)
# HS06 benchmark
# FIXME: this is a (necessary) hack!
cpuNormalizationFactor = float(cpuNormalizationFactorOutput.split('\n')[0].replace("Estimated CPU power is ",
'').replace(" HS06", ''))
self.log.info(
"Current normalized CPU as determined by 'dirac-wms-cpu-normalization' is %f" %
cpuNormalizationFactor)
configFileArg = ''
if self.pp.useServerCertificate:
configFileArg = '-o /DIRAC/Security/UseServerCertificate=yes'
retCode, cpuTimeOutput = self.executeAndGetOutput('dirac-wms-get-queue-cpu-time %s %s' % (configFileArg,
self.pp.localConfigFile),
self.pp.installEnv)
if retCode:
self.log.error("Failed to determine cpu time left in the queue [ERROR %d]" % retCode)
self.exitWithError(retCode)
for line in cpuTimeOutput.split('\n'):
if "CPU time left determined as" in line:
# FIXME: this is horrible
cpuTime = int(line.replace("CPU time left determined as", '').strip())
self.log.info("CPUTime left (in seconds) is %s" % cpuTime)
# HS06s = seconds * HS06
try:
# determining the CPU time left (in HS06s)
self.pp.jobCPUReq = float(cpuTime) * float(cpuNormalizationFactor)
self.log.info("Queue length (which is also set as CPUTimeLeft) is %f" % self.pp.jobCPUReq)
except ValueError:
self.log.error('Pilot command output does not have the correct format')
sys.exit(1)
# now setting this value in local file
cfg = ['-FDMH']
if self.pp.useServerCertificate:
cfg.append('-o /DIRAC/Security/UseServerCertificate=yes')
if self.pp.localConfigFile:
cfg.append('-O %s' % self.pp.localConfigFile) # our target file for pilots
cfg.append(self.pp.localConfigFile) # this file is also input
cfg.append('-o /LocalSite/CPUTimeLeft=%s' % str(int(self.pp.jobCPUReq))) # the only real option
configureCmd = "%s %s" % (self.pp.configureScript, " ".join(cfg))
retCode, _configureOutData = self.executeAndGetOutput(configureCmd, self.pp.installEnv)
if retCode:
self.log.error("Failed to update CFG file for CPUTimeLeft [ERROR %d]" % retCode)
self.exitWithError(retCode)
class LaunchAgent(CommandBase):
""" Prepare and launch the job agent
"""
def __init__(self, pilotParams):
""" c'tor
"""
super(LaunchAgent, self).__init__(pilotParams)
self.inProcessOpts = []
self.jobAgentOpts = []
def __setInProcessOpts(self):
localUid = os.getuid()
try:
import pwd
localUser = pwd.getpwuid(localUid)[0]
except KeyError:
localUser = 'Unknown'
self.log.info('User Name = %s' % localUser)
self.log.info('User Id = %s' % localUid)
self.inProcessOpts = ['-s /Resources/Computing/CEDefaults']
self.inProcessOpts.append('-o WorkingDirectory=%s' % self.pp.workingDir)
self.inProcessOpts.append('-o /LocalSite/MaxCPUTime=%s' % (int(self.pp.jobCPUReq)))
self.inProcessOpts.append('-o /LocalSite/CPUTime=%s' % (int(self.pp.jobCPUReq)))
# To prevent a wayward agent picking up and failing many jobs.
self.inProcessOpts.append('-o MaxTotalJobs=%s' % 10)
self.jobAgentOpts = ['-o MaxCycles=%s' % self.pp.maxCycles]
if self.debugFlag:
self.jobAgentOpts.append('-o LogLevel=DEBUG')
if self.pp.userGroup:
self.log.debug('Setting DIRAC Group to "%s"' % self.pp.userGroup)
      self.inProcessOpts.append('-o OwnerGroup="%s"' % self.pp.userGroup)
if self.pp.userDN:
self.log.debug('Setting Owner DN to "%s"' % self.pp.userDN)
self.inProcessOpts.append('-o OwnerDN="%s"' % self.pp.userDN)
if self.pp.useServerCertificate:
self.log.debug('Setting UseServerCertificate flag')
self.inProcessOpts.append('-o /DIRAC/Security/UseServerCertificate=yes')
# The instancePath is where the agent works
self.inProcessOpts.append('-o /LocalSite/InstancePath=%s' % self.pp.workingDir)
# The file pilot.cfg has to be created previously by ConfigureDIRAC
if self.pp.localConfigFile:
self.inProcessOpts.append(' -o /AgentJobRequirements/ExtraOptions=%s' % self.pp.localConfigFile)
self.inProcessOpts.append(self.pp.localConfigFile)
def __startJobAgent(self):
""" Starting of the JobAgent
"""
# Find any .cfg file uploaded with the sandbox or generated by previous commands
diracAgentScript = "dirac-agent"
extraCFG = []
for i in os.listdir(self.pp.rootPath):
cfg = os.path.join(self.pp.rootPath, i)
if os.path.isfile(cfg) and cfg.endswith('.cfg'):
extraCFG.append(cfg)
if self.pp.executeCmd:
# Execute user command
self.log.info("Executing user defined command: %s" % self.pp.executeCmd)
self.exitWithError(os.system("source bashrc; %s" % self.pp.executeCmd) / 256)
self.log.info('Starting JobAgent')
os.environ['PYTHONUNBUFFERED'] = 'yes'
jobAgent = '%s WorkloadManagement/JobAgent %s %s %s' % (diracAgentScript,
" ".join(self.jobAgentOpts),
" ".join(self.inProcessOpts),
" ".join(extraCFG))
retCode, _output = self.executeAndGetOutput(jobAgent, self.pp.installEnv)
if retCode:
self.log.error("Error executing the JobAgent [ERROR %d]" % retCode)
self.exitWithError(retCode)
fs = os.statvfs(self.pp.workingDir)
diskSpace = fs[4] * fs[0] / 1024 / 1024
self.log.info('DiskSpace (MB) = %s' % diskSpace)
def execute(self):
""" What is called all the time
"""
self.__setInProcessOpts()
self.__startJobAgent()
sys.exit(0)
|
arrabito/DIRAC
|
WorkloadManagementSystem/PilotAgent/pilotCommands.py
|
Python
|
gpl-3.0
| 38,374
|
[
"DIRAC"
] |
87e76412090fb2fa6cd842ee60fdccd1c9373c1a948dc9088d6c23a1556b044a
|
import pysam
import argparse
import sys
import logging
from collections import OrderedDict
DEBUG = True
NOT_DEBUG = not DEBUG
parser = argparse.ArgumentParser(description="Get read count in chromosomes",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input BAM file', required=NOT_DEBUG)
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output summary file", required=NOT_DEBUG)
args = parser.parse_args()
if DEBUG:
args.input="/scratch/jbrown_lab/shengq2/projects/20201208_chipseq_485_886_hg38/bowtie2_cleanbam/result/No_Treatment_886.noChrM.bam"
args.output=args.input + ".chr_reads"
logger = logging.getLogger('bamReference')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
with pysam.Samfile(args.input, "rb") as sam:
chr_map = OrderedDict()
processed = 0
for read in sam.fetch(until_eof=True):
processed += 1
if processed % 100000 == 0:
logger.info(f"processed {processed}")
if read.is_unmapped:
continue
if read.reference_name in chr_map:
chr_map[read.reference_name] += 1
else:
chr_map[read.reference_name] = 1
with open(args.output, "wt") as fout:
fout.write("Chromosome\tCount\n")
for chr in chr_map.keys():
fout.write("%s\t%d\n" % (chr, chr_map[chr]))
logger.info("done")
|
shengqh/ngsperl
|
lib/Alignment/bamReference.py
|
Python
|
apache-2.0
| 1,447
|
[
"pysam"
] |
f91310f73a4ede164087d31d2791afb3bfd9666e854c3b7b8cae8fa2e157270a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Name: update_cfg_file.py
# Purpose: Module to manipulate distro specific and main config files.
# Authors: Sundar
# Licence: This file is a part of multibootusb package. You can redistribute it or modify
# under the terms of GNU General Public License, v.2 or above
import os
import re
import shutil
from functools import partial
from .usb import *
from .gen import *
from .iso import *
from . import config
from . import grub
from . import menus
from .param_rewrite import add_tokens, remove_tokens, replace_token, \
add_or_replace_kv, replace_kv, remove_keys, \
always, contains_token, contains_all_tokens, contains_any_token, \
contains_key, contains_all_keys, contains_any_key, starter_is_either, _not
def dont_require_tweaking(fname, content, match_start, match_end):
# Avoid fixing a path on a comment line
beginning_of_line = content.rfind('\n', 0, match_start)
if beginning_of_line<0:
beginning_of_line = 0
if content[beginning_of_line:match_start].lstrip()[:1]=='#':
return True
if fname.startswith(('cdrom/', 'dev/')):
return True
if (4 <= match_start and # Don't write an arg of 'init=' param.
content[match_start-4:match_start+1] == 'init='):
        return True
    return False
def fix_abspath_r(pattern, string, install_dir, iso_name, kept_paths):
"""Return a list of tuples consisting of 'string' with replaced path and a bool representing if /boot/ was prepended in the expression."""
m = pattern.search(string)
if not m:
return [(string, False)]
start, end = m.span()
prologue, specified_path = m.group(1), m.group(2)
if dont_require_tweaking(specified_path, string, start, end):
return [(string[:start] + prologue + '/' + specified_path,
'/%s is kept as is.' % specified_path)] \
+ fix_abspath_r(pattern, string[end:], install_dir, iso_name,
kept_paths)
# See if a path that has 'boot/' prepended is a better choice.
# E.g. Debian debian-live-9.4.0-amd64-cinnamon has a loopback.cfg
# which contains "source /grub/grub.cfg".
specified_path_exists = os.path.exists(
os.path.join(install_dir, specified_path))
if specified_path_exists:
# Confidently accept what is specified.
selected_path, fixed = specified_path, False
elif os.path.exists(os.path.join(install_dir, 'boot', specified_path)):
selected_path, fixed = ('boot/' + specified_path,
"Prepended '/boot/' to %s" % specified_path)
# A path specified by 'preseed/file=' or 'file=' is utilized
# after OS boots up. Doing this for grub is moot.
#elif specified_path.startswith('cdrom/') and \
# os.path.exists(os.path.join(install_dir, # len('cdrom/') => 6
# specified_path[6:])):
# # See /boot/grub/loopback.cfg in
# # ubuntu-14.04.5-desktop-amd64.iso for an example of this case.
# selected_path, fixed = specified_path[6:], "Removed '/cdrom/'"
elif specified_path.endswith('.efi') and \
os.path.exists(os.path.join(install_dir, specified_path[:-4])):
# Avira-RS provides boot/grub/loopback.cfg which points
# to non-existent /boot/grub/vmlinuz.efi.
selected_path, fixed = (specified_path[:-4],
"Removed '.efi' from %s" % specified_path)
else:
# Reluctantly accept what is specified.
if specified_path not in kept_paths:
kept_paths.append(specified_path)
selected_path, fixed = specified_path, False
out = string[:start] + prologue + '/multibootusb/' + iso_name + '/' \
+ selected_path.replace('\\', '/')
return [(out, fixed)] \
+ fix_abspath_r(pattern, string[end:], install_dir, iso_name,
kept_paths)
def fix_abspath(string, install_dir, iso_name, config_fname):
"""Rewrite what appear to be a path within 'string'. If a file does not exist with specified path, one with '/boot' prepended is tried."""
path_expression = re.compile(r'([ \t=,])/(.*?)((?=[,|\s*])|$)')
kept_paths = []
chunks = fix_abspath_r(
path_expression, string, install_dir, iso_name, kept_paths)
if len(kept_paths)==1:
log("In '%s', '/%s' is kept as is though it does not exist."
% (config_fname, kept_paths[0]))
elif 2<=len(kept_paths):
log("In '%s', "
"following paths are used as they are though they don't exist."
% config_fname)
for kept_path in kept_paths:
log(' /' + kept_path)
tweaked_chunks = [c for c in chunks if c[1]]
if len(tweaked_chunks) == 0:
# Fallback to the legacy implementation so that
# this tweak brings as little breakage as possible.
replace_text = r'\1/multibootusb/' + iso_name + '/'
return re.sub(r'([ \t =,])/', replace_text, string)
else:
log("Applied %s on '%s' as shown below:" %
(len(tweaked_chunks)==1 and 'a rewrite exception' or
('%d rewrite exceptions' % len(tweaked_chunks)), config_fname))
count_dict = {}
for path, op_desc in tweaked_chunks:
count_dict.setdefault(op_desc, []).append((path,op_desc))
for op_desc, sub_chunks in count_dict.items():
log(" %s [%d]" % (op_desc, len(sub_chunks)))
return ''.join([c[0] for c in chunks])
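# Illustrative sketch (hypothetical paths): with
# install_dir='/media/usb/multibootusb/somedistro' and iso_name='somedistro',
# a config line such as
#
#     kernel /casper/vmlinuz
#
# is rewritten to
#
#     kernel /multibootusb/somedistro/casper/vmlinuz
#
# and '/boot/' is prepended (or a trailing '.efi' dropped) only when that makes
# the referenced file actually exist under install_dir, as handled in
# fix_abspath_r() above.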
def update_distro_cfg_files(iso_link, usb_disk, distro, persistence=0):
"""
Main function to modify/update distro specific strings on distro config files.
:return:
"""
try:
usb_details = details(config.usb_disk)
except PartitionNotMounted as e:
log(str(e))
return
usb_mount = usb_details['mount_point']
usb_uuid = usb_details['uuid']
usb_label = usb_details['label']
usb_fs_type = usb_details['file_system']
# iso_cfg_ext_dir = os.path.join(multibootusb_host_dir(), "iso_cfg_ext_dir")
config.status_text = "Updating config files..."
_iso_name = iso_basename(iso_link)
install_dir = os.path.join(usb_mount, "multibootusb", _iso_name)
install_dir_for_grub = '/multibootusb/%s' % _iso_name
log('Updating distro specific config files...')
tweaker_params = ConfigTweakerParam(
iso_link, install_dir_for_grub,
persistence, usb_uuid, usb_mount, usb_disk, usb_fs_type)
tweaker_class_dict = {
'ubuntu' : UbuntuConfigTweaker,
'debian' : DebianConfigTweaker,
'debian-install' : DebianConfigTweaker,
'gentoo' : GentooConfigTweaker,
'centos' : FedoraConfigTweaker,
'centos-install' : FedoraConfigTweaker,
'fedora' : FedoraConfigTweaker,
'antix' : AntixConfigTweaker,
'salix-live' : SalixConfigTweaker,
'wifislax' : WifislaxConfigTweaker,
}
tweaker_class = tweaker_class_dict.get(distro)
for dirpath, dirnames, filenames in os.walk(install_dir):
for f in filenames:
if f.endswith(".cfg") or f.endswith('.CFG') or f.endswith('.lst') or f.endswith('.conf'):
cfg_file = os.path.join(dirpath, f)
try:
string = open(cfg_file, errors='ignore').read()
except IOError:
log("Unable to read %s" % cfg_file)
else:
if not distro == "generic":
string = fix_abspath(string, install_dir, _iso_name,
os.path.join(dirpath, f))
string = re.sub(r'linuxefi', 'linux', string)
string = re.sub(r'initrdefi', 'initrd', string)
if tweaker_class:
tweaker = tweaker_class(distro, tweaker_params)
string = tweaker.tweak(string)
elif distro == 'grml':
string = re.sub(r'live-media-path=', 'ignore_bootid live-media-path=', string)
elif distro == "ubuntu-server":
string = re.sub(r'file',
'cdrom-detect/try-usb=true floppy.allowed_drive_mask=0 ignore_uuid ignore_bootid root=UUID=' +
usb_uuid + ' file', string)
elif distro == 'kaspersky':
if not os.path.exists(os.path.join(usb_mount, 'multibootusb', iso_basename(iso_link), 'kaspersky.cfg')):
shutil.copyfile(resource_path(os.path.join('data', 'multibootusb', 'syslinux.cfg')),
os.path.join(usb_mount, 'multibootusb', iso_basename(iso_link), 'kaspersky.cfg'))
config_string = kaspersky_config('kaspersky')
config_string = config_string.replace('$INSTALL_DIR', '/multibootusb/' + iso_basename(iso_link))
config_string = re.sub(r'root=live:UUID=', 'root=live:UUID=' + usb_uuid, config_string)
with open(os.path.join(usb_mount, 'multibootusb', iso_basename(iso_link), 'kaspersky.cfg'), "a") as f:
f.write(config_string)
elif distro == "parted-magic":
if re.search(r'append', string, re.I):
string = re.sub(r'append', 'append directory=/multibootusb/' + iso_basename(iso_link), string,
flags=re.I)
string = re.sub(r'initrd=', 'directory=/multibootusb/' + iso_basename(iso_link) + '/ initrd=',
string)
string = re.sub(r'linux_64=\"', 'linux_64=\"/multibootusb/' + iso_basename(iso_link), string,
flags=re.I)
string = re.sub(r'linux_32=\"', 'linux_32=\"/multibootusb/' + iso_basename(iso_link), string,
flags=re.I)
string = re.sub(r'initrd_img=\"', 'initrd_img=\"/multibootusb/' + iso_basename(iso_link), string,
flags=re.I)
string = re.sub(r'initrd_img32=\"', 'initrd_img32=\"/multibootusb/' + iso_basename(iso_link), string,
flags=re.I)
string = re.sub(r'default_settings=\"', 'default_settings=\"directory=/multibootusb/' + iso_basename(iso_link) + ' ', string,
flags=re.I)
string = re.sub(r'live_settings=\"', 'live_settings=\"directory=/multibootusb/' + iso_basename(iso_link) + ' ', string,
flags=re.I)
elif distro == "ubcd":
string = re.sub(r'iso_filename=\S*', 'directory=/multibootusb/' + iso_basename(iso_link),
string, flags=re.I)
elif distro == 'f4ubcd':
if not 'multibootusb' in string:
string = re.sub(r'/HBCD', '/multibootusb/' + iso_basename(iso_link) + '/HBCD', string)
if not 'multibootusb' in string:
string = re.sub(r'/F4UBCD', '/multibootusb/' + iso_basename(iso_link) + '/F4UBCD', string)
elif distro == "ipcop":
string = re.sub(r'ipcopboot=cdrom\S*', 'ipcopboot=usb', string)
elif distro == "puppy":
if 'pmedia=cd' in string:
string = re.sub(r'pmedia=cd\S*',
'pmedia=usbflash psubok=TRUE psubdir=/multibootusb/' + iso_basename(iso_link) + '/',
string)
elif 'rootfstype' in string:
string = re.sub(r'rootfstype',
'pmedia=usbflash psubok=TRUE psubdir=/multibootusb/' + iso_basename(iso_link) + '/ rootfstype',
string)
elif distro == "slax":
string = re.sub(r'initrd=',
r'from=/multibootusb/' + iso_basename(iso_link) + '/slax changes=/multibootusb/' + iso_basename(iso_link) + '/slax fromusb initrd=', string)
elif distro == "finnix":
string = re.sub(r'initrd=',
r'finnixdir=/multibootusb/' + iso_basename(iso_link) + '/finnix initrd=', string)
elif distro == "knoppix":
string = re.sub(r'initrd=', 'knoppix_dir=/multibootusb/' + iso_basename(iso_link) + '/KNOPPIX initrd=', string)
elif distro == "systemrescuecd":
rows = []
subdir = '/multibootusb/' + iso_basename(iso_link)
for line in string.splitlines(True):
addline = True
if re.match(r'append.*--.*', line, flags=re.I):
line = re.sub(r'(append)(.*)--(.*)', r'\1\2subdir=' + subdir + r' --\3 subdir=' + subdir,
line, flags=re.I)
elif re.match(r'append', line, flags=re.I):
line = re.sub(r'(append)', r'\1 subdir=' + subdir, line, flags=re.I)
elif re.match(r'label rescue(32|64)_1', line, flags=re.I):
rows.append(line)
rows.append('append subdir=%s\n' % (subdir,))
addline = False
if addline:
rows.append(line)
string = ''.join(rows)
elif distro in ["arch", "chakra"]:
string = re.sub(r'isolabel=\S*',
'isodevice=/dev/disk/by-uuid/' + usb_uuid, string, flags=re.I)
string = re.sub(r'isobasedir=',
'isobasedir=/multibootusb/' + iso_basename(iso_link) + '/', string, flags=re.I)
string = commentout_gfxboot(string)
string = string.replace('%INSTALL_DIR%', 'arch')
if 'manjaro' in string:
if not os.path.exists(os.path.join(usb_mount, '.miso')):
with open(os.path.join(usb_mount, '.miso'), "w") as f:
f.write('')
elif distro == "kaos":
string = re.sub(r'kdeosisolabel=\S*',
'kdeosisodevice=/dev/disk/by-uuid/' + usb_uuid, string, flags=re.I)
string = re.sub(r'append',
'append kdeosisobasedir=/multibootusb/' + iso_basename(iso_link) + '/kdeos/', string, flags=re.I)
string = commentout_gfxboot(string)
elif distro in ["suse", "opensuse"]:
if re.search(r'opensuse_12', string, re.I):
string = re.sub(r'append',
'append loader=syslinux isofrom_system=/dev/disk/by-uuid/' + usb_uuid + ":/" +
iso_name(iso_link), string, flags=re.I)
else:
string = re.sub(r'append',
'append loader=syslinux isofrom_device=/dev/disk/by-uuid/' + usb_uuid +
' isofrom_system=/multibootusb/' + iso_basename(iso_link) + '/' + iso_name(iso_link),
string, flags=re.I)
elif distro == 'opensuse-install':
string = re.sub(r'splash=silent', 'splash=silent install=hd:/dev/disk/by-uuid/'
+ config.usb_uuid + '/multibootusb/' + iso_basename(iso_link), string)
elif distro == "pclinuxos":
string = re.sub(r'livecd=',
'fromusb livecd=' + '/multibootusb/' + iso_basename(iso_link) + '/',
string)
string = re.sub(r'prompt', '#prompt', string)
string = commentout_gfxboot(string)
string = re.sub(r'timeout', '#timeout', string)
elif distro == "wifislax":
string = re.sub(r'vmlinuz',
'vmlinuz from=multibootusb/' + iso_basename(iso_link) + ' noauto', string)
string = re.sub(r'vmlinuz2',
'vmlinuz2 from=multibootusb/' + iso_basename(iso_link) + ' noauto', string)
elif distro == "porteus":
string = re.sub(r'APPEND',
'APPEND from=/multibootusb/' + iso_basename(iso_link) + ' noauto', string)
string = re.sub(r'vmlinuz2',
'vmlinuz2 from=multibootusb/' + iso_basename(iso_link) + ' noauto', string)
elif distro == "hbcd":
if not 'multibootusb' in string:
string = re.sub(r'/HBCD', '/multibootusb/' + iso_basename(iso_link) + '/HBCD', string)
elif distro == "zenwalk":
string = re.sub(r'initrd=',
'from=/multibootusb/' + iso_basename(iso_link) + '/' + iso_name(iso_link) + ' initrd=',
string)
elif distro == "mageialive":
string = re.sub(r'LABEL=\S*', 'UUID=' + usb_uuid + ' mgalive.basedir=/multibootusb/' + iso_basename(iso_link),
string)
elif distro == "solydx":
string = re.sub(r'live-media-path=', 'live-media-path=/multibootusb/' + iso_basename(iso_link),
string)
elif distro == 'alt-linux':
string = re.sub(r':cdrom', ':disk', string)
elif distro == 'fsecure':
string = re.sub(r'APPEND ramdisk_size', 'APPEND noprompt ' + 'knoppix_dir=/multibootusb/' + iso_basename(iso_link)
+ '/KNOPPIX ramdisk_size', string)
elif distro == 'alpine':
string = re.sub(r'modules', 'alpine_dev=usbdisk:vfat modules', string)
elif config.distro == 'trinity-rescue':
# USB disk must have volume label to work properly
string = re.sub(r'initrd=', 'vollabel=' + config.usb_label + ' initrd=', string)
string = re.sub(r'root=\S*', 'root=/dev/ram0', string, flags=re.I)
config_file = open(cfg_file, "w")
config_file.write(string)
config_file.close()
update_mbusb_cfg_file(iso_link, usb_uuid, usb_mount, distro)
grub.mbusb_update_grub_cfg()
# copy isolinux.cfg file to syslinux.cfg for grub to boot.
def copy_to_syslinux_cfg_callback(dir_, fname):
if not fname.lower().endswith('isolinux.cfg'):
return
isolinux_cfg_path = os.path.join(dir_, fname)
syslinux_cfg_fname = fname.lower().replace('isolinux.cfg','syslinux.cfg')
syslinux_cfg_path = os.path.join(dir_, syslinux_cfg_fname)
if os.path.exists(syslinux_cfg_path):
return # don't overwrite.
try:
shutil.copyfile(isolinux_cfg_path, syslinux_cfg_path)
except Exception as e:
            log('Copying %s in %s to %s failed...' % (
                fname, dir_, syslinux_cfg_fname))
log(e)
    def fix_desktop_image_in_theme_callback(install_dir_for_grub,
dir_, fname):
if not fname.lower().endswith('.txt'):
return
theme_file = os.path.join(dir_, fname)
updated = False
with open(theme_file, 'r', encoding='utf-8') as f:
pattern = re.compile(r'^desktop-image\s*:\s*(.*)$')
try:
src_lines = f.readlines()
except UnicodeDecodeError:
log("Unexpected encoding in %s" % theme_file)
return
lines = []
for line in src_lines:
line = line.rstrip()
m = pattern.match(line)
if m and m.group(1).startswith(('/', '"/')):
log("Updating '%s' in %s" % (line,theme_file))
updated = True
partial_path = m.group(1).strip('"').lstrip('/')
line = 'desktop-image: "%s/%s"' % \
(install_dir_for_grub, partial_path)
lines.append(line)
if updated:
with open(theme_file, 'w') as f:
f.write('\n'.join(lines))
visitor_callbacks = [
# Ensure that isolinux.cfg file is copied as syslinux.cfg
# to boot correctly.
copy_to_syslinux_cfg_callback,
# Rewrite 'desktop-image: ...' line in a theme definition file
        # so that a background image is displayed during boot item selection.
# This tweak was first introduced for kali-linux-light-2018-1.
        partial(fix_desktop_image_in_theme_callback, install_dir_for_grub),
]
# Now visit the tree.
for dirpath, dirnames, filenames in os.walk(install_dir):
for f in filenames:
for callback in visitor_callbacks:
callback(dirpath, f)
    # Ascertain that the entry was made.
sys_cfg_file = os.path.join(config.usb_mount, "multibootusb", "syslinux.cfg")
if gen.check_text_in_file(sys_cfg_file, iso_basename(config.image_path)):
log('Updated entry in syslinux.cfg...')
else:
log('Unable to update entry in syslinux.cfg...')
# Check if bootx64.efi is replaced by distro
efi_grub_img = os.path.join(config.usb_mount, 'EFI', 'BOOT', 'bootx64.efi')
if not os.path.exists(efi_grub_img):
gen.log('EFI image does not exist. Copying now...')
shutil.copy2(resource_path(os.path.join("data", "EFI", "BOOT", "bootx64.efi")),
os.path.join(config.usb_mount, 'EFI', 'BOOT'))
elif gen.grub_efi_exist(efi_grub_img) is False:
if distro == "Windows":
gen.log('EFI image overwritten by Windows install. Moving it now...')
dst = os.path.join(config.usb_mount, 'EFI', 'BOOT_WINDOWS')
os.makedirs(dst)
shutil.move(efi_grub_img, dst)
else:
gen.log('EFI image overwritten by distro install. Replacing it now...')
shutil.copy2(resource_path(os.path.join("data", "EFI", "BOOT", "bootx64.efi")),
os.path.join(config.usb_mount, 'EFI', 'BOOT'))
else:
        gen.log('multibootusb EFI image already exists. Not copying...')
# Work around a bug in the isolinux package: the graphical gfxboot menu can
# lock up, so 'ui ... gfxboot.c32' directives are commented out.
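# For example, a menu line such as 'ui gfxboot.c32 bootlogo' (a typical
# syslinux entry; the argument is only illustrative) comes back as
# '# ui gfxboot.c32 bootlogo' from commentout_gfxboot().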
def commentout_gfxboot(input_text):
return re.sub(r'(ui\s+.*?gfxboot\.c32.*)$', r'# \1', input_text,
flags=re.I | re.MULTILINE)
def update_mbusb_cfg_file(iso_link, usb_uuid, usb_mount, distro):
"""
Update main multibootusb syslinux.cfg file after distro is installed.
:return:
"""
log('Updating multibootusb config file...')
name_from_iso = iso_basename(iso_link)
name_of_iso = iso_name(iso_link)
_isolinux_bin_exists = isolinux_bin_exist(config.image_path)
_isolinux_bin_dir = isolinux_bin_dir(iso_link)
sys_cfg_file = os.path.join(usb_mount, "multibootusb", "syslinux.cfg")
install_dir = os.path.join(usb_mount, "multibootusb", name_from_iso)
label = name_from_iso + ('' if _isolinux_bin_exists else ' via GRUB')
if os.path.exists(sys_cfg_file):
if distro == "hbcd":
            if os.path.exists(os.path.join(usb_mount, "multibootusb", "menu.lst")):
                _config_file = os.path.join(usb_mount, "multibootusb", "menu.lst")
                # Rewrite the HBCD paths inside the existing menu.lst so they
                # point at this ISO's install directory.
                with open(_config_file) as config_file:
                    string = config_file.read()
                string = re.sub(r'/HBCD', '/multibootusb/' + name_from_iso + '/HBCD', string)
                with open(_config_file, "w") as config_file:
                    config_file.write(string)
with open(sys_cfg_file, "a") as f:
f.write("#start " + iso_basename(config.image_path) + "\n")
f.write("LABEL " + label + "\n")
f.write("MENU LABEL " + label + "\n")
f.write("BOOT " + '/multibootusb/' + name_from_iso + '/' + _isolinux_bin_dir.replace("\\", "/") + '/' + distro + '.bs' + "\n")
f.write("#end " + iso_basename(config.image_path) + "\n")
elif distro == "Windows":
if os.path.exists(sys_cfg_file):
config_file = open(sys_cfg_file, "a")
config_file.write("#start " + name_from_iso + "\n")
config_file.write("LABEL " + label + "\n")
config_file.write("MENU LABEL " + label + "\n")
config_file.write("KERNEL chain.c32 hd0 1 ntldr=/bootmgr" + "\n")
config_file.write("#end " + name_from_iso + "\n")
config_file.close()
elif distro == 'f4ubcd':
if os.path.exists(sys_cfg_file):
config_file = open(sys_cfg_file, "a")
config_file.write("#start " + name_from_iso + "\n")
config_file.write("LABEL " + label + "\n")
config_file.write("MENU LABEL " + label + "\n")
config_file.write("KERNEL grub.exe" + "\n")
config_file.write('APPEND --config-file=/multibootusb/' + iso_basename(config.image_path) + '/menu.lst' + "\n")
config_file.write("#end " + name_from_iso + "\n")
config_file.close()
elif distro == 'kaspersky':
if os.path.exists(sys_cfg_file):
config_file = open(sys_cfg_file, "a")
config_file.write("#start " + name_from_iso + "\n")
config_file.write("LABEL " + label + "\n")
config_file.write("MENU LABEL " + label + "\n")
config_file.write("CONFIG " + '/multibootusb/' + iso_basename(config.image_path) + '/kaspersky.cfg' + "\n")
config_file.write("#end " + name_from_iso + "\n")
config_file.close()
elif distro == 'grub4dos':
update_menu_lst()
elif distro == 'grub4dos_iso':
update_grub4dos_iso_menu()
else:
config_file = open(sys_cfg_file, "a")
config_file.write("#start " + name_from_iso + "\n")
config_file.write("LABEL " + label + "\n")
config_file.write("MENU LABEL " + label + "\n")
if distro == "salix-live":
if os.path.exists(
os.path.join(install_dir, 'boot', 'grub2-linux.img')):
config_file.write(
"LINUX " + '/multibootusb/' + name_from_iso +
'/boot/grub2-linux.img' + "\n")
else:
config_file.write("BOOT " + '/multibootusb/' + name_from_iso + '/' + _isolinux_bin_dir.replace("\\", "/") + '/' + distro + '.bs' + "\n")
elif distro == "pclinuxos":
config_file.write("kernel " + '/multibootusb/' + name_from_iso
+ '/isolinux/vmlinuz' + "\n")
config_file.write("append livecd=livecd root=/dev/rd/3 acpi=on vga=788 keyb=us vmalloc=256M nokmsboot "
"fromusb root=UUID=" + usb_uuid + " bootfromiso=/multibootusb/" +
name_from_iso + "/" + name_of_iso + " initrd=/multibootusb/"
+ name_from_iso + '/isolinux/initrd.gz' + "\n")
elif distro == "memtest":
config_file.write("kernel " + '/multibootusb/' + name_from_iso + '/BOOT/MEMTEST.IMG\n')
elif distro == "sgrubd2" or config.distro == 'raw_iso':
config_file.write("LINUX memdisk\n")
config_file.write("INITRD " + "/multibootusb/" + name_from_iso + '/' + name_of_iso + '\n')
config_file.write("APPEND iso\n")
elif distro == 'ReactOS':
config_file.write("COM32 mboot.c32" + '\n')
config_file.write("APPEND /loader/setupldr.sys" + '\n')
elif distro == 'pc-unlocker':
config_file.write("kernel ../ldntldr" + '\n')
config_file.write("append initrd=../ntldr" + '\n')
elif distro == 'pc-tool':
config_file.write(menus.pc_tool_config(syslinux=True, grub=False))
elif distro == 'grub2only':
config_file.write(menus.grub2only())
elif distro == 'memdisk_iso':
config_file.write(menus.memdisk_iso_cfg(syslinux=True, grub=False))
elif distro == 'memdisk_img':
config_file.write(menus.memdisk_img_cfg(syslinux=True, grub=False))
else:
if _isolinux_bin_exists is True:
if distro == "generic":
distro_syslinux_install_dir = _isolinux_bin_dir
if _isolinux_bin_dir != "/":
distro_sys_install_bs = os.path.join(usb_mount, _isolinux_bin_dir) + '/' + distro + '.bs'
else:
distro_sys_install_bs = '/' + distro + '.bs'
else:
distro_syslinux_install_dir = install_dir
distro_syslinux_install_dir = distro_syslinux_install_dir.replace(usb_mount, '')
distro_sys_install_bs = distro_syslinux_install_dir + '/' + _isolinux_bin_dir + '/' + distro + '.bs'
distro_sys_install_bs = "/" + distro_sys_install_bs.replace("\\", "/") # Windows path issue.
if config.syslinux_version == '3':
config_file.write("CONFIG /multibootusb/" + name_from_iso + '/' + _isolinux_bin_dir.replace("\\", "/") + '/isolinux.cfg\n')
config_file.write("APPEND /multibootusb/" + name_from_iso + '/' + _isolinux_bin_dir.replace("\\", "/") + '\n')
config_file.write("# Delete or comment above two lines using # and remove # from below line if "
"you get not a COM module error.\n")
config_file.write("#BOOT " + distro_sys_install_bs.replace("//", "/") + "\n")
else:
config_file.write("BOOT " + distro_sys_install_bs.replace("//", "/") + "\n")
else:
# isolinux_bin does not exist.
config_file.write('Linux /multibootusb/grub/lnxboot.img\n')
config_file.write('INITRD /multibootusb/grub/core.img\n')
config_file.write('TEXT HELP\n')
config_file.write('Booting via syslinux is not supported. '
'Please boot via GRUB\n')
config_file.write('ENDTEXT\n')
config_file.write("#end " + name_from_iso + "\n")
config_file.close()
# Update extlinux.cfg file by copying updated syslinux.cfg
shutil.copy(os.path.join(usb_mount, 'multibootusb', 'syslinux.cfg'),
os.path.join(usb_mount, 'multibootusb', 'extlinux.cfg'))
def kaspersky_config(distro):
if distro == 'kaspersky':
return """
menu label Kaspersky Rescue Disk
kernel $INSTALL_DIR/boot/rescue
append root=live:UUID= live_dir=$INSTALL_DIR/rescue/LiveOS/ subdir=$INSTALL_DIR/rescue/LiveOS/ looptype=squashfs rootfstype=auto vga=791 init=/linuxrc loop=$INSTALL_DIR/rescue/LiveOS/squashfs.img initrd=$INSTALL_DIR/boot/rescue.igz lang=en udev liveimg splash quiet doscsi nomodeset
label text
menu label Kaspersky Rescue Disk - Text Mode
kernel $INSTALL_DIR/boot/rescue
append root=live:UUID= live_dir=$INSTALL_DIR/rescue/LiveOS/ subdir=$INSTALL_DIR/rescue/LiveOS/ rootfstype=auto vga=791 init=/linuxrc loop=/multiboot/rescue/LiveOS/squashfs.img initrd=$INSTALL_DIR/boot/rescue.igz SLUG_lang=en udev liveimg quiet nox shell noresume doscsi nomodeset
label hwinfo
menu label Kaspersky Hardware Info
kernel $INSTALL_DIR/boot/rescue
append root=live:UUID= live_dir=$INSTALL_DIR/rescue/LiveOS/ subdir=$INSTALL_DIR/rescue/LiveOS/ rootfstype=auto vga=791 init=/linuxrc loop=$INSTALL_DIR/rescue/LiveOS/squashfs.img initrd=$INSTALL_DIR/boot/rescue.igz SLUG_lang=en udev liveimg quiet softlevel=boot nox hwinfo noresume doscsi nomodeset """
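# The '$INSTALL_DIR' placeholders above are filled in later by
# update_distro_cfg_files(), which replaces them with
# '/multibootusb/<iso basename>' before appending the entries to kaspersky.cfg.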
def update_menu_lst():
sys_cfg_file = os.path.join(config.usb_mount, "multibootusb", "syslinux.cfg")
# install_dir = os.path.join(config.usb_mount, "multibootusb", iso_basename(config.image_path))
menu_lst = iso_menu_lst_path(config.image_path).replace("\\", "/")
with open(sys_cfg_file, "a") as f:
f.write("#start " + iso_basename(config.image_path) + "\n")
f.write("LABEL " + iso_basename(config.image_path) + "\n")
f.write("MENU LABEL " + iso_basename(config.image_path) + "\n")
f.write("KERNEL grub.exe" + "\n")
f.write('APPEND --config-file=/' + menu_lst + "\n")
f.write("#end " + iso_basename(config.image_path) + "\n")
def update_grub4dos_iso_menu():
sys_cfg_file = os.path.join(config.usb_mount, "multibootusb",
"syslinux.cfg")
install_dir = os.path.join(config.usb_mount, "multibootusb",
iso_basename(config.image_path))
menu_lst_file = os.path.join(install_dir, 'menu.lst')
with open(menu_lst_file, "w") as f:
f.write("title Boot " + iso_name(config.image_path) + "\n")
f.write("find --set-root --ignore-floppies --ignore-cd /multibootusb/" + iso_basename(config.image_path) + '/'
+ iso_name(config.image_path) + "\n")
f.write("map --heads=0 --sectors-per-track=0 /multibootusb/" + iso_basename(config.image_path)
+ '/' + iso_name(config.image_path) + ' (hd32)' + "\n")
f.write("map --hook" + "\n")
f.write("chainloader (hd32)")
with open(sys_cfg_file, "a") as f:
f.write("#start " + iso_basename(config.image_path) + "\n")
f.write("LABEL " + iso_basename(config.image_path) + "\n")
f.write("MENU LABEL " + iso_basename(config.image_path) + "\n")
f.write("KERNEL grub.exe" + "\n")
f.write('APPEND --config-file=/multibootusb/' + iso_basename(config.image_path) + '/menu.lst' + "\n")
f.write("#end " + iso_basename(config.image_path) + "\n")
class ConfigTweakerParam:
# 'iso_link' is also known as 'image_path'
def __init__(self, iso_link, distro_path, persistence_size,
usb_uuid, usb_mount, usb_disk, usb_fs_type):
self.iso_fname = os.path.split(iso_link)[1]
self.distro_name = os.path.splitext(self.iso_fname)[0]
assert distro_path[0] == '/'
self.distro_path = distro_path # drive relative
self.persistence_size = persistence_size
self.usb_uuid = usb_uuid
self.usb_mount = usb_mount
self.usb_disk = usb_disk
self.usb_fs_type = usb_fs_type
class ConfigTweaker:
BOOT_PARAMS_STARTER = 'kernel|append|linux'
def __init__(self, distro_type, setup_params):
self.disto_type = distro_type
self.setup_params = setup_params
def tweak_first_match(self, content, kernel_param_line_pattern,
apply_persistence_to_all_lines,
param_operations,
param_operations_for_persistence):
"""Perofrm specified parameter modification to the first maching
line and return the concatination of the string leading up to the
match and the tweaked paramer line. If no match is found,
unmodified 'content' is returned.
"""
m = kernel_param_line_pattern.search(content)
if m is None:
return content
start, end = m.span()
upto_match, rest_of_content = content[:start], content[end:]
starter_part, starter_token, params_part = [
m.group(i) for i in [1,2,3]]
params = params_part.split(' ')
if apply_persistence_to_all_lines or \
self.has_persistency_param(params):
ops_to_apply = param_operations + \
param_operations_for_persistence
else:
ops_to_apply = param_operations
for op_or_op_list, precondition in ops_to_apply:
if not precondition(starter_token, params):
continue
try:
iter(op_or_op_list)
op_list = op_or_op_list
except TypeError:
op_list = [op_or_op_list]
for op in op_list:
params = op(params)
        # The '---' separator is treated specially: keep it at the end of the
        # parameter list.
three_dashes = '---'
if three_dashes in params:
params.remove(three_dashes)
params.append(three_dashes)
return upto_match + starter_part + ' '.join(params) + \
self.tweak_first_match(
rest_of_content, kernel_param_line_pattern,
apply_persistence_to_all_lines,
param_operations, param_operations_for_persistence)
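    # Minimal illustration (taken from the self-tests further below) of what
    # tweak()/tweak_first_match() produce for DebianConfigTweaker:
    #   input : "\tlinux\tfoo persistence boot=live in the middle"
    #   output: the same line with " ignore_bootid"
    #           " live-media-path=/multibootusb/{iso-name}/live"
    #           " persistence-path=/multibootusb/{iso-name}" appended.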
def legacy_tweak(self, content):
return None
def tweak(self, content):
tweaked = self.legacy_tweak(content)
if tweaked:
return tweaked
apply_persistence_to_all_lines = \
0 < self.setup_params.persistence_size and \
not self.config_is_persistence_aware(content)
matching_re = r'^(\s*(%s)\s*)(.*)$' % self.BOOT_PARAMS_STARTER
kernel_parameter_line_pattern = re.compile(
matching_re,
flags = re.I | re.MULTILINE)
out = self.tweak_first_match(
content,
kernel_parameter_line_pattern,
apply_persistence_to_all_lines,
self.param_operations(),
self.param_operations_for_persistence())
return self.post_process(out)
def on_liveboot_params(self, starter, params):
return self.LIVE_BOOT_DETECT_PARAM in params
def post_process(self, entire_string):
return entire_string
def add_op_if_file_exists(self, op_list, op_creator_func, key,
candidate_relative_path_list, predicate):
for candidate in candidate_relative_path_list:
relpath = os.path.join(self.setup_params.distro_path[1:],
candidate)
if os.path.exists(os.path.join(
self.setup_params.usb_mount, relpath)):
normalized_relpath = '/' + relpath.replace('\\','/')
op_list.append((op_creator_func(key, normalized_relpath),
predicate))
break
def fullpath(self, subpath):
p = self.setup_params
return os.path.join(p.usb_mount, p.distro_path[1:],
subpath).replace('/', os.sep)
def file_is_installed(self, subpath):
return os.path.exists(self.fullpath(subpath))
def file_content(self, subpath):
fp = self.fullpath(subpath)
if not os.path.exists(fp):
return None
with open(fp, errors='ignore') as f:
return f.read()
def extract_distroinfo_from_file(self, subpath, regex, distro_group,
version_group):
content = self.file_content(subpath)
if not content:
return None
m = re.compile(regex, re.I).search(content)
if not m:
return None
return (m.group(distro_group),
[int(x) for x in m.group(version_group).split('.')])
def extract_distroinfo_from_fname(self, which_dir, regex, distro_group,
version_group):
p = re.compile(regex, re.I)
for fname in os.listdir(self.fullpath(which_dir)):
m = p.match(fname)
if m:
return (m.group(distro_group),
[int(x) for x in m.group(version_group).split('.')])
return None
class PersistenceConfigTweaker(ConfigTweaker):
def __init__(self, pac_re, *args, **kw):
self.persistence_awareness_checking_re = pac_re
super(PersistenceConfigTweaker, self).__init__(*args, **kw)
def config_is_persistence_aware(self, content):
""" Used to restrict update of boot parameters to persistent-aware
menu entries if the distribution provides any.
"""
return self.persistence_awareness_checking_re.search(content) \
is not None
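    # Example from the self-tests further below: for DebianConfigTweaker a line
    # like "append boot=live ... persistence more stuff" is persistence aware,
    # whereas the same line with 'persistent' instead of 'persistence' is not.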
class ConfigTweakerWithDebianStylePersistenceParam(PersistenceConfigTweaker):
def __init__(self, *args, **kw):
persistence_awareness_checking_re = re.compile(
r'^\s*(%s).*?\s%s(\s.*|)$' % \
(self.BOOT_PARAMS_STARTER, self.PERSISTENCY_TOKEN),
flags=re.I|re.MULTILINE)
super(ConfigTweakerWithDebianStylePersistenceParam,
self).__init__(persistence_awareness_checking_re, *args, **kw)
def has_persistency_param(self, params):
return self.PERSISTENCY_TOKEN in params
def param_operations_for_persistence(self):
return [
([add_tokens(self.PERSISTENCY_TOKEN),
add_or_replace_kv('%s-path=' % self.PERSISTENCY_TOKEN,
self.setup_params.distro_path)],
self.on_liveboot_params)]
class UbuntuConfigTweaker(ConfigTweakerWithDebianStylePersistenceParam):
LIVE_BOOT_DETECT_PARAM = 'boot=casper'
PERSISTENCY_TOKEN = 'persistent'
def param_operations(self):
return [
([add_tokens('ignore_bootid'),
add_or_replace_kv('live-media-path=',
'%s/casper' % self.setup_params.distro_path),
add_or_replace_kv('cdrom-detect/try-usb=', 'true'),
             # More recent releases appear to use 'floppy=0,allowed_driver_mask' instead.
add_or_replace_kv('floppy.allowed_drive_mask=', '0'),
add_tokens('ignore_uuid'),
add_or_replace_kv('root=UUID=', self.setup_params.usb_uuid),
],
self.on_liveboot_params),
(replace_kv('live-media=',
'/dev/disk/by-uuid/%s' % self.setup_params.usb_uuid),
always),
]
def post_process(self, entire_string):
return entire_string.replace(r'ui gfxboot', '#ui gfxboot')
class DebianConfigTweaker(ConfigTweakerWithDebianStylePersistenceParam):
LIVE_BOOT_DETECT_PARAM = 'boot=live'
PERSISTENCY_TOKEN = 'persistence'
def param_operations(self):
return [
(add_tokens('ignore_bootid'), self.on_liveboot_params),
(add_or_replace_kv('live-media-path=',
'%s/live' % self.setup_params.distro_path),
self.on_liveboot_params),
]
class NoPersistenceTweaker(ConfigTweaker):
def config_is_persistence_aware(self, content):
return False
def param_operations_for_persistence(self):
return []
def has_persistency_param(self, params):
return False
class GentooConfigTweaker(NoPersistenceTweaker):
def param_operations(self):
uuid_spec = 'UUID=%s' % self.setup_params.usb_uuid
ops = [
([add_or_replace_kv('real_root=', uuid_spec),
add_tokens('slowusb'),
add_or_replace_kv('subdir=', self.setup_params.distro_path),
remove_keys('cdroot_hash='),
# Without this, pentoo-amd64-hardened-2018.0_RC5.8_pre20180305/
# stucks at "copying read-write image contents to tmpfs"
add_tokens('overlayfs'),
# Said distro fails to mount root device if this param is given
remove_tokens('aufs'),
],
starter_is_either('append', 'linux')),
]
fs_type = self.setup_params.usb_fs_type
if fs_type == 'vfat':
ops.append( (add_or_replace_kv('cdroot_type=', fs_type),
always) )
self.add_op_if_file_exists(
ops, add_or_replace_kv,
'loop=', ['liberte/boot/root-x86.sfs', 'image.squashfs'],
starter_is_either('append', 'linux'))
return ops
class FedoraConfigTweaker(PersistenceConfigTweaker):
def __init__(self, *args, **kw):
persistence_awareness_checking_re = re.compile(
r'^\s*(%s).*?\s(rd.live.overlay|overlay)=.+?' %
self.BOOT_PARAMS_STARTER, flags=re.I|re.MULTILINE)
super(FedoraConfigTweaker, self).__init__(
persistence_awareness_checking_re, *args, **kw)
def has_persistency_param(self, params):
return any(p.startswith(('overlay=', 'rd.live.overlay='))
for p in params)
def param_operations(self):
uuid_spec = 'UUID=%s' % self.setup_params.usb_uuid
escaped_distro_path = self.setup_params.distro_path \
.replace(' ', '\\0x20')
live_path = escaped_distro_path + '/LiveOS'
ops = [(replace_kv('inst.stage2=', 'hd:%s:%s' %
(uuid_spec, escaped_distro_path)), always),
(add_or_replace_kv('inst.repo=',
'http://mirror.centos.org'
'/centos/7/os/x86_64/'),
contains_key('inst.stage2=')),
(replace_kv('root=', 'live:' + uuid_spec), always),
(add_or_replace_kv('rd.live.dir=', live_path),
contains_any_token('rd.live.image', 'Solus')),
(add_or_replace_kv('live_dir=', live_path),
contains_token('liveimage')), ]
if self.file_is_installed('.treeinfo'):
# Add or replace value of 'inst.repo=' with reference
# to the copied iso.
ops.append(
(add_or_replace_kv(
'inst.repo=',
'hd:UUID=%s:%s' % (
self.setup_params.usb_uuid,
self.setup_params.distro_path + '/' +
self.setup_params.iso_fname)),
starter_is_either('append', 'linux')))
return ops
def param_operations_for_persistence(self):
uuid_spec = 'UUID=%s' % self.setup_params.usb_uuid
return [
(remove_tokens('ro'), always),
(add_or_replace_kv('overlay=', uuid_spec),
contains_token('liveimage')),
([add_tokens('rw'),
add_or_replace_kv('rd.live.overlay=', uuid_spec)],
contains_token('rd.live.image'))
]
class AntixConfigTweaker(NoPersistenceTweaker):
def param_operations(self):
dinfo = self.extract_distroinfo_from_file(
'version', r'(antiX|MX)-(\d+\.\d+)', 1, 2)
if not dinfo:
dinfo = self.extract_distroinfo_from_file(
'boot/isolinux/isolinux.cfg', r'(antiX|MX)-(\d+\.\d+)', 1, 2)
if not dinfo:
dinfo = self.extract_distroinfo_from_fname(
'', r'(MX)-(\d+\.\d+).*', 1, 2)
if dinfo and 17<=dinfo[1][0]:
ops = [
add_or_replace_kv('buuid=', self.setup_params.usb_uuid),
add_or_replace_kv('bdir=',
self.setup_params.distro_path + '/antiX')]
else:
ops = add_or_replace_kv('image_dir=',
self.setup_params.distro_path)
return [(ops, starter_is_either('append', 'APPEND', 'linux'))]
def post_process(self, s):
s = re.sub(r'^(\s*UI\s+(.*?gfxboot(\.c32|)))\s+(.*?)\s+(.*)$',
r'# \1 \4.renamed-to-avoid-lockup \5', s,
flags=re.I + re.MULTILINE)
return s
class SalixConfigTweaker(NoPersistenceTweaker):
def legacy_tweak(self, content):
if content.find('iso_path') < 0:
return None
p = self.setup_params
for replacee, replacer in [
('iso_path', "%s/%s" % (p.distro_path, p.iso_fname)),
('initrd=', 'fromiso=%s/%s initrd=' % (
p.distro_path, p.iso_fname)),
]:
content = content.replace(replacee, replacer)
return content
# salixlive-xfce-14.2.1 assumes that the installation media is
# labeled "LIVE" and the file tree is exploded at the root.
    # (See /init for details.)  Supporting it in harmony with the installation
    # of other distros ranges from very hard to impossible. Do nothing here.
def param_operations(self):
return []
class WifislaxConfigTweaker(NoPersistenceTweaker):
def param_operations(self):
ops = [
(add_or_replace_kv('livemedia=','%s:%s/%s' % (
self.setup_params.usb_uuid, self.setup_params.distro_path,
self.setup_params.iso_fname)),
starter_is_either('append', 'linux'))]
return ops
def post_process(self, entire_string):
return entire_string.replace('($root)', self.setup_params.distro_path)
def test_tweak_objects():
def os_path_exists(f):
if f.endswith('liberte/boot/root-x86.sfs'):
return False
if f.endswith('image.squashfs'):
return True
return False
saved = os.path.exists
os.path.exists = os_path_exists
try:
_test_tweak_objects()
finally:
os.path.exists = saved
def _test_tweak_objects():
usb_mount = 'L:'
usb_disk = 'L:'
    setup_params_no_persistence = ConfigTweakerParam(
        '{iso-name}', '/multibootusb/{iso-name}', 0,
        '{usb-uuid}', usb_mount, usb_disk,
        'ext4')  # usb_fs_type; 'ext4' is an arbitrary choice for the tests
debian_tweaker = DebianConfigTweaker('debian', setup_params_no_persistence)
ubuntu_tweaker = UbuntuConfigTweaker('ubuntu', setup_params_no_persistence)
centos_tweaker = FedoraConfigTweaker('centos', setup_params_no_persistence)
salix_tweaker = SalixConfigTweaker('centos', setup_params_no_persistence)
# Test awareness on 'persistent'
content = """
append boot=live foo baz=1 double-spaced ignore_bootid persistent more stuff""".lstrip()
print ("Testing awareness on 'persistent' of ubuntu tweaker.")
assert ubuntu_tweaker.config_is_persistence_aware(content)
print ("Testing awareness on 'persistent' of debian tweaker.")
assert not debian_tweaker.config_is_persistence_aware(content)
content = """
append boot=live foo baz=1 double-spaced ignore_bootid persistence more stuff""".lstrip()
print ("Testing awareness on 'persistence' of ubuntu tweaker.")
assert not ubuntu_tweaker.config_is_persistence_aware(content)
print ("Testing awareness on 'persistence' of debian tweaker.")
assert debian_tweaker.config_is_persistence_aware(content)
print ("Testing awareness on 'overlay=' of centos tweaker.")
content = """
append boot=live foo baz=1 overlay=UUID:2234-1224 double-spaced ignore_bootid persistence more stuff""".lstrip()
assert centos_tweaker.config_is_persistence_aware(content)
print ("Testing awareness on 'rd.live.overlay=' of centos tweaker.")
content = """
append boot=live foo baz=1 rd.live.overlay=UUID:2234-1224 double-spaced ignore_bootid persistence more stuff""".lstrip()
assert centos_tweaker.config_is_persistence_aware(content)
print ("Testing indefference on persistence keys of centos tweaker.")
content = """
append boot=live foo baz=1 double-spaced ignore_bootid persistence more stuff""".lstrip()
assert not centos_tweaker.config_is_persistence_aware(content)
print ("Testing awareness on 'overlay=' of centos tweaker.")
content = """
append boot=live foo baz=1 double-spaced ignore_bootid persistence more stuff""".lstrip()
assert not centos_tweaker.config_is_persistence_aware(content)
print ("Testing if 'persistence' token is left at the original place.")
content = "\tlinux\tfoo persistence boot=live in the middle"
assert debian_tweaker.tweak(content) == "\tlinux\tfoo persistence boot=live in the middle ignore_bootid live-media-path=/multibootusb/{iso-name}/live persistence-path=/multibootusb/{iso-name}"""
print ("Testing if 'boot=live' at the very end is recognized.")
content = "menu\n\tlinux\tfoo persistence in the middle boot=live"
assert debian_tweaker.tweak(content) == "menu\n\tlinux\tfoo persistence in the middle boot=live ignore_bootid live-media-path=/multibootusb/{iso-name}/live persistence-path=/multibootusb/{iso-name}"""
print ("Testing if 'boot=live' at a line end is recognized.")
content = """append zoo
\tappend\tfoo persistence in the middle boot=live
append foo"""
assert debian_tweaker.tweak(content) == """append zoo
\tappend\tfoo persistence in the middle boot=live ignore_bootid live-media-path=/multibootusb/{iso-name}/live persistence-path=/multibootusb/{iso-name}
append foo"""
print ("Testing if replacement of 'live-media=' happens on non-boot lines.")
content = "\t\tlinux live-media=/tobe/replaced"
assert ubuntu_tweaker.tweak(content)==\
"\t\tlinux live-media=/dev/disk/by-uuid/{usb-uuid}"
print ("Testing if \\tappend is recognized as a starter.")
content = """\tappend foo boot=live ignore_bootid persistence in the middle live-media-path=/foo/bar"""
assert debian_tweaker.tweak(content) == """\tappend foo boot=live ignore_bootid persistence in the middle live-media-path=/multibootusb/{iso-name}/live persistence-path=/multibootusb/{iso-name}"""
print ("Testing if debian tweaker does not get tickled by 'persistent'.")
content = """\tappend boot=live foo ignore_bootid persistent in the middle live-media-path=/foo/bar"""
assert debian_tweaker.tweak(content) == """\tappend boot=live foo ignore_bootid persistent in the middle live-media-path=/multibootusb/{iso-name}/live"""
print ("Testing replacement of 'live-media-path' value.")
content = " append boot=live foo live-media-path=/foo/bar more"
assert debian_tweaker.tweak(content) == """ append boot=live foo live-media-path=/multibootusb/{iso-name}/live more ignore_bootid"""
print ("Testing rewriting of 'file=' param by debian_tweaker.")
content = " kernel file=/cdrom/preseed/ubuntu.seed boot=live"
    setup_params_persistent = ConfigTweakerParam(
        'debian', '/multibootusb/{iso-name}', 128*1024*1024, '{usb-uuid}',
        usb_mount, usb_disk,
        'ext4')  # usb_fs_type; 'ext4' is an arbitrary choice for the tests
debian_persistence_tweaker = DebianConfigTweaker(
'debian', setup_params_persistent)
ubuntu_persistence_tweaker = UbuntuConfigTweaker(
'ubuntu', setup_params_persistent)
centos_persistence_tweaker = FedoraConfigTweaker(
'centos', setup_params_persistent)
print ("Testing if debian tweaker appends persistence parameters.")
content = """label foo
kernel foo bar
append boot=live foo live-media-path=/foo/bar more
"""
assert debian_persistence_tweaker.tweak(content) == """label foo
kernel foo bar
append boot=live foo live-media-path=/multibootusb/{iso-name}/live more ignore_bootid persistence persistence-path=/multibootusb/{iso-name}
"""
print ("Testing if ubuntu tweaker selectively appends persistence params.")
content = """label foo
kernel foo bar
append boot=casper foo live-media-path=/foo/bar more
"""
assert ubuntu_persistence_tweaker.tweak(content) == """label foo
kernel foo bar
append boot=casper foo live-media-path=/multibootusb/{iso-name}/casper more ignore_bootid cdrom-detect/try-usb=true floppy.allowed_drive_mask=0 ignore_uuid root=UUID={usb-uuid} persistent persistent-path=/multibootusb/{iso-name}
"""
# Test rewrite of persistence-aware configuration.
# Only 'live-persistence' line should receive 'persistence-path'
# parameter.
print ("Testing if debian tweaker appends persistence params "
"to relevant lines only.")
content = """label live-forensic
menu label Live (^forensic mode)
linux /live/vmlinuz
initrd /live/initrd.img
append boot=live noconfig=sudo username=root hostname=kali noswap noautomount
label live-persistence
menu label ^Live USB Persistence (check kali.org/prst)
linux /live/vmlinuz
initrd /live/initrd.img
append boot=live noconfig=sudo username=root hostname=kali persistence
"""
assert debian_persistence_tweaker.tweak(content)=="""label live-forensic
menu label Live (^forensic mode)
linux /live/vmlinuz
initrd /live/initrd.img
append boot=live noconfig=sudo username=root hostname=kali noswap noautomount ignore_bootid live-media-path=/multibootusb/{iso-name}/live
label live-persistence
menu label ^Live USB Persistence (check kali.org/prst)
linux /live/vmlinuz
initrd /live/initrd.img
append boot=live noconfig=sudo username=root hostname=kali persistence ignore_bootid live-media-path=/multibootusb/{iso-name}/live persistence-path=/multibootusb/{iso-name}
"""
    setup_params = ConfigTweakerParam(
        '{iso-name}', '/multibootusb/{iso-name}',
        0, '{usb-uuid}', usb_mount, usb_disk,
        'ext4')  # usb_fs_type; 'ext4' is an arbitrary choice for the tests
gentoo_tweaker = GentooConfigTweaker('gentoo', setup_params)
print ("Testing Gentoo-tweaker on syslinux config.")
content = """label pentoo
menu label Pentoo Defaults (verify)
kernel /isolinux/pentoo
append initrd=/isolinux/pentoo.igz root=/dev/ram0 init=/linuxrc nox nodhcp overlayfs max_loop=256 dokeymap looptype=squashfs loop=/image.squashfs cdroot video=uvesafb:mtrr:3,ywrap,1024x768-16 usbcore.autosuspend=1 console=tty0 net.ifnames=0 scsi_mod.use_blk_mq=1 ipv6.autoconf=0 verify
"""
assert gentoo_tweaker.tweak(content)=="""label pentoo
menu label Pentoo Defaults (verify)
kernel /isolinux/pentoo
append initrd=/isolinux/pentoo.igz root=/dev/ram0 init=/linuxrc nox nodhcp overlayfs max_loop=256 dokeymap looptype=squashfs loop=/multibootusb/{iso-name}/image.squashfs cdroot video=uvesafb:mtrr:3,ywrap,1024x768-16 usbcore.autosuspend=1 console=tty0 net.ifnames=0 scsi_mod.use_blk_mq=1 ipv6.autoconf=0 verify real_root=%s slowusb subdir=/multibootusb/{iso-name}
""" % usb_disk
print ("Testing Gentoo-tweaker on grub config.")
content = """insmod all_video
menuentry 'Boot LiveCD (kernel: pentoo)' --class gnu-linux --class os {
linux /isolinux/pentoo root=/dev/ram0 init=/linuxrc nox aufs max_loop=256 dokeymap looptype=squashfs loop=/image.squashfs cdroot cdroot_hash=xxx
initrd /isolinux/pentoo.igz
}
"""
assert gentoo_tweaker.tweak(content)=="""insmod all_video
menuentry 'Boot LiveCD (kernel: pentoo)' --class gnu-linux --class os {
linux /isolinux/pentoo root=/dev/ram0 init=/linuxrc nox max_loop=256 dokeymap looptype=squashfs loop=/multibootusb/{iso-name}/image.squashfs cdroot real_root=%s slowusb subdir=/multibootusb/{iso-name} overlayfs
initrd /isolinux/pentoo.igz
}
""" % usb_disk
print ("Testing centos tweaker on DVD-installer")
saved = os.path.exists
os.path.exists = lambda f: f.endswith(('/.treeinfo','\\.treeinfo')) \
or saved(f)
try:
content = r"""label linux
menu label ^Install CentOS 7
kernel vmlinuz
append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 quiet
"""
assert centos_tweaker.tweak(content)=="""label linux
menu label ^Install CentOS 7
kernel vmlinuz
append initrd=initrd.img inst.stage2=hd:UUID={usb-uuid}:/multibootusb/{iso-name} quiet inst.repo=hd:UUID={usb-uuid}:/multibootusb/{iso-name}/{iso-name}.iso
"""
finally:
os.path.exists = saved
print ("Testing centos tweaker on Net-installer")
assert centos_tweaker.tweak(content)=="""label linux
menu label ^Install CentOS 7
kernel vmlinuz
append initrd=initrd.img inst.stage2=hd:UUID={usb-uuid}:/multibootusb/{iso-name} quiet inst.repo=http://mirror.centos.org/centos/7/os/x86_64/
"""
content = r"""label linux0
menu label ^Start CentOS
kernel vmlinuz0
append initrd=initrd0.img root=live:CDLABEL=CentOS-7-x86_64-LiveGNOME-1708 rootfstype=auto ro rd.live.image quiet rhgb rd.luks=0 rd.md=0 rd.dm=0
menu default
"""
print ("Testing centos tweaker on Live")
assert centos_tweaker.tweak(content)=="""label linux0
menu label ^Start CentOS
kernel vmlinuz0
append initrd=initrd0.img root=live:UUID={usb-uuid} rootfstype=auto ro rd.live.image quiet rhgb rd.luks=0 rd.md=0 rd.dm=0 rd.live.dir=/multibootusb/{iso-name}/LiveOS
menu default
"""
print ("Testing persistent centos tweaker on non-persistence config.")
content = r"""label linux0
menu label ^Start CentOS
kernel vmlinuz0
append initrd=initrd0.img root=live:CDLABEL=CentOS-7-x86_64-LiveGNOME-1708 rootfstype=auto ro rd.live.image quiet rhgb rd.luks=0 rd.md=0 rd.dm=0
menu default
"""
assert centos_persistence_tweaker.tweak(content)=="""label linux0
menu label ^Start CentOS
kernel vmlinuz0
append initrd=initrd0.img root=live:UUID={usb-uuid} rootfstype=auto rd.live.image quiet rhgb rd.luks=0 rd.md=0 rd.dm=0 rd.live.dir=/multibootusb/{iso-name}/LiveOS rw rd.live.overlay=UUID={usb-uuid}
menu default
"""
print ("Testing persistent centos tweaker not touching "
"non-persistent line")
content = r"""label linux0
menu label ^Start CentOS
append kenel=vmlinuz0
append rd.live.overlay=UUID:2234-2223 ro rd.live.image
append initrd=initrd0.img root=live:CDLABEL=CentOS-7-x86_64-LiveGNOME-1708 rootfstype=auto ro rd.live.image quiet rhgb rd.luks=0 rd.md=0 rd.dm=0
menu default
"""
assert centos_persistence_tweaker.tweak(content)=="""label linux0
menu label ^Start CentOS
append kenel=vmlinuz0
append rd.live.overlay=UUID={usb-uuid} rd.live.image rd.live.dir=/multibootusb/{iso-name}/LiveOS rw
append initrd=initrd0.img root=live:UUID={usb-uuid} rootfstype=auto ro rd.live.image quiet rhgb rd.luks=0 rd.md=0 rd.dm=0 rd.live.dir=/multibootusb/{iso-name}/LiveOS
menu default
"""
print ("Testing salix tweaker on legacy tweaking")
content = """menu Old Salix
append initrd=/boot/initrd.img foobar=iso_path tail-param
"""
assert salix_tweaker.tweak(content)=="""menu Old Salix
append fromiso=/multibootusb/{iso-name}/{iso-name}.iso initrd=/boot/initrd.img foobar=/multibootusb/{iso-name}/{iso-name}.iso tail-param
"""
print ("Testing salix tweaker on new tweaking")
content = """menu New Salix
append initrd=/boot/initrd.img tail-param
"""
assert salix_tweaker.tweak(content)=="""menu New Salix
append initrd=/boot/initrd.img tail-param livemedia={usb-uuid}:/multibootusb/{iso-name}/{iso-name}.iso
"""
def do_test_abspath_rewrite():
content = """menuentry "Install Ubuntu" {
linux /casper/vmlinuz.efi file=/cdrom/preseed/ubuntu.seed boot=casper only-ubiquity init=/linuxrc iso-scan/filename=${iso_path} quiet splash grub=/grub/grub.cfg ---
initrd /casper/initrd.lz
}"""
assert fix_abspath(
content, 'g:/multibootusb/ubuntu-14.04.5-desktop-amd64',
'ubuntu-14.04.5-desktop-amd64', 'test_abspath_rewrite')==\
"""menuentry "Install Ubuntu" {
linux /multibootusb/ubuntu-14.04.5-desktop-amd64/casper/vmlinuz file=/cdrom/preseed/ubuntu.seed boot=casper only-ubiquity init=/linuxrc iso-scan/filename=${iso_path} quiet splash grub=/multibootusb/ubuntu-14.04.5-desktop-amd64/boot/grub/grub.cfg ---
initrd /multibootusb/ubuntu-14.04.5-desktop-amd64/casper/initrd.lz
}"""
def test_abspath_rewrite():
def os_path_exists(path):
path = path.replace('\\', '/')
if path.endswith('.efi'):
return False
if path.startswith('g:/multibootusb/ubuntu-14.04.5-desktop-amd64'
'/boot'):
return True
if path.endswith('/boot/grub/grub.cfg'):
return True
if path == '/grub/grub.cfg':
return False
if path.endswith('casper/vmlinuz'):
return True
if path.endswith('/casper/initrd.lz'):
return True
return False
saved = os.path.exists
try:
os.path.exists = os_path_exists
do_test_abspath_rewrite()
finally:
os.path.exists = saved
|
mbusb/multibootusb
|
scripts/update_cfg_file.py
|
Python
|
gpl-2.0
| 64,603
|
[
"VisIt"
] |
e62f2b5098c61d1ccd328f59d9b969c7327998d230ee39abcfce770f158f760e
|
import os
import unittest
import vtk, qt, ctk, slicer
import math
import sys
#
# AstroPVDiagramSelfTest
#
class AstroPVDiagramSelfTest:
def __init__(self, parent):
parent.title = "Astro PVDiagram SelfTest"
parent.categories = ["Testing.TestCases"]
parent.dependencies = ["AstroVolume"]
parent.contributors = ["""
Davide Punzo (Kapteyn Astronomical Institute) and
Thijs van der Hulst (Kapteyn Astronomical Institute)."""]
parent.helpText = """
    This module was developed as a self-test that generates a PVDiagram.
"""
parent.acknowledgementText = """
""" # replace with organization, grant and thanks.
self.parent = parent
# Add this test to the SelfTest module's list for discovery when the module
# is created. Since this module may be discovered before SelfTests itself,
# create the list if it doesn't already exist.
try:
slicer.selfTests
except AttributeError:
slicer.selfTests = {}
slicer.selfTests['Astro PVDiagram SelfTest'] = self.runTest
def runTest(self):
tester = AstroPVDiagramSelfTestTest()
tester.runTest()
#
# qAstroPVDiagramSelfTestWidget
#
class AstroPVDiagramSelfTestWidget:
def __init__(self, parent = None):
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout(qt.QVBoxLayout())
self.parent.setMRMLScene(slicer.mrmlScene)
else:
self.parent = parent
self.layout = self.parent.layout()
if not parent:
self.setup()
self.parent.show()
def setup(self):
# Instantiate and connect widgets ...
# reload button
# (use this during development, but remove it when delivering
# your module to users)
self.reloadButton = qt.QPushButton("Reload")
self.reloadButton.toolTip = "Reload this module."
self.reloadButton.name = "AstroPVDiagramSelfTest Reload"
self.layout.addWidget(self.reloadButton)
self.reloadButton.connect('clicked()', self.onReload)
# reload and test button
# (use this during development, but remove it when delivering
# your module to users)
self.reloadAndTestButton = qt.QPushButton("Reload and Test")
self.reloadAndTestButton.toolTip = "Reload this module and then run the self tests."
self.layout.addWidget(self.reloadAndTestButton)
self.reloadAndTestButton.connect('clicked()', self.onReloadAndTest)
# Add vertical spacer
self.layout.addStretch(1)
def cleanup(self):
pass
def onReload(self,moduleName="AstroPVDiagramSelfTest"):
"""Generic reload method for any scripted module.
    ModuleWizard will substitute the correct default moduleName.
"""
globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)
def onReloadAndTest(self,moduleName="AstroPVDiagramSelfTest"):
self.onReload()
evalString = 'globals()["%s"].%sTest()' % (moduleName, moduleName)
tester = eval(evalString)
tester.runTest()
#
# AstroPVDiagramSelfTestLogic
#
class AstroPVDiagramSelfTestLogic:
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget
"""
def __init__(self):
pass
def hasImageData(self,volumeNode):
"""This is a dummy logic method that
returns true if the passed in volume
node has valid image data
"""
if not volumeNode:
print('no volume node')
return False
if volumeNode.GetImageData() is None:
print('no image data')
return False
return True
class AstroPVDiagramSelfTestTest(unittest.TestCase):
"""
This is the test case for your scripted module.
"""
def delayDisplay(self,message,msec=100):
"""This utility method displays a small dialog and waits.
This does two things: 1) it lets the event loop catch up
to the state of the test so that rendering and widget updates
have all taken place before the test continues and 2) it
shows the user/developer/tester the state of the test
so that we'll know when it breaks.
"""
print(message)
self.info = qt.QDialog()
self.infoLayout = qt.QVBoxLayout()
self.info.setLayout(self.infoLayout)
self.label = qt.QLabel(message,self.info)
self.infoLayout.addWidget(self.label)
qt.QTimer.singleShot(msec, self.info.close)
self.info.exec_()
def setUp(self):
slicer.mrmlScene.Clear(0)
def runTest(self):
self.setUp()
self.test_AstroPVDiagramSelfTest()
def test_AstroPVDiagramSelfTest(self):
print("Running AstroPVDiagramSelfTest Test case:")
astroVolume = self.downloadWEIN069()
mainWindow = slicer.util.mainWindow()
mainWindow.moduleSelector().selectModule('AstroVolume')
mainWindow.moduleSelector().selectModule('AstroPVDiagram')
astroPVDiagramModule = module = slicer.modules.astropvdiagram
astroPVDiagramModuleWidget = astroPVDiagramModule.widgetRepresentation()
AstroPVDiagramParameterNode = slicer.util.getNode("AstroPVDiagramParameters")
sourceNode = slicer.util.getNode("PVDiagramSourcePoints")
self.delayDisplay('Adding fiducials and calculating PVDiagram', 700)
sourceNode.AddFiducial(0, 0, 0)
sourceNode.AddFiducial(30, 0, 0)
sourceNode.AddFiducial(30, 0, 40)
outputVolume = slicer.mrmlScene.GetNodeByID(AstroPVDiagramParameterNode.GetOutputVolumeNodeID())
pixelValue = outputVolume.GetImageData().GetScalarComponentAsFloat(50, 25, 0, 0)
if (math.fabs(pixelValue - 0.00205505) < 1.e-7):
self.delayDisplay('Test passed', 700)
else:
self.delayDisplay('Test failed', 700)
    # if run from the Slicer interface, remove the following exit
sys.exit()
def downloadWEIN069(self):
import AstroSampleData
astroSampleDataLogic = AstroSampleData.AstroSampleDataLogic()
self.delayDisplay('Getting WEIN069 Astro Volume')
WEIN069Volume = astroSampleDataLogic.downloadSample("WEIN069")
return WEIN069Volume
|
Punzo/SlicerAstro
|
AstroPVDiagram/Testing/Python/AstroPVDiagramSelfTest.py
|
Python
|
bsd-3-clause
| 6,002
|
[
"VTK"
] |
a13fa5ffce75cfa753f530231c8d1b80185cd7d57fa09e0b23d1b049b80a3bdf
|
"""
Taken from ProDy
(http://www.csb.pitt.edu/prody/_modules/prody/proteins/blastpdb.html)
"""
import re
import time
import urllib2
import xml.etree.cElementTree as etree
from urllib import urlencode
def blast_pdb(sequence, nhits=250, expect=1e-10, timeout=60, pause=1):
query = {
'DATABASE': 'pdb',
'ENTREZ_QUERY': '(none)',
'PROGRAM': 'blastp',
'EXPECT': expect,
'HITLIST_SIZE': nhits,
'CMD': 'Put',
'QUERY': sequence
}
url = 'http://blast.ncbi.nlm.nih.gov/Blast.cgi'
data = urlencode(query)
request = urllib2.Request(
url, data=data, headers={'User-agent': 'protutils'}
)
response = urllib2.urlopen(request)
html = response.read()
m = re.search('RID =\s?(.*?)\n', html)
if m:
rid = m.group(1)
else:
raise Exception('Could not parse response.')
query = {
'ALIGNMENTS': 500,
'DESCRIPTIONS': 500,
'FORMAT_TYPE': 'XML',
'RID': rid,
'CMD': 'Get'
}
data = urlencode(query)
slept = 0
while slept < timeout:
request = urllib2.Request(
url, data=data, headers={'User-agent': 'protutils'}
)
response = urllib2.urlopen(request)
results = response.read()
m = re.search('Status=(.*?)\n', results)
if not m:
break
elif m.group(1).strip().upper() == 'READY':
break
else:
time.sleep(pause)
slept += pause
with open('blastp.xml', 'w') as f:
f.write(results)
return etree.XML(results)
def xml_dict(root, tag_prefix):
d = {}
regex = re.compile(r'{0}(.*)'.format(tag_prefix))
for element in root:
tag = element.tag
m = regex.search(tag)
if m:
key = m.group(1)
if len(element) == 0:
d[key] = element.text
else:
d[key] = element
return d
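# Example: with tag_prefix='BlastOutput_', an element tagged
# 'BlastOutput_query-ID' is stored under the key 'query-ID' (this is how
# BLASTPDBRecord below consumes the BLAST XML output).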
class BLASTPDBRecord(object):
def __init__(self, sequence, nhits=250, expect=1e-10, timeout=60, pause=1):
self.qseq = sequence
root = blast_pdb(sequence, nhits, expect, timeout, pause)
root = xml_dict(root, 'BlastOutput_')
self.query_id = root['query-ID']
if not len(sequence) == int(root['query-len']):
raise ValueError('Sequence length does not match query length')
self.param = xml_dict(root['param'][0], 'Parameters_')
hits = []
for elem in root['iterations']:
for child in xml_dict(elem, 'Iteration_')['hits']:
hit = xml_dict(child, 'Hit_')
data = xml_dict(hit['hsps'][0], 'Hsp_')
for key in ['align-len', 'gaps', 'hit-frame', 'hit-from',
'hit-to', 'identity', 'positive', 'query-frame',
'query-from', 'query-to']:
data[key] = int(data[key])
for key in ['evalue', 'bit-score', 'score']:
data[key] = float(data[key])
p_identity = (data['identity'] /
float(data['query-to'] - data['query-from'] + 1)
* 100)
p_overlap = ((data['align-len'] - data['gaps']) /
float(len(sequence)) * 100)
data['percent_identity'] = p_identity
data['percent_overlap'] = p_overlap
__, gi, __, pdb, chain = hit['id'].split('|')
data['gi'] = gi
data['pdb'] = pdb
data['chain'] = chain
data['def'] = hit['def']
hits.append(data)
hits.sort(key=lambda x: x['percent_identity'], reverse=True)
self.hits = hits
def get_hits(self, percent_identity=90.0, percent_overlap=70.0):
hits = {}
for hit in self.hits:
if hit['percent_identity'] < percent_identity:
break
if hit['percent_overlap'] < percent_overlap:
continue
key = '{pdb}_{chain}'.format(**hit)
hits[key] = hit
return hits
def get_best(self):
return self.hits[0]
def ranking(self):
return {
'{pdb}_{chain}'.format(**hit): hit[
'percent_identity'
] for hit in self.hits
}
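# Rough usage sketch (network access to NCBI required; the sequence below is a
# placeholder, not a real query):
#     record = BLASTPDBRecord('MKT...', nhits=50)
#     best = record.get_best()   # hit with the highest percent identity
#     strong = record.get_hits(percent_identity=95.0, percent_overlap=80.0)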
|
platinhom/CADDHom
|
python/bioinformatics/blastp.py
|
Python
|
gpl-2.0
| 4,365
|
[
"BLAST"
] |
9a01b2459dd186a6dad43d5968bdad9de6083fb6eec3e081d9132a5b3c502320
|
#!/usr/bin/env python
# File: montage.py
# Created on: Mon Mar 18 10:08:05 2013
# Last Change: Tue Aug 13 14:06:18 2013
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import img_scale
import pyfits as pyf
import pylab as pyl
from mpl_toolkits.axes_grid1 import axes_grid
#from mk_galaxy_struc import mk_galaxy_struc
import cPickle as pickle
import os
def mk_image(galaxy):
base = './../../images_v5/GS_2.5as_matched/gs_all_'
i_img = pyf.getdata(base+str(galaxy)+'_I.fits')
j_img = pyf.getdata(base+str(galaxy)+'_J.fits')
h_img = pyf.getdata(base+str(galaxy)+'_H.fits')
img = pyl.zeros((h_img.shape[0], h_img.shape[1], 3), dtype=float)
img[:,:,0] = img_scale.asinh(h_img)
img[:,:,1] = img_scale.asinh(j_img)
img[:,:,2] = img_scale.asinh(i_img)
return img
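# mk_image() stacks the H, J and I band cutouts into the red, green and blue
# channels of an RGB image, each asinh-scaled via img_scale.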
# Get the Galaxy info
#galaxies = mk_galaxy_struc()
galaxies = pickle.load(open('galaxies.pickle','rb'))
galaxies = filter(lambda galaxy: galaxy.ston_I > 30., galaxies)
galaxies = pyl.asarray(filter(lambda galaxy: galaxy.ICD_IH < 0.5, galaxies))
# Make the low mass grid first
# Use arrays so the elementwise comparisons in the binning loop below work.
x = pyl.asarray([galaxy.z for galaxy in galaxies])
y = pyl.asarray([galaxy.ICD_IH * 100 for galaxy in galaxies])
ll = 1.5
ul= 3.5
bins_x =pyl.linspace(ll, ul, 16)
bins_y = pyl.linspace(50, -5, 7)
grid = []
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
for j in range(bins_y.size-1):
ymax = bins_y[j]
ymin = bins_y[j+1]
cond=[cond1 and cond2 and cond3 and cond4 for cond1, cond2, cond3,
cond4 in zip(x>=xmin, x<xmax, y>=ymin, y<ymax)]
grid.append(galaxies.compress(cond))
# Put the grid together
F = pyl.figure(1, figsize=(6, 4))
grid1 = axes_grid.ImageGrid(F, 211, nrows_ncols=(6,15), axes_pad=0.05,
add_all=True, share_all=True, aspect=True, direction='column')
grid2 = axes_grid.ImageGrid(F, 212, nrows_ncols=(6,15), axes_pad=0.05,
add_all=True, share_all=True, aspect=True, direction='column')
from random import choice
base = './../../images_v5/GS_2.5as/gs_all_'
for i in range(len(grid)):
print len(grid[i])
if len(grid[i]) > 1:
        galaxy = choice(grid[i])
        ID = int(galaxy.ID)
        # Build the file name from the numeric ID and keep it in sync when
        # re-drawing another galaxy from this cell.
        while os.path.isfile(base+str(ID)+'_I.fits'):
            print 'choose again', ID
            galaxy = choice(grid[i])
            ID = int(galaxy.ID)
elif len(grid[i]) == 1:
galaxy = grid[i][0]
else:
pass
grid1[i].axis('off')
if len(grid[i]) != 0:
ID = int(galaxy.ID)
img = mk_image(ID)
grid1[i].imshow(img, origin='lower')
grid1[i].text(0.5, 0.5, str(galaxy.z), color='white' )
grid1[i].text(0.5, 35, str(ID), color='white' )
grid1[i].set_xticks([])
grid1[i].set_yticks([])
else:
pass
pyl.show()
|
boada/ICD
|
sandbox/legacy_plot_code/plot_icd_z_montage.py
|
Python
|
mit
| 2,741
|
[
"Galaxy"
] |
bfa0089efefd7d5b1cfdf1da4b500c458e13684e30b29684138e672ccc01844e
|
'''
Created on Jul 15, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import sys
import collections
import itertools
import operator
from chimerascan import pysam
from chimerascan.lib import config
from chimerascan.lib.chimera import Chimera, \
DiscordantTags, DISCORDANT_TAG_NAME, \
OrientationTags, ORIENTATION_TAG_NAME, \
DiscordantRead, ChimeraTypes, ChimeraPartner
from chimerascan.lib.breakpoint import Breakpoint
from chimerascan.lib.gene_to_genome import build_tid_tx_map
from chimerascan.lib.fragment_size_distribution import InsertSizeDistribution
from chimerascan.lib.seq import calc_homology
def parse_pairs(bamfh):
bam_iter = iter(bamfh)
try:
while True:
r1 = bam_iter.next()
r2 = bam_iter.next()
yield r1,r2
except StopIteration:
pass
def parse_gene_discordant_reads(bamfh):
"""
return tuples of (5',3') reads that both align to transcripts
"""
for r1,r2 in parse_pairs(bamfh):
# TODO:
# for now we are only going to deal with gene-gene
# chimeras and leave other chimeras for study at a
# later time
dr1 = r1.opt(DISCORDANT_TAG_NAME)
dr2 = r2.opt(DISCORDANT_TAG_NAME)
if (dr1 != DiscordantTags.DISCORDANT_GENE or
dr2 != DiscordantTags.DISCORDANT_GENE):
continue
# organize key in 5' to 3' order
or1 = r1.opt(ORIENTATION_TAG_NAME)
or2 = r2.opt(ORIENTATION_TAG_NAME)
assert or1 != or2
if or1 == OrientationTags.FIVEPRIME:
pair = (r1,r2)
else:
pair = (r2,r1)
yield pair
def get_chimera_type(fiveprime_gene, threeprime_gene, gene_trees):
"""
return tuple containing ChimeraType and distance
between 5' and 3' genes
"""
# get gene information
chrom5p, start5p, end5p, strand1 = fiveprime_gene.chrom, fiveprime_gene.tx_start, fiveprime_gene.tx_end, fiveprime_gene.strand
chrom3p, start3p, end3p, strand2 = threeprime_gene.chrom, threeprime_gene.tx_start, threeprime_gene.tx_end, threeprime_gene.strand
# interchromosomal
if chrom5p != chrom3p:
return ChimeraTypes.INTERCHROMOSOMAL, None
# orientation
same_strand = strand1 == strand2
# genes on same chromosome so check overlap
is_overlapping = (start5p < end3p) and (start3p < end5p)
if is_overlapping:
if not same_strand:
if ((start5p <= start3p and strand1 == "+") or
(start5p > start3p and strand1 == "-")):
return (ChimeraTypes.OVERLAP_CONVERGE, 0)
else:
return (ChimeraTypes.OVERLAP_DIVERGE, 0)
else:
if ((start5p <= start3p and strand1 == "+") or
(end5p >= end3p and strand1 == "-")):
return (ChimeraTypes.OVERLAP_SAME, 0)
else:
return (ChimeraTypes.OVERLAP_COMPLEX, 0)
# if code gets here then the genes are on the same chromosome but do not
# overlap. first calculate distance (minimum distance between genes)
if start5p <= start3p:
distance = start3p - end5p
between_start,between_end = end5p,start3p
else:
distance = end3p - start5p
between_start,between_end = end3p,start5p
# check whether there are genes intervening between the
# chimera candidates
genes_between = []
genes_between_same_strand = []
for hit in gene_trees[chrom5p].find(between_start,
between_end):
if (hit.start > between_start and
hit.end < between_end):
if hit.strand == strand1:
genes_between_same_strand.append(hit)
genes_between.append(hit)
if same_strand:
if len(genes_between_same_strand) == 0:
return ChimeraTypes.READTHROUGH, distance
else:
return ChimeraTypes.INTRACHROMOSOMAL, distance
else:
# check for reads between neighboring genes
if len(genes_between) == 0:
if ((start5p <= start3p and strand1 == "+") or
(start5p > start3p and strand1 == "-")):
return (ChimeraTypes.ADJ_CONVERGE, distance)
elif ((start5p >= start3p and strand1 == "+") or
(start5p < start3p and strand1 == "-")):
return (ChimeraTypes.ADJ_DIVERGE, distance)
elif ((start5p <= start3p and strand1 == "+") or
(start5p > start3p and strand1 == "-")):
return (ChimeraTypes.ADJ_SAME, distance)
elif ((start5p >= start3p and strand1 == "+") or
(start5p < start3p and strand1 == '-')):
return (ChimeraTypes.ADJ_COMPLEX, distance)
else:
return ChimeraTypes.INTRA_COMPLEX, distance
return ChimeraTypes.UNKNOWN, distance
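# Illustrative outcomes of get_chimera_type (a summary of the logic above, not
# part of the original code): partners on different chromosomes give
# (ChimeraTypes.INTERCHROMOSOMAL, None); same-strand partners on one chromosome
# with no intervening same-strand gene give (ChimeraTypes.READTHROUGH, distance);
# overlapping or opposite-strand partners fall into the OVERLAP_*/ADJ_* cases.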
def read_pairs_to_chimera(chimera_name, tid5p, tid3p, readpairs,
tid_tx_map, genome_tx_trees, trim_bp):
# get gene information
tx5p = tid_tx_map[tid5p]
tx3p = tid_tx_map[tid3p]
# categorize chimera type
chimera_type, distance = get_chimera_type(tx5p, tx3p, genome_tx_trees)
# create chimera object
c = Chimera()
iter5p = itertools.imap(operator.itemgetter(0), readpairs)
iter3p = itertools.imap(operator.itemgetter(1), readpairs)
c.partner5p = ChimeraPartner.from_discordant_reads(iter5p, tx5p, trim_bp)
c.partner3p = ChimeraPartner.from_discordant_reads(iter3p, tx3p, trim_bp)
c.name = chimera_name
c.chimera_type = chimera_type
c.distance = distance
# raw reads
c.encomp_read_pairs = readpairs
return c
def calc_isize_prob(isize, isize_dist):
# find percentile of observing this insert size in the reads
isize_per = isize_dist.percentile_at_isize(isize)
# convert to a probability score (0.0-1.0)
isize_prob = 1.0 - (2.0 * abs(50.0 - isize_per))/100.0
return isize_prob
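# Worked example of the mapping above (illustrative, not part of the original
# code): an insert size at the 50th percentile scores
# 1.0 - (2.0 * abs(50.0 - 50.0)) / 100.0 == 1.0, while one at the 0th or 100th
# percentile scores 1.0 - (2.0 * 50.0) / 100.0 == 0.0.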
def choose_best_breakpoints(r5p, r3p, tx5p, tx3p, trim_bp, isize_dist):
best_breakpoints = set()
best_isize_prob = None
# iterate through 5' transcript exons
exon_iter_5p = reversed(tx5p.exons) if tx5p.strand == '-' else iter(tx5p.exons)
tx_end_5p = 0
for exon_num_5p,coords5p in enumerate(exon_iter_5p):
genome_start_5p, genome_end_5p = coords5p
exon_size_5p = genome_end_5p - genome_start_5p
tx_end_5p += exon_size_5p
# fast forward on 5' gene to first exon beyond read
if tx_end_5p < (r5p.aend - trim_bp):
continue
#print "tx end 5p", tx_end_5p, "exon_size_5p", exon_size_5p, "r5p.aend", r5p.aend, "trim_bp", trim_bp
# now have a candidate insert size between between 5' read and
# end of 5' exon
isize5p = tx_end_5p - r5p.pos
# iterate through 3' transcript
exon_iter_3p = reversed(tx3p.exons) if tx3p.strand == '-' else iter(tx3p.exons)
tx_start_3p = 0
local_best_breakpoints = set()
local_best_isize_prob = None
for exon_num_3p,coords3p in enumerate(exon_iter_3p):
genome_start_3p, genome_end_3p = coords3p
#print "\t", coords3p
# stop after going past read on 3' transcript
if tx_start_3p >= (r3p.pos + trim_bp):
break
# get another candidate insert size between start of 3'
# exon and 3' read
isize3p = r3p.aend - tx_start_3p
#print "\t", isize5p, isize3p, tx_end_5p, tx_start_3p
# compare the insert size against the known insert size
# distribution
isize_prob = calc_isize_prob(isize5p + isize3p, isize_dist)
if ((local_best_isize_prob is None) or
(isize_prob > local_best_isize_prob)):
local_best_isize_prob = isize_prob
local_best_breakpoints = set([(exon_num_5p, tx_end_5p,
exon_num_3p, tx_start_3p)])
elif (isize_prob == local_best_isize_prob):
local_best_breakpoints.add((exon_num_5p, tx_end_5p,
exon_num_3p, tx_start_3p))
tx_start_3p += genome_end_3p - genome_start_3p
# compare locally best insert size probability to global best
if ((best_isize_prob is None) or
(local_best_isize_prob > best_isize_prob)):
best_isize_prob = local_best_isize_prob
best_breakpoints = local_best_breakpoints
elif (local_best_isize_prob == best_isize_prob):
# for ties we keep all possible breakpoints
best_breakpoints.update(local_best_breakpoints)
# TODO: remove debugging output
#ends5p = [x[1] for x in best_breakpoints]
#starts3p = [x[3] for x in best_breakpoints]
#print ends5p, starts3p, "r1:%d-%d" % (r5p.pos, r5p.aend), "r2:%d-%d" % (r3p.pos, r3p.aend), best_isize_prob
return best_isize_prob, best_breakpoints
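# Descriptive note (not part of the original code): each breakpoint returned
# above is a tuple (exon_num_5p, tx_end_5p, exon_num_3p, tx_start_3p) in
# transcript coordinates; ties in insert-size probability keep every equally
# likely exon junction.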
def extract_breakpoint_sequence(tx_name_5p, tx_end_5p,
tx_name_3p, tx_start_3p,
ref_fa, max_read_length,
homology_mismatches):
tx_start_5p = max(0, tx_end_5p - max_read_length + 1)
tx_end_3p = tx_start_3p + max_read_length - 1
# fetch sequence
seq5p = ref_fa.fetch(tx_name_5p, tx_start_5p, tx_end_5p)
seq3p = ref_fa.fetch(tx_name_3p, tx_start_3p, tx_end_3p)
# pad sequence if too short
if len(seq5p) < (max_read_length - 1):
logging.warning("Could not extract sequence of length >%d from "
"5' partner at %s:%d-%d, only retrieved "
"sequence of length %d" %
(max_read_length-1, tx_name_5p, tx_start_5p,
tx_end_5p, len(seq5p)))
# pad sequence
padding = (max_read_length - 1) - len(seq5p)
seq5p = ("N" * padding) + seq5p
if len(seq3p) < max_read_length - 1:
logging.warning("Could not extract sequence of length >%d from "
"3' partner at %s:%d-%d, only retrieved "
"sequence of length %d" %
(max_read_length-1, tx_name_3p, tx_start_3p,
tx_end_3p, len(seq3p)))
# pad sequence
padding = (max_read_length - 1) - len(seq3p)
seq3p = seq3p + ("N" * padding)
# if 5' partner continues along its normal transcript
# without fusing, get the sequence that would result
homolog_end_5p = tx_end_5p + max_read_length - 1
homolog_seq_5p = ref_fa.fetch(tx_name_5p, tx_end_5p, homolog_end_5p)
# if 3' partner were to continue in the 5' direction,
# grab the sequence that would be produced
homolog_start_3p = max(0, tx_start_3p - max_read_length + 1)
homolog_seq_3p = ref_fa.fetch(tx_name_3p, homolog_start_3p, tx_start_3p)
# count number of bases in common between downstream 5' sequence
# and the sequence of the 3' partner in the chimera
homology_right = calc_homology(homolog_seq_5p, seq3p,
homology_mismatches)
# count number of bases in common between upstream 3' sequence
# and the sequence of the 5' partner in the chimera
homology_left = calc_homology(homolog_seq_3p[::-1], seq5p[::-1],
homology_mismatches)
return seq5p, seq3p, homology_left, homology_right
def discordant_reads_to_breakpoints(index_dir, isize_dist_file,
input_bam_file, output_file,
trim_bp, max_read_length,
homology_mismatches):
"""
homology_mismatches: number of mismatches to tolerate while computing
homology between chimeric breakpoint sequence and "wildtype" sequence
trim_bp: when selecting the best matching exon for each read, we
account for spurious overlap into adjacent exons by trimming the
read by 'trim_bp'
"""
# read insert size distribution
isize_dist = InsertSizeDistribution.from_file(open(isize_dist_file))
# open BAM alignment file
bamfh = pysam.Samfile(input_bam_file, "rb")
# build a lookup table to get genomic intervals from transcripts
logging.debug("Reading gene information")
gene_file = os.path.join(index_dir, config.GENE_FEATURE_FILE)
tid_tx_map = build_tid_tx_map(bamfh, gene_file,
rname_prefix=config.GENE_REF_PREFIX)
# open the reference sequence fasta file
ref_fasta_file = os.path.join(index_dir, config.ALIGN_INDEX + ".fa")
ref_fa = pysam.Fastafile(ref_fasta_file)
# iterate through read pairs
outfh = open(output_file, "w")
logging.debug("Parsing discordant reads")
for r5p,r3p in parse_gene_discordant_reads(bamfh):
# store pertinent read information in lightweight structure called
# DiscordantRead object. this departs from SAM format into a
# custom read format
dr5p = DiscordantRead.from_read(r5p)
dr3p = DiscordantRead.from_read(r3p)
# get gene information
tx5p = tid_tx_map[r5p.rname]
tx3p = tid_tx_map[r3p.rname]
# given the insert size find the highest probability
# exon junction breakpoint between the two transcripts
isize_prob, breakpoints = \
choose_best_breakpoints(r5p, r3p, tx5p, tx3p,
trim_bp, isize_dist)
# extract the sequence of the breakpoint along with the
# number of homologous bases at the breakpoint between
# chimera and wildtype genes
for breakpoint in breakpoints:
exon_num_5p, tx_end_5p, exon_num_3p, tx_start_3p = breakpoint
breakpoint_seq_5p, breakpoint_seq_3p, homology_left, homology_right = \
extract_breakpoint_sequence(config.GENE_REF_PREFIX + tx5p.tx_name, tx_end_5p,
config.GENE_REF_PREFIX + tx3p.tx_name, tx_start_3p,
ref_fa, max_read_length,
homology_mismatches)
# write breakpoint information for each read to a file
fields = [tx5p.tx_name, 0, tx_end_5p,
tx3p.tx_name, tx_start_3p, tx3p.tx_end,
r5p.rname, # name
isize_prob, # score
tx5p.strand, tx3p.strand, # strand 1, strand 2
# user defined fields
exon_num_5p, exon_num_3p,
breakpoint_seq_5p, breakpoint_seq_3p,
homology_left, homology_right]
fields.append('|'.join(map(str, dr5p.to_list())))
fields.append('|'.join(map(str, dr3p.to_list())))
print >>outfh, '\t'.join(map(str, fields))
# cleanup
ref_fa.close()
outfh.close()
bamfh.close()
return config.JOB_SUCCESS
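# Summary of the write-out above (descriptive note, not part of the original
# code): each output line is tab-separated and holds the 5' transcript name, 0,
# the 5' breakpoint position, the 3' transcript name, the 3' breakpoint position
# and transcript end, the r5p.rname field (labelled 'name' in the code), the
# insert-size probability as the score, both strands, the 5'/3' exon numbers,
# the two breakpoint sequences, the left/right homology counts, and finally the
# two '|'-joined DiscordantRead encodings.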
def main():
from optparse import OptionParser
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = OptionParser("usage: %prog [options] <index> <isizedist.txt> "
                          "<discordant_reads.bam> <breakpoints.txt>")
parser.add_option("--trim", dest="trim", type="int",
default=config.EXON_JUNCTION_TRIM_BP, metavar="N",
help="Trim ends of reads by N bp when determining "
"the start/end exon of chimeras [default=%default]")
parser.add_option("--max-read-length", dest="max_read_length", type="int",
default=100, metavar="N",
help="Reads in the BAM file are guaranteed to have "
"length less than N [default=%default]")
parser.add_option("--homology-mismatches", type="int",
dest="homology_mismatches",
default=config.BREAKPOINT_HOMOLOGY_MISMATCHES,
help="Number of mismatches to tolerate when computing "
"homology between gene and its chimeric partner "
"[default=%default]")
options, args = parser.parse_args()
index_dir = args[0]
isize_dist_file = args[1]
input_bam_file = args[2]
output_file = args[3]
    return discordant_reads_to_breakpoints(index_dir, isize_dist_file,
                                           input_bam_file, output_file,
                                           trim_bp=options.trim,
                                           max_read_length=options.max_read_length,
                                           homology_mismatches=options.homology_mismatches)
if __name__ == '__main__':
sys.exit(main())
# os.remove(sorted_tmp_file)
#
#
# for line in open(sorted_tmp_file):
# fields = line.strip().split('\t')
# seq = fields[0]
# chimera_name = fields[1]
# if seq != prev_seq:
# if len(chimera_names) > 0:
# # write to fasta
# name = "B%07d" % (breakpoint_num)
# print >>fastafh, ">%s\n%s" % (name, split_seq(prev_seq))
# print >>mapfh, "%s\t%s\t%s" % (name, prev_seq, ",".join(chimera_names))
# chimera_names = set()
# breakpoint_num += 1
# prev_seq = seq
# chimera_names.add(chimera_name)
# if len(chimera_names) > 0:
# name = "B%07d" % (breakpoint_num)
# print >>fastafh, ">%s\n%s" % (name, split_seq(prev_seq))
# print >>mapfh, "%s\t%s\t%s" % (name, prev_seq, ",".join(chimera_names))
# os.remove(sorted_tmp_file)
#
#
#
# # TODO: use make_temp
# # write breakpoint/chimera relationships to file
# tmp_file = os.path.join(tmp_dir, "breakpoint_info.tmp")
# f = open(tmp_file, "w")
# for c in Chimera.parse(open(input_file)):
# # write to temp file
# print >>f, "%s\t%s" % (c.breakpoint_seq_5p + c.breakpoint_seq_3p, c.name)
# f.close()
# # sort breakpoint file
# def sortfunc(line):
# fields = line.strip().split('\t')
# return fields[0]
# tempdirs = [tmp_dir]
# sorted_tmp_file = os.path.join(tmp_dir, "breakpoint_info.srt.tmp")
# batch_sort(input=tmp_file,
# output=sorted_tmp_file,
# key=sortfunc,
# buffer_size=32000,
# tempdirs=tempdirs)
# os.remove(tmp_file)
# # parse and build breakpoint -> chimera map
# fastafh = open(breakpoint_fasta_file, "w")
# mapfh = open(breakpoint_map_file, "w")
# breakpoint_num = 0
# prev_seq = None
# chimera_names = set()
# for line in open(sorted_tmp_file):
# fields = line.strip().split('\t')
# seq = fields[0]
# chimera_name = fields[1]
# if seq != prev_seq:
# if len(chimera_names) > 0:
# # write to fasta
# name = "B%07d" % (breakpoint_num)
# print >>fastafh, ">%s\n%s" % (name, split_seq(prev_seq))
# print >>mapfh, "%s\t%s\t%s" % (name, prev_seq, ",".join(chimera_names))
# chimera_names = set()
# breakpoint_num += 1
# prev_seq = seq
# chimera_names.add(chimera_name)
# if len(chimera_names) > 0:
# name = "B%07d" % (breakpoint_num)
# print >>fastafh, ">%s\n%s" % (name, split_seq(prev_seq))
# print >>mapfh, "%s\t%s\t%s" % (name, prev_seq, ",".join(chimera_names))
# os.remove(sorted_tmp_file)
| tectronics/chimerascan | chimerascan/deprecated/chimeras_to_breakpoints_v2.py | Python | gpl-3.0 | 20,400 | ["pysam"] | 45c247852fb0e18c5a989ffcae84e9d06d8340cae3673208bdb46aa65ed609cb |
from nltk.stem import WordNetLemmatizer
input_words = ['liked', 'idol', 'wanna', 'happiness', 'death', 'hormone', 'secretion', 'addition', 'better', 'looking', 'exciting', 'best', 'kcal', 'is', 'CO2']
lemmatizer = WordNetLemmatizer()
lemmatizer_names = ['NOUN LEMMATIZER', 'VERB LEMMATIZER']
formatted_text = '{:>24}' * (len(lemmatizer_names) + 1)
print('\n', formatted_text.format('INPUT WORD', *lemmatizer_names), '\n', '='*75)
for word in input_words:
output = [word, lemmatizer.lemmatize(word, pos='n'), lemmatizer.lemmatize(word, pos='v')]
print(formatted_text.format(*output))
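# Expected behaviour for a few of the inputs above (illustrative, assuming the
# standard WordNet data is installed): 'liked' stays 'liked' with pos='n' but
# becomes 'like' with pos='v'; 'is' becomes 'be' with pos='v'; 'hormone' and
# 'CO2' are returned unchanged by both lemmatizers.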
| heeyong97/2017sejongAI | Week9/lemmatizer.py | Python | gpl-3.0 | 603 | ["exciting"] | b599109fcaf89146713d4fa8d144a2613115344212b887caf8f87b96dbfe9d16 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on 24/09/14 13:38
@author: Carlos Eduardo Barbosa
Program to load templates from the MILES library for pPXF use.
"""
import os
import numpy as np
import pyfits as pf
import ppxf_util as util
from config import *
def load_templates_regul(velscale):
""" Load templates into 2D array for regularization"""
# cd into template folder to make calculations
current_dir = os.getcwd()
os.chdir(os.path.join(home, "miles_models"))
# Extract the wavelength range and logarithmically rebin one spectrum
# to the same velocity scale of the SAURON galaxy spectrum, to determine
# the size needed for the array which will contain the template spectra.
miles = [x for x in os.listdir(".") if x.endswith(".fits")]
hdu = pf.open(miles[0])
ssp = hdu[0].data
h2 = hdu[0].header
lamRange2 = h2['CRVAL1'] + np.array([0.,h2['CDELT1']*(h2['NAXIS1']-1)])
sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp,
velscale=velscale)
# Ordered array of metallicities
Zs = set([x.split("Z")[1].split("T")[0] for x in miles])
Zs = [float(x.replace("m", "-").replace("p", "")) for x in Zs]
Zs.sort()
Z2 = Zs[:]
for i in range(len(Zs)):
if Zs[i] < 0:
Zs[i] = "{0:.2f}".format(Zs[i]).replace("-", "m")
else:
Zs[i] = "p{0:.2f}".format(Zs[i])
# Ordered array of ages
Ts = list(set([x.split("T")[1].split(".fits")[0] for x in miles]))
Ts.sort()
# Create a three dimensional array to store the
# two dimensional grid of model spectra
#
nAges = len(Ts)
nMetal = len(Zs)
templates = np.empty((sspNew.size,nAges,nMetal))
# Here we make sure the spectra are sorted in both [M/H]
# and Age along the two axes of the rectangular grid of templates.
# A simple alphabetical ordering of Vazdekis's naming convention
# does not sort the files by [M/H], so we do it explicitly below
miles = []
for k in range(nMetal):
for j in range(nAges):
filename = "Mun1.30Z{0}T{1}.fits".format(Zs[k], Ts[j])
ssp = pf.getdata(filename)
sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp,
velscale=velscale)
templates[:,j,k] = sspNew # Templates are *not* normalized here
miles.append(filename)
templates /= np.median(templates) # Normalizes templates by a scalar
os.chdir(current_dir)
return templates, logLam2, Ts, Z2, miles, h2['CDELT1']
def stellar_templates(velscale):
""" Load files with stellar library used as templates. """
current_dir = os.getcwd()
# Template directory is also set in config.py
os.chdir(template_dir)
miles = [x for x in os.listdir(".") if x.startswith("Mun") and
x.endswith(".fits")]
# Ordered array of metallicities
Zs = set([x.split("Z")[1].split("T")[0] for x in miles])
Zs = [float(x.replace("m", "-").replace("p", "").replace("_", "."))
for x in Zs]
Zs.sort()
for i in range(len(Zs)):
if Zs[i] < 0:
Zs[i] = "{0:.2f}".format(Zs[i]).replace("-", "m")
else:
Zs[i] = "p{0:.2f}".format(Zs[i])
Zs = [str(x).replace(".", "_") for x in Zs]
# Ordered array of ages
Ts = list(set([x.split("T")[1].split(".fits")[0].replace("_", ".")
for x in miles]))
Ts.sort()
Ts = [str(x).replace(".", "_") for x in Ts]
miles = []
metal_ages = []
for m in Zs:
for t in Ts:
filename = "Mun1_30Z{0}T{1}.fits".format(m, t)
if os.path.exists(filename):
miles.append(filename)
metal_ages.append([m.replace("_", ".").replace("p",
"+").replace("m", "-"),t.replace("_", ".")])
hdu = pf.open(miles[0])
ssp = hdu[0].data
h2 = hdu[0].header
lamRange2 = h2['CRVAL1'] + np.array([0.,h2['CDELT1']*(h2['NAXIS1']-1)])
sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp,
velscale=velscale)
templates = np.empty((sspNew.size,len(miles)))
for j in range(len(miles)):
hdu = pf.open(miles[j])
ssp = hdu[0].data
sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp,
velscale=velscale)
templates[:,j] = sspNew
os.chdir(current_dir)
return templates, logLam2, h2['CDELT1'], miles
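# Usage note (not part of the original code): the `templates` array returned
# above has shape (npix, ntemplates), one log-rebinned spectrum per column,
# which is the layout pPXF expects for its templates argument.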
def emission_templates(velscale):
""" Load files with stellar library used as templates. """
current_dir = os.getcwd()
    # Template directory is also set in config.py
os.chdir(template_dir)
emission = [x for x in os.listdir(".") if x.startswith("emission") and
x.endswith(".fits")]
emission.sort()
c = 299792.458
    FWHM_tem = 2.1  # FWHM used here; the nominal MILES library resolution is FWHM 2.54A
# Extract the wavelength range and logarithmically rebin one spectrum
# to the same velocity scale of the SAURON galaxy spectrum, to determine
# the size needed for the array which will contain the template spectra.
#
hdu = pf.open(emission[0])
ssp = hdu[0].data
h2 = hdu[0].header
lamRange2 = h2['CRVAL1'] + np.array([0.,h2['CDELT1']*(h2['NAXIS1']-1)])
sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp,
velscale=velscale)
templates = np.empty((sspNew.size,len(emission)))
for j in range(len(emission)):
hdu = pf.open(emission[j])
ssp = hdu[0].data
sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp,
velscale=velscale)
templates[:,j] = sspNew
# templates *= 1e5 # Normalize templates
os.chdir(current_dir)
return templates, logLam2, h2['CDELT1'], emission
def emission_line_template(lines, velscale, res=2.54, intens=None, resamp=15,
return_log=True):
lines = np.atleast_1d(lines)
    if intens is None:
intens = np.ones_like(lines) * 1e-5
current_dir = os.getcwd()
    # Template directory is also set in config.py
os.chdir(template_dir)
refspec = [x for x in os.listdir(".") if x.endswith(".fits")][0]
lamb = wavelength_array(refspec)
delta = lamb[1] - lamb[0]
    lamb2 = np.linspace(lamb[0] - delta / 2., lamb[-1] + delta / 2.,
                        len(lamb) * resamp)
sigma = res / (2. * np.sqrt(2. * np.log(2.)))
spec = np.zeros_like(lamb2)
for line, intensity in zip(lines, intens):
spec += intensity * np.exp(- (lamb2 - line)**2 / (2 * sigma * sigma))
spec = np.sum(spec.reshape(len(lamb), resamp), axis=1)
if not return_log:
return spec
specNew, logLam2, velscale = util.log_rebin([lamb[0], lamb[-1]], spec,
velscale=velscale)
os.chdir(current_dir)
return specNew
def make_fits(spec, outfile):
hdu = pf.PrimaryHDU(spec)
miles = [x for x in os.listdir(".") if x.startswith("Mun") and
x.endswith(".fits")][0]
w0 = pf.getval(miles, "CRVAL1")
deltaw = pf.getval(miles, "CDELT1")
pix0 = pf.getval(miles, "CRPIX1")
hdu.header["CRVAL1"] = w0
hdu.header["CDELT1"] = deltaw
hdu.header["CRPIX1"] = pix0
pf.writeto(outfile, hdu.data, hdu.header, clobber=True)
return
def wavelength_array(spec):
""" Produces array for wavelenght of a given array. """
w0 = pf.getval(spec, "CRVAL1")
deltaw = pf.getval(spec, "CDELT1")
pix0 = pf.getval(spec, "CRPIX1")
npix = pf.getval(spec, "NAXIS1")
return w0 + deltaw * (np.arange(npix) + 1 - pix0)
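# Worked example of the WCS arithmetic above (header values are illustrative,
# not taken from any file in this repository): with CRVAL1=3540.5, CDELT1=0.9,
# CRPIX1=1 and NAXIS1=4300, pixel 1 maps to 3540.5 Angstrom and pixel 4300 to
# 3540.5 + 0.9 * 4299 = 7409.6 Angstrom.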
if __name__ == "__main__":
os.chdir(template_dir)
stellar_templates(velscale)
em_OIII = emission_line_template([5006.84, 4958.91], velscale,
intens=[1e-5,0.33e-5], return_log=0)
em_hbeta = emission_line_template(4861.333, velscale, return_log=0)
em_NI = emission_line_template(5200.257, velscale, return_log=0)
# make_fits(em_OIII, "emission_OIII_fwhm2.54.fits")
# make_fits(em_hbeta, "emission_hbeta_fwhm2.54.fits")
# make_fits(em_NI, "emission_NI_fwhm2.54.fits")
| kadubarbosa/hydra1 | load_templates.py | Python | gpl-2.0 | 8,293 | ["Galaxy"] | a6cf57aca1d31916cbe76b9b8edd0efbe2daa67a4acd45a7107bc7c3d72bd929 |
__author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
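# A minimal usage sketch of the class below (the annotation path and category
# name are hypothetical, not part of this file):
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds))
#   coco.showAnns(anns)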
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = {}
self.cats = {}
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns = {}
imgToAnns = {}
catToImgs = {}
cats = {}
imgs = {}
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
# this can be changed by defaultdict
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# res.dataset['info'] = copy.deepcopy(self.dataset['info'])
# res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
tic = time.time()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
            print 'downloaded %d/%d images (t=%.1fs)'%(i+1, N, time.time()- tic)
| chenfsjz/coco | PythonAPI/pycocotools/coco.py | Python | bsd-2-clause | 14,801 | ["VisIt"] | 26212cc53bff675002dcc6eba12f0459e786e060bb3f6e8ceb89c24be09f6eb1 |
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-12 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960-2020/0.5x0.5/combined_sources_OC_biomass_1960-2020_greg.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i323: Organic carbon biomass burning emissions
#
# 321-340: full atmosphere
#
stash='m01s00i323'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='OC_biomass'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
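# The same area-weighted regridding pattern works for any source cube (sketch,
# assuming Iris >= 1.10; 'src' is a hypothetical cube, not defined in this script):
#   src.coord(axis='x').coord_system = grd.coord_system()
#   src.coord(axis='y').coord_system = grd.coord_system()
#   src.coord(axis='x').guess_bounds()
#   src.coord(axis='y').guess_bounds()
#   regridded = src.regrid(grd, iris.analysis.AreaWeighted())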
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='OC biomass burning emissions expressed as carbon'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='high_level'
ocube.attributes['highest_level']='21'
ocube.attributes['lowest_level']='1'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_OC_biomass_1960-2020_greg.nc'
ocube.attributes['title']='Time-varying monthly 3D emissions of organic carbon from 1960 to 2020 (from biomass burning sources only)'
ocube.attributes['File_version']='v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian')
ocube.coord(axis='t').points=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5,
228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5,
624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989,
1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5,
1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5,
1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993,
2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5,
2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662,
2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5,
3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333,
3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5,
3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5,
4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337,
4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5,
4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007,
5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5,
5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5,
5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011,
6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5,
6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680,
6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5,
7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5,
7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5,
7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5,
8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355,
8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5,
8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025,
9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5,
9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5,
9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028,
10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5,
10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5,
10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912,
10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216,
11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5,
11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794,
11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099,
12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5,
12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5,
12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5,
13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316,
13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620,
13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925,
13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5,
14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5,
14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5,
14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081,
15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5,
15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599,
15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872,
15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5,
16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391,
16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5,
16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907,
16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5,
17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425,
17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5,
17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943,
17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217,
18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5,
18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733,
18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5,
19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5,
19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5,
19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769,
19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043,
20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5,
20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5,
20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5,
20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078,
21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351,
21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595,
21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869,
21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5,
22143.5, 22174, 22204.5, 22235, 22265.5])
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5,
228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5,
624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989,
1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5,
1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5,
1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993,
2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5,
2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662,
2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5,
3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333,
3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5,
3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5,
4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337,
4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5,
4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007,
5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5,
5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5,
5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011,
6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5,
6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680,
6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5,
7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5,
7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5,
7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5,
8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355,
8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5,
8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025,
9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5,
9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5,
9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028,
10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5,
10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5,
10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912,
10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216,
11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5,
11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794,
11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099,
12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5,
12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5,
12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5,
13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316,
13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620,
13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925,
13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5,
14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5,
14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5,
14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081,
15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5,
15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599,
15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872,
15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5,
16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391,
16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5,
16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907,
16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5,
17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425,
17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5,
17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943,
17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217,
18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5,
18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733,
18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5,
19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5,
19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5,
19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769,
19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043,
20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5,
20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5,
20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5,
20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078,
21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351,
21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595,
21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869,
21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5,
22143.5, 22174, 22204.5, 22235, 22265.5],dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want time to be an unlimited (cattable) dimension in the output file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name','highest_level','lowest_level'])
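# To inspect the result, the file written above can be read back with Iris
# (sketch, assuming the script has been run in the current directory):
#   cube = iris.load_cube('ukca_emiss_OC_biomass.nc')
#   print(cube)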
# end of script
| acsis-project/emissions | emissions/python/timeseries_1960-2020/regrid_OC_biomass_emissions_n96e_greg.py | Python | gpl-3.0 | 19,112 | ["NetCDF"] | 5e81b803ef1547b304e0f928df0d0446ebc7ccf3489810e3b0152709cf3a413f |
import os
import glob
from pypers.core.step import CmdLineStep
class VariantRecalibrator(CmdLineStep):
spec = {
"name": "VariantRecalibrator",
"version": "2.3-9",
"descr": [
"Runs gatk vcf VariantRecalibrator,",
"generating recal and tranches files to be used by ApplyRecalibration"
],
"args":
{
"inputs": [
{
"name" : "input_file",
"type" : "file",
"descr" : "input vcf files",
},
{
'name' : 'ref_path',
'type' : 'ref_genome',
'tool' : 'gatk',
'descr' : 'path to the directory containing the reference genome'
}
],
"outputs": [
{
"name" : "output_recal",
"type" : "file",
"value" : "{{input_file}}.recal.txt",
"descr" : "variant recal file",
},
{
"name" : "output_tranches",
"type" : "file",
"value" : "{{input_file}}.tranches.txt",
"descr" : "variant trache file",
}
],
"params" : [
{
"name" : "jvm_args",
"value" : "-Xmx{{jvm_memory}}g -Djava.io.tmpdir={{output_dir}}",
"descr" : "java virtual machine arguments",
"readonly" : True
},
{
'name' : 'gatk_jar',
'type' : 'file',
'value' : '/software/pypers/GATK/GenomeAnalysisTKLite-2.3-9-gdcdccbb/GenomeAnalysisTKLite.jar',
'descr' : 'gatk genome analyser jar file',
"readonly" : True
},
{
'name' : 'annotation_params',
'type' : 'str',
'value' : '-an QD -an HaplotypeScore -an MQRankSum -an ReadPosRankSum -an FS -an MQ',
'descr' : 'extra parameter for the gatk genome analysis',
"readonly" : True
},
{
'name' : 'max_gaussian',
'type' : 'int',
'value' : 4,
'descr' : 'the max gaussian param'
},
{
'name' : 'percent_bad',
                'type'  : 'float',
'value' : 0.05,
'descr' : 'the percentBad param'
},
{
'name' : 'mode',
'type' : 'str',
'value' : 'SNP',
'descr' : 'the mode param'
},
{
'name' : 'resources',
'type' : 'str',
'value' : "--resource:hapmap,known=false,training=true,truth=true,prior=15.0 \
/Public_data/EXTERNAL_DATA/1000G_GENOME_REFERENCES/v37/GATK_bundle_v1.5/1.5/b37/hapmap_3.3.b37.sites.vcf \
--resource:omni,known=false,training=true,truth=false,prior=12.0 \
/Public_data/EXTERNAL_DATA/1000G_GENOME_REFERENCES/v37/GATK_bundle_v1.5/1.5/b37/1000G_omni2.5.b37.sites.vcf \
--resource:dbsnp,known=true,training=false,truth=false,prior=6.0 \
/Public_data/EXTERNAL_DATA/1000G_GENOME_REFERENCES/v37/GATK_bundle_v1.5/1.5/b37/dbsnp_135.b37.vcf",
'descr' : 'extra parameter add to the gatk genome analysis command',
"readonly" : True
}
]
},
"cmd": [
"/usr/bin/java {{jvm_args}} -jar {{gatk_jar}} ",
"-T VariantRecalibrator -R {{ref_path}} ",
"-input {{input_file}} -recalFile {{output_recal}} -tranchesFile {{output_tranches}} ",
"{{annotation_params}} --maxGaussians {{max_gaussian}} -percentBad {{percent_bad}} ",
"-mode {{mode}} {{resources}}"
],
"requirements": {
"memory": "8"
}
}
def process(self):
self.submit_cmd(self.render())
if not os.path.exists(self.output_recal):
raise Exception("Output file not existing: %s" % self.output_recal)
elif not os.path.exists(self.output_tranches):
raise Exception("Output file not existing: %s" % self.output_tranches)
else:
self.log.info("Step successfully completed")
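# Sketch of how the templated "cmd" above is rendered at run time (values are
# illustrative, not from this repository): with input_file=/data/sample.vcf,
# jvm_memory=8 and output_dir=/tmp/run1, "{{output_recal}}" expands to
# "/data/sample.vcf.recal.txt", "{{jvm_args}}" to
# "-Xmx8g -Djava.io.tmpdir=/tmp/run1", and the command begins
# "/usr/bin/java -Xmx8g ... -jar .../GenomeAnalysisTKLite.jar -T VariantRecalibrator ...".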
| frankosan/pypers | pypers/steps/gatk/variantrecalibrator.py | Python | gpl-3.0 | 4,753 | ["Gaussian"] | 907c4403d4eb368514bb6ad276604655c4f6f89fea491f778bb79dd8210ba56d |
#!/usr/bin/env python
"""
Functions to generate ROMS netcdf files
Written by Brian Powell on 04/26/13
Copyright (c)2017 University of Hawaii under the MIT-License.
"""
import os
import re
import netCDF4
import numpy as np
from datetime import datetime
from seapy.lib import default_epoch
from seapy.cdl_parser import cdl_parser
from seapy.roms import lib
from warnings import warn
"""
Module variables
"""
_cdl_dir = os.path.dirname(lib.__file__)
_cdl_dir = "/".join((('.' if not _cdl_dir else _cdl_dir), "cdl/"))
_format = "NETCDF4_CLASSIC"
def ncgen(filename, dims=None, vars=None, attr=None, title=None,
clobber=False, format=_format):
"""
Create a new netcdf file with the given definitions. Need to define
the dimensions, the variables, and the attributes.
Parameters
----------
filename : string
name and path of file to create
dims : dict
dictionary of dimensions with dimension name as keys, and the value
as the length of the dimension. NOTE: 0 value means UNLIMITED.
vars: list of dictionaries
each variable to define is a dictionary that contains three keys:
name: string name of variable
type: string type (float, double, etc.)
dims: comma separated string of dimensions ("ocean_time, eta_rho")
attr: dictionary of variable attributes where the key is
the attribute name and the value is the attribute string
attr: dict, optional
optional dictionary of global attributes for the netcdf file:
key is the attribute name and the value is the attribute string
title: string, optional
netcdf attribute title
clobber: bool, optional
If True, destroy existing file
format: string, optional
NetCDF format to use. Default is NETCDF4_CLASSIC
Returns
-------
nc, netCDF4 object
Examples
--------
>>> dims = {"ocean_time":0, "eta_rho":120, "xi_rho":100}
>>> vars = [ {"name":"eta_slice", "type":"double",
"dims":"ocean_time, eta_rho",
"attr":{"units":"degrees Celcius"}},
{"name":"xi_slice", "type":"double",
"dims":"ocean_time, xi_rho",
"attr":{"units":"degrees Celcius"}} ]
>>> seapy.roms.ncgen("test.nc", dims=dims, vars=vars, title="Test")
"""
vars = np.atleast_1d(vars)
if dims is None:
dims = {}
if attr is None:
attr = {}
# Create the file
if not os.path.isfile(filename) or clobber:
_nc = netCDF4.Dataset(filename, "w", format=format)
# Loop over the dimensions and add them
for dim in dims:
_nc.createDimension(dim, dims[dim])
# Loop over the variables and add them
for var in vars:
if var["dims"][0]:
nvar = _nc.createVariable(var["name"], var["type"],
var["dims"])
else:
nvar = _nc.createVariable(var["name"], var["type"])
try:
for key in var["attr"]:
setattr(nvar, key, var["attr"][key])
except KeyError:
pass
# Add global attributes
for a in attr:
setattr(_nc, a, attr[a])
try:
_nc.author = os.getenv('USER') or \
os.getenv('LOGNAME') or \
os.getenv('USERNAME') or \
os.getlogin() or \
'nobody'
except (AttributeError, IOError, OSError, FileNotFoundError) as e:
_nc.author = 'nobody'
_nc.history = datetime.now().strftime(
"Created on %a, %B %d, %Y at %H:%M")
if title is not None:
_nc.title = title
_nc.close()
else:
warn(filename + " already exists. Using existing definition")
return netCDF4.Dataset(filename, "a")
def _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho):
"""
internal method: Set grid dimensions
"""
if "xi_rho" in dims.keys():
dims["xi_rho"] = xi_rho
if "xi_u" in dims.keys():
dims["xi_u"] = xi_rho - 1
if "xi_v" in dims.keys():
dims["xi_v"] = xi_rho
if "xi_psi" in dims.keys():
dims["xi_psi"] = xi_rho - 1
if "eta_rho" in dims.keys():
dims["eta_rho"] = eta_rho
if "eta_u" in dims.keys():
dims["eta_u"] = eta_rho
if "eta_v" in dims.keys():
dims["eta_v"] = eta_rho - 1
if "eta_psi" in dims.keys():
dims["eta_psi"] = eta_rho - 1
if "s_rho" in dims.keys():
dims["s_rho"] = s_rho
if "s_w" in dims.keys():
dims["s_w"] = s_rho + 1
return dims
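# Illustrative example (added comment, not in the original module): for
# eta_rho=120, xi_rho=100, s_rho=20 the derived sizes become xi_u=99,
# xi_psi=99, eta_v=119, eta_psi=119 and s_w=21; dimension names missing
# from `dims` are simply left untouched.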
def _set_time_ref(vars, timevar, reftime, cycle=None):
"""
internal method: Set time reference
"""
if isinstance(timevar, str):
timevar = [timevar]
for tvar in timevar:
for nvar in vars:
if nvar["name"] == tvar:
if "units" in nvar["attr"]:
t = re.findall(r'(\w+) since .*', nvar["attr"]["units"])
nvar["attr"]["units"] = \
"{:s} since {:s}".format(t[0], str(reftime))
else:
nvar["attr"]["units"] = \
"days since {:s}".format(str(reftime))
if cycle is not None:
nvar["attr"]["cycle_length"] = cycle
return vars
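# Illustrative example (added comment, not in the original module): for a
# variable whose units are "days since 0001-01-01", calling
# _set_time_ref(vars, "ocean_time", datetime(2000, 1, 1)) rewrites the units
# to "days since 2000-01-01 00:00:00"; a variable with no prior "units"
# attribute falls back to "days since <reftime>".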
def _create_generic_file(filename, cdl, eta_rho, xi_rho, s_rho,
reftime=None, clobber=False, title="ROMS"):
"""
internal method: Generic file creator that uses ocean_time
"""
# Generate the Structure
dims, vars, attr = cdl_parser(cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
if reftime is not None:
vars = _set_time_ref(vars, "ocean_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_psource(filename, nriver=1, s_rho=5,
reftime=default_epoch, clobber=False, cdl=None, title="My River"):
"""
Create a new, blank point source file
Parameters
----------
filename : string
name and path of file to create
nriver : int, optional
number of rivers to put in file
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "frc_rivers.cdl" if cdl is None else cdl)
# Fill in the appropriate river values
dims["river"] = nriver
dims["s_rho"] = s_rho
vars = _set_time_ref(vars, "river_time", reftime)
# Create the river file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_grid(filename, eta_rho=10, xi_rho=10, s_rho=1, clobber=False,
cdl=None, title="My Grid"):
"""
Create a new, blank grid file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "roms_grid.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
# Create the grid file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_adsen(filename, eta_rho=10, xi_rho=10, s_rho=1,
reftime=default_epoch, clobber=False, cdl=None, title="My Adsen"):
"""
Create a new adjoint sensitivity file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Create the general file
return _create_generic_file(filename, _cdl_dir + "adsen.cdl" if cdl is None else cdl,
eta_rho, xi_rho, s_rho, reftime, clobber, title)
def create_bry(filename, eta_rho=10, xi_rho=10, s_rho=1,
reftime=default_epoch, clobber=False, cdl=None, title="My BRY"):
"""
Create a bry forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "bry_unlimit.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "bry_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_clim(filename, eta_rho=10, xi_rho=10, s_rho=1,
reftime=default_epoch, clobber=False, cdl=None, title="My CLIM"):
"""
Create a climatology forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "clm_ts.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "clim_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_frc_bulk(filename, lat=10, lon=10,
reftime=default_epoch, clobber=False, cdl=None,
title="My Forcing"):
"""
Create a bulk flux forcing file
Parameters
----------
filename : string
name and path of file to create
lat: int, optional
number of latitudinal rows
lon: int, optional
number of longitudinal columns
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "frc_bulk.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims["lat"] = lat
dims["lon"] = lon
vars = _set_time_ref(vars, "frc_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_frc_direct(filename, eta_rho=10, xi_rho=10,
reftime=default_epoch, clobber=False, cdl=None,
title="My Forcing"):
"""
Create a direct surface forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "frc_direct.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = {'y_rho': eta_rho,
'y_u': eta_rho,
'y_v': eta_rho - 1,
'x_rho': xi_rho,
'x_u': xi_rho - 1,
'x_v': xi_rho,
'frc_time': 0}
vars = _set_time_ref(vars, 'frc_time', reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_frc_flux(filename, eta_rho=10, xi_rho=10, ntimes=1,
cycle=None, reftime=default_epoch, clobber=False,
cdl=None, title="My Flux"):
"""
Create a surface flux forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
ntimes: int, optional
number of time records (climatology files do not have unlimited
dimension)
cycle: int or None, optional
The number of days before cycling the forcing records
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "frc_fluxclm.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, 1)
times = ("srf_time", "shf_time", "swf_time", "sss_time")
for n in times:
dims[n] = ntimes
vars = _set_time_ref(vars, times, reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_frc_srelax(filename, eta_rho=10, xi_rho=10, s_rho=1, cycle=None,
reftime=default_epoch, clobber=False, cdl=None,
title="My Srelaxation"):
"""
Create a Salt Relaxation forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
cycle: int or None, optional
The number of days before cycling the forcing records
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "frc_srelax.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "sss_time", reftime, cycle)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_frc_qcorr(filename, eta_rho=10, xi_rho=10, s_rho=1, cycle=None,
reftime=default_epoch, clobber=False, cdl=None,
title="My Qcorrection"):
"""
Create a Q Correction forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
cycle: int or None, optional
The number of days before cycling the forcing records
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "frc_qcorr.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "sst_time", reftime, cycle)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_frc_wind(filename, eta_rho=10, xi_rho=10, s_rho=1, cycle=None,
reftime=default_epoch, clobber=False, cdl=None,
title="My Winds"):
"""
Create a surface wind stress forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
cycle: int or None, optional
The number of days before cycling the forcing records
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "frc_windstress.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "sms_time", reftime, cycle)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_frc_wave(filename, eta_rho=10, xi_rho=10, reftime=default_epoch,
clobber=False, cdl=None, title="My Waves"):
"""
Create a surface wave forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "frc_wave.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho=1)
vars = _set_time_ref(vars, "wave_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_tide(filename, eta_rho=10, xi_rho=10, s_rho=1, ntides=1,
reftime=default_epoch, clobber=False,
title="My Tides"):
"""
Create a barotropic tide forcing file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
ntides: int, optional
number of tidal frequencies to force with
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(_cdl_dir + "frc_tides.cdl")
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
dims["tide_period"] = ntides
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_ini(filename, eta_rho=10, xi_rho=10, s_rho=1,
reftime=default_epoch, clobber=False, cdl=None, title="My Ini"):
"""
Create an initial condition file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "ini_hydro.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "ocean_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_nudge_coef(filename, eta_rho=10, xi_rho=10, s_rho=1, clobber=False,
cdl=None, title="My Nudging"):
"""
Create a nudging coefficients file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "nudge_coef.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_da_obs(filename, state_variable=20, survey=1, provenance=None,
clobber=False, cdl=None, title="My Observations"):
"""
Create an assimilation observations file
Parameters
----------
filename : string
name and path of file to create
survey: int, optional
number of surveys in the file
state_variable: int, optional
number of state variables in the observations
provenance: string, optional
Description of the provenance values
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "s4dvar_obs.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims["survey"] = survey
dims["state_variable"] = state_variable
# Set the provenance values in the global attributes
if provenance is not None:
attr["obs_provenance"] = str(provenance)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title, format="NETCDF3_64BIT")
# Return the new file
return _nc
def create_da_ray_obs(filename, ray_datum=1, provenance="None",
reftime=default_epoch, clobber=False,
cdl=None, title="My Observations"):
"""
Create an acoustic ray assimilation observations file
Parameters
----------
filename : string
name and path of file to create
ray_datum: int, optional
Number of rays to assimilate
provenance: string, optional
Description of the provenance values
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "s4dvar_obs_ray.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims["ray_datum"] = ray_datum
vars = _set_time_ref(vars, "obs_time", reftime)
# Set the provenance values in the global attributes
attr["obs_provenance"] = provenance
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_da_bry_std(filename, eta_rho=10, xi_rho=10, s_rho=1, bry=4,
reftime=default_epoch, clobber=False, cdl=None,
title="My BRY STD"):
"""
Create a boundaries standard deviation file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
bry: int, optional
number of open boundaries to specify
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "s4dvar_std_b.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
dims["IorJ"] = max(eta_rho, xi_rho)
dims["boundary"] = bry
vars = _set_time_ref(vars, "ocean_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_da_frc_std(filename, eta_rho=10, xi_rho=10, s_rho=1,
reftime=default_epoch, clobber=False,
cdl=None, title="My FRC STD"):
"""
Create a forcing standard deviation file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "s4dvar_std_f.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "ocean_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_da_ini_std(filename, eta_rho=10, xi_rho=10, s_rho=1,
reftime=default_epoch, clobber=False,
cdl=None, title="My INI STD"):
"""
Create an initialization standard deviation file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "s4dvar_std_i.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "ocean_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_da_model_std(filename, eta_rho=10, xi_rho=10, s_rho=1,
reftime=default_epoch, clobber=False,
cdl=None, title="My Model STD"):
"""
Create a time-varying model standard deviation file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "s4dvar_std_m.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "ocean_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_zlevel_grid(filename, lat=10, lon=10, depth=1,
clobber=False, cdl=None,
title="Zlevel Grid", dims=2):
"""
Create z-level grid file
Parameters
----------
filename : string
name and path of file to create
lat: int, optional
number of latitudinal rows
lon: int, optional
number of longitudinal columns
depth: int, optional
number of z-levels
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
dims: int, optional
number of dimensions to use for lat/lon
Returns
-------
nc, netCDF4 object
"""
if cdl is None:
if dims == 1:
cdlfile = _cdl_dir + "zlevel_1d_grid.cdl"
else:
cdlfile = _cdl_dir + "zlevel_2d_grid.cdl"
else:
cdlfile = cdl
# Generate the Structure
dims, vars, attr = cdl_parser(cdlfile)
# Fill in the appropriate dimension values
dims["lat"] = lat
dims["lon"] = lon
dims["depth"] = depth
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
def create_zlevel(filename, lat=10, lon=10, depth=1,
reftime=default_epoch,
clobber=False, cdl=None,
title="Zlevel Model Data", dims=2):
"""
Create a z-level model data file
Parameters
----------
filename : string
name and path of file to create
lat: int, optional
number of latitudinal rows
lon: int, optional
number of longitudinal columns
depth: int, optional
number of z-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
dims: int, optional
number of dimensions to use for lat/lon
Returns
-------
nc, netCDF4 object
"""
if cdl is None:
if dims == 1:
cdlfile = _cdl_dir + "zlevel_1d.cdl"
else:
cdlfile = _cdl_dir + "zlevel_2d.cdl"
else:
cdlfile = cdl
# Generate the Structure
dims, vars, attr = cdl_parser(cdlfile)
# Fill in the appropriate dimension values
dims["lat"] = lat
dims["lon"] = lon
dims["depth"] = depth
vars = _set_time_ref(vars, "time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc
if __name__ == "__main__":
grid = create_zlevel("test.nc")
|
ocefpaf/seapy
|
seapy/roms/ncgen.py
|
Python
|
mit
| 38,868
|
[
"Brian",
"NetCDF"
] |
0fd10d653710c4f40bce781f28c8daa38e7b450d28dfae67d94a8ea5e4ee3307
|
#!/usr/bin/env python
import argparse
import sys
import pysam
import numpy
import timeit
import re
import networkx as nx
start = timeit.default_timer()
## EXAMPLE COMMAND LINE
# python correct_bam_barcodes_21.11.2016.py --infile PATH/TO/test.bam --outfile PATH/TO/corrected.test.bam --barcodes BOTH
# Function to get the amount of differences
def similarity(a, b):
return sum(x != y for x, y in zip(a, b))
def keywithmaxval(d):
v = list(d.values())
k = list(d.keys())
return k[v.index(max(v))]
def update_tag(TAG, VALUE):
return [VALUE if x[0] == VALUE[0] else x for x in TAG]
def get_edge(CODE, LIST, Errors):
EDGE = [[CODE, LIST[x]] for x in range(0, len(LIST)) if (similarity(LIST[x], CODE) <= Errors)]
return EDGE
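# Illustrative example (added comment, not part of the original script):
#   similarity("ACGT", "ACGA") == 1            # one mismatching position
#   get_edge("ACGT", ["ACGA", "TTTT"], 1) == [["ACGT", "ACGA"]]
# i.e. an edge is returned for every barcode in LIST that lies within
# `Errors` mismatches of CODE.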
def extract_barcode(READ, BC_TYPE): # Individual read. It returns the barcode of the read
NAME = str(READ.qname)
BC = NAME.split(':')[-1]
BC1 = BC.split(',')[0]
BC2 = BC.split(',')[1]
# Grouping by barcodes
if (BC_TYPE == "BEGINNING"):
bc = BC1
elif (BC_TYPE == "END"):
bc = BC2
elif (BC_TYPE == "BOTH"):
bc = BC1 + BC2
##print bc
return (bc)
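# Illustrative example (added comment): for a read named
# "M01234:...:ACGT,TTAA" the barcode field is the last colon-separated
# token, so BC_TYPE "BEGINNING" yields "ACGT", "END" yields "TTAA" and
# "BOTH" yields "ACGTTTAA".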
def extract_bc_groups(BC, BC_NETWORK): # Input, list of reads that start are end are equal
BARCODE_DICT = {}
sortedKeyList = sorted(BC.keys(), key=lambda s: len(BC.get(s)), reverse=True)
while len(BC) > 0:
# The reads are stored in dict BC. Their keys are the Barcodes.
# We get the most frequent KEY (Barcode)
MAX = sortedKeyList[0]
# We create a new key of the most common Barcode and we add later all the similar reads.
BARCODE_DICT[
MAX] = list() # Creating key in the hash based on our barcode where reads of this group will be saved
SIM = list(BC_NETWORK.adj[MAX])
for i in SIM:
BARCODE_DICT[MAX].extend(BC[i]) # Grouping reads based on similarity of the barcodes
del BC[i] # Removing barcodes already considered
BC_NETWORK.remove_node(i)
sortedKeyList.remove(i)
return BARCODE_DICT # Dictionary with Barcode as a key and reads as values
def ReverseComplement(seq):
seq_dict = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
return "".join([seq_dict[base] for base in reversed(seq)])
# Reduce mapping quality to < 20
def reduce_mapq(read):
if read.mapq >= 20:
read.mapq = 19
NEW_READ = read
else:
NEW_READ = read
return (NEW_READ)
def ascii2dec(ASCII):
qual = [ord(i) - 33 for i in ASCII]
return qual
def most_common_base(LIST, QUALITIES, minBQ):
QUALITIES_NATIVE = QUALITIES
HQ = [x for x in range(0, len(QUALITIES_NATIVE)) if QUALITIES_NATIVE[x] >= minBQ]
result_list = [LIST[i] for i in HQ]
result_qual = [QUALITIES[i] for i in HQ]
BASE = max(sorted(set(result_list)), key=result_list.count)
NUM_DIFF_BASES = len(set(result_list))
DIFF_COUNT = len([x for x in range(0, len(result_list)) if result_list[x] != BASE])
BASE_COUNT = len([x for x in range(0, len(result_list)) if result_list[x] == BASE])
return BASE, NUM_DIFF_BASES, BASE_COUNT, DIFF_COUNT
def most_common_base_low_qual(LIST):
BASE = max(sorted(set(LIST)), key=LIST.count)
NUM_DIFF_BASES = len(set(LIST))
return BASE, NUM_DIFF_BASES
def recalculate_NM(READ):
# # There is a problem when the cigar length does not fit to the MD tag
#
# print READ.reference_name, READ.pos
# print READ.cigarstring, get_md_reference_length(read.get_tag("MD"))
#
# refSeq = READ.get_reference_sequence()
# readSeq = READ.query_alignment_sequence
#
#
# if ('I' not in READ.cigarstring and 'I' not in READ.cigarstring and len(refSeq) == len(readSeq)):
# New_NM = sum(x.upper()!=y.upper() for x,y in zip(refSeq,readSeq))
# else:
# New_NM = READ.opt("NM")
New_NM = READ.opt("NM")
return New_NM
def get_qualities(BASE, BASES, QUALITIES):
QUAL = [(ord(QUALITIES[x]) - 33) for x in range(0, len(BASES)) if BASES[x] == BASE]
# LIST = [BASES[x] for x in range(0,len(BASES)) if BASES[x] == BASE]
return QUAL
# Change all base qualities of the read to 0 ("!")
def error_read_qual(read):
READ = read
qualities = READ.qual
LEN = len(qualities)
QUAL = ''.join(["!" for i in xrange(LEN)])
READ.qual = QUAL
return (READ)
# Change base errors to Ns
def error_read_seq(read):
READ = read
seq = READ.seq
LEN = len(seq)
SEQ = ''.join(["N" for i in xrange(LEN)])
READ.seq = SEQ
return (READ)
# Check if there is at least one high quality base
def low_quality_base_check(QUALITIES, minBQ):
return (max(QUALITIES) >= minBQ)
# Get consensus read qualities
def consensus_quality(QUALITIES, minBQ, ERRORS, STEP):
QUALITIES_NATIVE = QUALITIES
# How many bases with high good quality
COPIES = len([x for x in range(0, len(QUALITIES_NATIVE)) if QUALITIES_NATIVE[x] >= minBQ])
MAX_QUAL = max(QUALITIES_NATIVE)
if (MAX_QUAL < minBQ):
NEW_QUAL = MAX_QUAL
else:
# We label errors with base quality 0
# For consensus bases, we take the max as the new quality base
if (STEP == 1 or STEP == 2):
if ((ERRORS >= 1 and float(ERRORS) / (COPIES + ERRORS) > 0.25) or ERRORS >= 3):
NEW_QUAL = 0
else:
# We take the max base quality as the consensus one
MAX1 = max(QUALITIES_NATIVE)
MAX = max(30, MAX1)
NEW_QUAL = MAX
return (NEW_QUAL)
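# Illustrative example (added comment, not part of the original script): with
# minBQ=10 and supporting base qualities [35, 12, 2], ERRORS=0 gives COPIES=2
# and a consensus quality of max(30, 35) = 35; with ERRORS=1 instead,
# 1/(2+1) > 0.25, so the consensus quality becomes 0 (which the caller turns
# into an 'N' base when --n is given).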
# Function to correct reads grouped by barcodes
def GET_FINAL_READ(reads, minBQ, STEP, SET_N):
CONSENSUS_SEQ = list()
CONSENSUS_QUAL = list()
# Getting the amount
# print(len(reads))
# Objects to save info for lexicoorder of the reads
LIST_READNAMES = list()
DICT_READS = {}
READNAME = ''
FLAG = ''
DICT_FLAG = {}
DP1_TAG = list()
if (len(reads) <= 1):
for i in reads:
# Encoding quality
CONSENSUS_READ = i
# We add info about the amount of duplicates per family group
count = len(reads)
color = ['230,242,255', '179,215,255', '128,187,255', '77,160,255', '26,133,255']
current_color = '0,0,0'
if (count > 5):
current_color = color[4]
else:
current_color = color[count - 1]
# adding barcodes to tag in bam file
if (STEP == 1):
CONSENSUS_READ.tags += [('DP', count)]
CONSENSUS_READ.tags += [('YC', current_color)]
elif (STEP == 2):
CONSENSUS_READ.tags += [('DF', count)]
# Info about barcode groups
LOG_INFO = (CONSENSUS_READ.qname, str(CONSENSUS_READ.pos), str(len(reads)))
LOG_INFO = "\t".join(LOG_INFO) + "\n"
else:
REF_LENGTH = [];
#workaround for python 3 support
for k in reads:
if k.reference_length is None:
tmp_reference_length = 0;
else:
tmp_reference_length = k.reference_length
if k.cigarstring is None:
tmp_cigar = ''
else:
tmp_cigar = k.cigarstring;
REF_LENGTH.append( (tmp_reference_length, k.rlen, tmp_cigar) )
MAX_INFO = max(sorted(set(REF_LENGTH), reverse=True), key=REF_LENGTH.count)
# Reference length
MAX_REF_LENGTH = MAX_INFO[0]
# Read length
MAX_READ_LENGTH = MAX_INFO[1]
# Max cigarstring
MAX_CIGAR_LENGTH = MAX_INFO[2]
SEQ = {}
QUAL = {}
MAPQ = list()
LQ_read = 0
# Variable count for compare indels between duplicates
# The first read is taken as reference
# if (reads[0].cigarstring == None):
# Ind_count = 0
# else:
# Ind_count = reads[0].cigarstring.count("I") + reads[0].cigarstring.count("D")
for i in reads:
# In case the amount of indels differs between duplicates, we take the first read in lexicographic order and set all of its base qualities to 0
if ((MAX_REF_LENGTH != i.reference_length) or (MAX_READ_LENGTH != i.rlen) or (MAX_CIGAR_LENGTH != i.cigarstring)): # and (i.cigarstring == None or "I" in i.cigarstring or "D" in i.cigarstring)):
# if ((MAX_REF_LENGTH != i.reference_length or MAX_READ_LENGTH != i.rlen) and (i.cigarstring == None or "I" in i.cigarstring or "D" in i.cigarstring)):
# print 'BULSHIT READ'
# print MAX_REF_LENGTH, len(i.get_reference_sequence()), i.rlen
try:
LAST_READ
except NameError:
LAST_READ = i
READNAME = i.qname
FLAG = i.flag
DICT_READS[READNAME] = i
DICT_FLAG[READNAME] = FLAG
LIST_READNAMES.append(READNAME)
LQ_read = LQ_read + 1
continue
# else:
# print 'PASS READ'
# print MAX_REF_LENGTH, len(i.get_reference_sequence()), i.rlen
# Saving the amount of duplicates from first round of correction
LAST_READ = i
if (STEP == 2):
DP1_TAG.append(i.opt("DP"))
# Adding reads to a hash to later, sort them in lexicographical order
READNAME = i.qname
FLAG = i.flag
DICT_READS[READNAME] = i
DICT_FLAG[READNAME] = FLAG
LIST_READNAMES.append(READNAME)
LEN = i.rlen
##print LEN
seq = i.seq
qual = i.qual
MAPQ.append(i.mapq)
SOFT = i.pos - i.qstart
for b in range(0, LEN):
BASE = seq[b]
BASE_QUAL = qual[b]
B = SOFT + b
if B in SEQ:
SEQ[B].append(BASE)
QUAL[B].append(BASE_QUAL)
else:
SEQ[B] = list()
QUAL[B] = list()
SEQ[B].append(BASE)
QUAL[B].append(BASE_QUAL)
for position in sorted(SEQ):
CONSENSUS_BASE = ""
NUM_DIFF_BASES = ""
CONSENSUS_BASE_COUNT = ""
DIFF_COUNT = ""
Q = ascii2dec(QUAL[position])
if (low_quality_base_check(Q, minBQ)):
BASE = most_common_base(SEQ[position], Q, minBQ)
CONSENSUS_BASE = BASE[0]
NUM_DIFF_BASES = BASE[1]
CONSENSUS_BASE_COUNT = BASE[2]
DIFF_COUNT = BASE[3]
QUALITIES = get_qualities(CONSENSUS_BASE, SEQ[position], QUAL[position])
if (NUM_DIFF_BASES < 3 and CONSENSUS_BASE_COUNT > DIFF_COUNT):
CONSENSUS_QUALITY_num = consensus_quality(QUALITIES, minBQ, DIFF_COUNT, STEP)
if (SET_N and CONSENSUS_QUALITY_num == 0):
CONSENSUS_QUALITY_num = QUALITIES[0]
CONSENSUS_BASE = "N"
CONSENSUS_QUALITY_ascii = chr(CONSENSUS_QUALITY_num + 33)
CONSENSUS_QUAL.append(CONSENSUS_QUALITY_ascii)
CONSENSUS_SEQ.append(CONSENSUS_BASE)
elif (NUM_DIFF_BASES >= 3 or CONSENSUS_BASE_COUNT <= DIFF_COUNT):
CONSENSUS_QUALITY_num = 0
if (SET_N):
CONSENSUS_QUALITY_num = QUALITIES[0]
CONSENSUS_BASE = "N"
CONSENSUS_QUALITY_ascii = chr(CONSENSUS_QUALITY_num + 33)
CONSENSUS_QUAL.append(CONSENSUS_QUALITY_ascii)
CONSENSUS_SEQ.append(CONSENSUS_BASE)
else:
print("Error")
# print SEQ[position], BASE
else:
CONSENSUS_BASE = most_common_base_low_qual(SEQ[position])[0]
CONSENSUS_QUALITY_num = 0
if (SET_N):
QUALITIES = get_qualities(CONSENSUS_BASE, SEQ[position], QUAL[position])
CONSENSUS_QUALITY_num = QUALITIES[0]
CONSENSUS_BASE = "N"
CONSENSUS_SEQ.append(CONSENSUS_BASE)
CONSENSUS_QUALITY_ascii = chr(CONSENSUS_QUALITY_num + 33)
CONSENSUS_QUAL.append(CONSENSUS_QUALITY_ascii)
# We take the info from the last read in the group
SORTED_READNAMES = sorted(LIST_READNAMES)
##print LIST_READNAMES
READ_COUNT = 0
# We take as template the last HQ read, but we change the read name and the flag
CONSENSUS_READ = LAST_READ
CONSENSUS_READ.qname = SORTED_READNAMES[0]
# Mapping quality == mean of reads' mapq
if len(MAPQ) > 0:
CONSENSUS_READ.mapq = int(round(float(sum(MAPQ)) / len(MAPQ)))
else:
CONSENSUS_READ.mapq = 0
CONSENSUS_READ.flag = DICT_FLAG[SORTED_READNAMES[0]]
# Consensus seq per position
CONSENSUS_SEQ = ''.join(CONSENSUS_SEQ)
CONSENSUS_READ.seq = CONSENSUS_SEQ
# Consensus base qualities are derived from the supporting reads' base qualities.
# If a position shows conflicting bases, its base quality is set to 0.
CONSENSUS_QUAL = ''.join(CONSENSUS_QUAL)
CONSENSUS_READ.qual = CONSENSUS_QUAL
# We add info about the amount of duplicates per family group
count = len(reads) - LQ_read
color = ['230,242,255', '179,215,255', '128,187,255', '77,160,255', '26,133,255']
current_color = '0,0,0'
if (count > 5):
current_color = color[4]
else:
current_color = color[count - 1]
# Different correction type
if (STEP == 1):
# New_NM = recalculate_NM(CONSENSUS_READ)
# Add DP tag
CONSENSUS_READ.tags += [('DP', count)]
# Add color
CONSENSUS_READ.tags += [('YC', current_color)]
## Update NM tag
# CONSENSUS_READ.tags = update_tag(CONSENSUS_READ.tags,('NM', New_NM))
elif (STEP == 2):
DP1_value = int(round(numpy.median(DP1_TAG)))
# New_NM = recalculate_NM(CONSENSUS_READ)
# Update DP tag
CONSENSUS_READ.tags = update_tag(CONSENSUS_READ.tags, ('DP', DP1_value))
# Add DF tag
CONSENSUS_READ.tags += [('DF', count)]
# Update Color tag
CONSENSUS_READ.tags = update_tag(CONSENSUS_READ.tags, ('YC', current_color))
## Update NM tag
# CONSENSUS_READ.tags = update_tag(CONSENSUS_READ.tags,('NM', New_NM))
# Info about barcode groups
LOG_INFO = (CONSENSUS_READ.qname, str(CONSENSUS_READ.pos), str(len(reads)))
LOG_INFO = "\t".join(LOG_INFO) + "\n"
return (CONSENSUS_READ, LOG_INFO)
### Read parameters
parser = argparse.ArgumentParser(description='Correcting bamfiles using barcodes info')
parser.add_argument('--infile', required=True, dest='infile', help='Input BAM file.')
parser.add_argument('--outfile', required=True, dest='outfile', help='Output BAM file.')
parser.add_argument('--barcodes', required=False, dest='barcodes', type=str, choices=['BEGINNING', 'END', 'BOTH'],
default='BOTH',
help='Barcodes to use. BEGINNING = Barcode 1; END = Barcode 2; BOTH = Barcode 1 and 2. Default = BOTH')
parser.add_argument('--minBQ', required=False, dest='minBQ', type=int, default=10,
help='Minimum base quality to be considered. Default = 10')
parser.add_argument('--BCerror', required=False, dest='BCerror', type=int, default=0,
help='Maximum number of sequencing errors allowed in barcode sequence. Default = 0')
parser.add_argument('--step', required=False, dest='step', type=int, default=1, choices=[1, 2],
help='Protocol step. 1: Unique barcode correction; 2: Family correction. Default = 1')
parser.add_argument('--n', required=False, dest='n', action='store_true',
help='Use Ns instead of reducing base quality.')
args = ''
try:
args = parser.parse_args()
except IOError as io:
print(io)
sys.exit('Error reading parameters.')
### Input BAM
try:
samfile = pysam.Samfile(args.infile, "rb")
except:
exit("Cannot open input file.")
### Output BAM
try:
outfile = pysam.Samfile(args.outfile, mode="wb", template=samfile)
except:
exit("Cannot open output file.")
logfile2 = args.outfile + ".log"
LOGFILE2 = open(logfile2, 'w')
minBQ = args.minBQ
Errors = (args.BCerror)
STEP = args.step
SET_N = args.n
pos = 0
end = 0
BARCODE = ""
POSITIONS_DICT = {}
ENDS = []
UNIQUE_BARCODES = {}
EDGE = ''
for read in samfile.fetch():
if not read.is_secondary:
length = str(read.rlen)
ref_end = str(read.aend)
ref_start = str(read.pos)
# Both are required: the start of the next read, plus tlen, which carries the sign of the other read (- or +) and helps to separate paired reads that map to the same coordinates
ref_length = str(read.next_reference_start) + ',' + str(read.tlen)
# Getting the barcodes
NAME = str(read.qname)
bc = extract_barcode(read, args.barcodes) # Extract the barcode
# CODE = bc + ";" + ref_length
CODE = bc
if (ref_start == pos):
# To store the CODEs for each ref_length
if ref_length in POSITIONS_DICT:
if CODE in POSITIONS_DICT[ref_length]:
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT[ref_length][CODE] = list()
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT[ref_length] = {}
POSITIONS_DICT[ref_length][CODE] = list()
POSITIONS_DICT[ref_length][CODE].append(read)
# Allowing errors
if (Errors > 0):
if ref_length in UNIQUE_BARCODES:
if CODE in list(UNIQUE_BARCODES[ref_length].nodes()):
UNIQUE_BARCODES[ref_length].add_node(CODE)
else:
UNIQUE_BARCODES[ref_length].add_node(CODE)
EDGE = get_edge(CODE, list(UNIQUE_BARCODES[ref_length].nodes()), Errors)
UNIQUE_BARCODES[ref_length].add_edges_from(EDGE)
else:
UNIQUE_BARCODES[ref_length] = nx.Graph()
UNIQUE_BARCODES[ref_length].add_node(CODE)
EDGE = get_edge(CODE, list(UNIQUE_BARCODES[ref_length].nodes()), Errors)
UNIQUE_BARCODES[ref_length].add_edges_from(EDGE)
else:
if (len(POSITIONS_DICT) > 0 and Errors > 0):
for pos2 in POSITIONS_DICT:
# When we allow errors in the Barcodes, we re-group them by similarity (Errors specified in parameter)
DICTIONARY = extract_bc_groups(POSITIONS_DICT[pos2], UNIQUE_BARCODES[pos2])
for barcode in DICTIONARY:
# Printing consensus reads to a new bam file
NEW_READ, LOG2 = GET_FINAL_READ(list(DICTIONARY[barcode]), minBQ, STEP, SET_N)
LOGFILE2.write(LOG2)
outfile.write(NEW_READ)
POSITIONS_DICT = {}
UNIQUE_BARCODES = {}
EDGE = ''
pos = ref_start
end = ref_length
if ref_length in POSITIONS_DICT:
if CODE in POSITIONS_DICT[ref_length]:
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT[ref_length][CODE] = list()
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT[ref_length] = {}
POSITIONS_DICT[ref_length][CODE] = list()
POSITIONS_DICT[ref_length][CODE].append(read)
# Allowing errors
if (Errors > 0):
if ref_length in UNIQUE_BARCODES:
if CODE in list(UNIQUE_BARCODES[ref_length].nodes()):
UNIQUE_BARCODES[ref_length].add_node(CODE)
else:
UNIQUE_BARCODES[ref_length].add_node(CODE)
EDGE = get_edge(CODE, list(UNIQUE_BARCODES[ref_length].nodes()), Errors)
UNIQUE_BARCODES[ref_length].add_edges_from(EDGE)
else:
UNIQUE_BARCODES[ref_length] = nx.Graph()
UNIQUE_BARCODES[ref_length].add_node(CODE)
EDGE = get_edge(CODE, list(UNIQUE_BARCODES[ref_length].nodes()), Errors)
UNIQUE_BARCODES[ref_length].add_edges_from(EDGE)
elif (len(POSITIONS_DICT) > 0 and Errors == 0):
DICTIONARY = POSITIONS_DICT
for pos2 in DICTIONARY:
# printing consensus reads to a new bam file
for barcode in DICTIONARY[pos2]:
NEW_READ, LOG2 = GET_FINAL_READ(list(DICTIONARY[pos2][barcode]), minBQ, STEP, SET_N)
# NEW_READ = DICTIONARY[barcode][0]
##print STEP, NEW_READ.qual
LOGFILE2.write(LOG2)
outfile.write(NEW_READ)
POSITIONS_DICT = {}
pos = ref_start
end = ref_length
if ref_length in POSITIONS_DICT:
if CODE in POSITIONS_DICT[ref_length]:
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT[ref_length][CODE] = list()
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT[ref_length] = {}
POSITIONS_DICT[ref_length][CODE] = list()
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT = {}
UNIQUE_BARCODES = {}
EDGE = ''
pos = ref_start
end = ref_end
if ref_length in POSITIONS_DICT:
if CODE in POSITIONS_DICT[ref_length]:
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT[ref_length][CODE] = list()
POSITIONS_DICT[ref_length][CODE].append(read)
else:
POSITIONS_DICT[ref_length] = {}
POSITIONS_DICT[ref_length][CODE] = list()
POSITIONS_DICT[ref_length][CODE].append(read)
# Allowing errors
if (Errors > 0):
if ref_length in UNIQUE_BARCODES:
if CODE in list(UNIQUE_BARCODES[ref_length].nodes()):
UNIQUE_BARCODES[ref_length].add_node(CODE)
else:
UNIQUE_BARCODES[ref_length].add_node(CODE)
EDGE = get_edge(CODE, list(UNIQUE_BARCODES[ref_length].nodes()), Errors)
UNIQUE_BARCODES[ref_length].add_edges_from(EDGE)
else:
UNIQUE_BARCODES[ref_length] = nx.Graph()
UNIQUE_BARCODES[ref_length].add_node(CODE)
EDGE = get_edge(CODE, list(UNIQUE_BARCODES[ref_length].nodes()), Errors)
# print EDGE
UNIQUE_BARCODES[ref_length].add_edges_from(EDGE)
#### We need to print the last groups of reads
if (len(POSITIONS_DICT) > 0 and Errors > 0):
for pos2 in POSITIONS_DICT:
# When we allow errors in the Barcodes, we re-group them by similarity (Errors specified in parameter)
DICTIONARY = extract_bc_groups(POSITIONS_DICT[pos2], UNIQUE_BARCODES[pos2])
for barcode in DICTIONARY:
# Printing consensus reads to a new bam file
NEW_READ, LOG2 = GET_FINAL_READ(list(DICTIONARY[barcode]), minBQ, STEP, SET_N)
LOGFILE2.write(LOG2)
outfile.write(NEW_READ)
elif (len(POSITIONS_DICT) > 0 and Errors == 0):
DICTIONARY = POSITIONS_DICT
for pos2 in DICTIONARY:
# printing consensus reads to a new bam file
for barcode in DICTIONARY[pos2]:
NEW_READ, LOG2 = GET_FINAL_READ(list(DICTIONARY[pos2][barcode]), minBQ, STEP, SET_N)
LOGFILE2.write(LOG2)
outfile.write(NEW_READ)
samfile.close()
LOGFILE2.close()
outfile.close()
stop = timeit.default_timer()
print('TIME')
print(stop - start)
|
imgag/megSAP
|
src/NGS/barcode_correction.py
|
Python
|
gpl-3.0
| 25,086
|
[
"pysam"
] |
44f6904da96f3f68597ac2b003a10c9d7d3cc84e30619e40feb9218281cacc77
|
# Copyright 2012 Google Inc.
#
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Checkers for various standard library functions."""
import six
import sys
import astroid
from astroid.bases import Instance
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers import utils
TYPECHECK_COMPARISON_OPERATORS = frozenset(('is', 'is not', '==', '!=', 'in', 'not in'))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
if sys.version_info >= (3, 0):
OPEN_MODULE = '_io'
TYPE_QNAME = 'builtins.type'
else:
OPEN_MODULE = '__builtin__'
TYPE_QNAME = '__builtin__.type'
def _check_mode_str(mode):
# check type
if not isinstance(mode, six.string_types):
return False
# check syntax
modes = set(mode)
_mode = "rwatb+U"
creating = False
if six.PY3:
_mode += "x"
creating = "x" in modes
if modes - set(_mode) or len(mode) > len(modes):
return False
# check logic
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending or creating and six.PY3:
return False
reading = True
if not six.PY3:
binary = True
if text and binary:
return False
total = reading + writing + appending + (creating if six.PY3 else 0)
if total > 1:
return False
if not (reading or writing or appending or creating and six.PY3):
return False
# other 2.x constraints
if not six.PY3:
if "U" in mode:
mode = mode.replace("U", "")
if "r" not in mode:
mode = "r" + mode
return mode[0] in ("r", "w", "a", "U")
return True
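# Illustrative examples (added comment, not part of the original checker):
#   _check_mode_str("rb")   -> True    # read, binary
#   _check_mode_str("wt+")  -> True    # write, text, updating
#   _check_mode_str("rw")   -> False   # read and write both set
#   _check_mode_str("z")    -> False   # unknown mode character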
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return (isinstance(call, astroid.CallFunc)
and len(call.args) == 1
and not isinstance(call.args[0], astroid.Keyword))
class StdlibChecker(BaseChecker):
__implements__ = (IAstroidChecker,)
name = 'stdlib'
msgs = {
'W1501': ('"%s" is not a valid mode for open.',
'bad-open-mode',
'Python supports: r, w, a[, x] modes with b, +, '
'and U (only with r) options. '
'See http://docs.python.org/2/library/functions.html#open'),
'W1502': ('Using datetime.time in a boolean context.',
'boolean-datetime',
'Using datetime.time in a boolean context can hide '
'subtle bugs when the time they represent matches '
'midnight UTC. This behaviour was fixed in Python 3.5. '
'See http://bugs.python.org/issue13936 for reference.',
{'maxversion': (3, 5)}),
'W1503': ('Redundant use of %s with constant '
'value %r',
'redundant-unittest-assert',
'The first argument of assertTrue and assertFalse is '
'a condition. If a constant is passed as parameter, that '
'condition will be always true. In this case a warning '
'should be emitted.'),
'W1504': ('Using type() instead of isinstance() for a typecheck.',
'unidiomatic-typecheck',
'The idiomatic way to perform an explicit typecheck in '
'Python is to use isinstance(x, Y) rather than '
'type(x) == Y, type(x) is Y. Though there are unusual '
'situations where these give different results.')
}
@utils.check_messages('bad-open-mode', 'redundant-unittest-assert')
def visit_callfunc(self, node):
"""Visit a CallFunc node."""
if hasattr(node, 'func'):
infer = utils.safe_infer(node.func)
if infer:
if infer.root().name == OPEN_MODULE:
if getattr(node.func, 'name', None) in ('open', 'file'):
self._check_open_mode(node)
if infer.root().name == 'unittest.case':
self._check_redundant_assert(node, infer)
@utils.check_messages('boolean-datetime')
def visit_unaryop(self, node):
if node.op == 'not':
self._check_datetime(node.operand)
@utils.check_messages('boolean-datetime')
def visit_if(self, node):
self._check_datetime(node.test)
@utils.check_messages('boolean-datetime')
def visit_ifexp(self, node):
self._check_datetime(node.test)
@utils.check_messages('boolean-datetime')
def visit_boolop(self, node):
for value in node.values:
self._check_datetime(value)
@utils.check_messages('unidiomatic-typecheck')
def visit_compare(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_redundant_assert(self, node, infer):
if (isinstance(infer, astroid.BoundMethod) and
node.args and isinstance(node.args[0], astroid.Const) and
infer.name in ['assertTrue', 'assertFalse']):
self.add_message('redundant-unittest-assert',
args=(infer.name, node.args[0].value, ),
node=node)
def _check_datetime(self, node):
""" Check that a datetime was infered.
If so, emit boolean-datetime warning.
"""
try:
infered = next(node.infer())
except astroid.InferenceError:
return
if (isinstance(infered, Instance) and
infered.qname() == 'datetime.time'):
self.add_message('boolean-datetime', node=node)
def _check_open_mode(self, node):
"""Check that the mode argument of an open or file call is valid."""
try:
mode_arg = utils.get_argument_from_call(node, position=1,
keyword='mode')
except utils.NoSuchArgumentError:
return
if mode_arg:
mode_arg = utils.safe_infer(mode_arg)
if (isinstance(mode_arg, astroid.Const)
and not _check_mode_str(mode_arg.value)):
self.add_message('bad-open-mode', node=node,
args=mode_arg.value)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (isinstance(left_func, astroid.Class)
and left_func.qname() == TYPE_QNAME):
return
if operator in ('is', 'is not') and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (isinstance(right_func, astroid.Class)
and right_func.qname() == TYPE_QNAME):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message('unidiomatic-typecheck', node=node)
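# Illustrative examples (added comment, not part of the original checker): the
# message is expected to fire on comparisons such as
#   type(x) == Foo        or        type(x) is type([])
# but not on type(x) is type(y) when y is not a literal, since comparing two
# runtime types can be a deliberate dynamic check.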
def register(linter):
"""required method to auto register this checker """
linter.register_checker(StdlibChecker(linter))
|
JetChars/vim
|
vim/bundle/python-mode/pymode/libs/pylint/checkers/stdlib.py
|
Python
|
apache-2.0
| 8,282
|
[
"VisIt"
] |
bc182f76edd3a80b671b8050d5a69b72478ff385010a41120f05b568e42d3b4e
|
from __future__ import absolute_import, division, print_function
from jaspyx.visitor import BaseVisitor
class Delete(BaseVisitor):
def visit_Delete(self, node):
for target in node.targets:
self.indent()
self.output('delete ')
self.visit(target)
self.finish()
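# Illustrative sketch (added; not part of the original module): for the Python
# statement `del foo.bar, baz`, visit_Delete above walks both targets and is
# expected to emit one JavaScript `delete` per target, roughly
#
#     delete foo.bar
#     delete baz
#
# The exact line formatting depends on BaseVisitor.indent/output/finish, which
# are assumed here rather than shown.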
|
ztane/jaspyx
|
jaspyx/visitor/delete.py
|
Python
|
mit
| 321
|
[
"VisIt"
] |
1dbb08b248e56250121c1be34605da04e14bf70887bc6861f7e8b750fbea7e7e
|
#! /usr/bin/python
# $Id$
# -----------------------------------------------------------------------------
# CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-12 Bradley M. Bell
#
# CppAD is distributed under multiple licenses. This distribution is under
# the terms of the
# Eclipse Public License Version 1.0.
#
# A copy of this license is included in the COPYING file of this distribution.
# Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
# -----------------------------------------------------------------------------
#
import re # see http://docs.python.org/library/re.html
# --------------------------------------------------------------------------
def remove_simple_cmd(cmd_start, cmd_end, text_in) :
pattern = '(.*?)' + cmd_start + '(.*?)' + cmd_end
pattern = re.compile(pattern, re.DOTALL)
text_out = ''
start = 0
while start < len(text_in):
match = pattern.search(text_in, start)
if match == None :
			text_out += text_in[start:]   # keep the remainder of the input
start = len(text_in)
else :
text_out += match.group(1) + match.group(2)
start = match.end(0)
return text_out
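# Added illustrative example (not in the original script):
#
#     remove_simple_cmd('<b>', '</b>', 'Please <b>visit</b>')
#
# removes each <b>...</b> command pair while keeping the enclosed text, so
# this call is expected to return 'Please visit'.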
# --------------------------------------------------------------------------
def remove_cmd_indent_text(cmd_start, cmd_end, text_in) :
pattern = '(.*?)' + cmd_start + '(.*?)' + cmd_end
pattern = re.compile(pattern, re.DOTALL)
text_out = ''
start = 0
while start < len(text_in):
match = pattern.search(text_in, start)
if match == None :
			text_out += text_in[start:]   # keep the remainder of the input
start = len(text_in)
else :
text_out += match.group(1) + '\t'
text_out += re.sub('\n', '\n\t', match.group(2) )
start = match.end(0)
return text_out
# --------------------------------------------------------------------------
#
file_in = open('epl-v10.html', 'rb')
data_in = file_in.read()
# --------------------------------------------------------------------------
# Extract body
pattern = '<body lang="EN-US">(.*)</body>'
match = re.search(pattern, data_in, re.DOTALL)
text = match.group(1)
# --------------------------------------------------------------------------
text = remove_simple_cmd('<b>', '</b>', text)
text = remove_simple_cmd('<p>', '</p>', text)
text = remove_simple_cmd('<h2>', '</h2>', text)
text = remove_cmd_indent_text('<p class="list">', '</p>', text)
# --------------------------------------------------------------------------
data_out = text
file_out = open('epl-v10.txt', 'wb')
file_out.write(data_out)
file_out.close()
|
utke1/cppad
|
bin/epl_html2txt.py
|
Python
|
epl-1.0
| 2,514
|
[
"VisIt"
] |
87a3d62f7415a336edbb8b04a83a2a73f6ea278ce6b4ebab115cff2d4a300ab6
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Security (SSL) Settings
Usage:
import libcloud.security
libcloud.security.VERIFY_SSL_CERT = True
# Optional.
libcloud.security.CA_CERTS_PATH.append('/path/to/cacert.txt')
"""
import os
import ssl
__all__ = [
'VERIFY_SSL_CERT',
'SSL_VERSION',
'CA_CERTS_PATH'
]
VERIFY_SSL_CERT = True
SSL_VERSION = ssl.PROTOCOL_TLSv1
# True to use certifi CA bundle path when certifi library is available
USE_CERTIFI = os.environ.get('LIBCLOUD_SSL_USE_CERTIFI', True)
USE_CERTIFI = str(USE_CERTIFI).lower() in ['true', '1']
# File containing one or more PEM-encoded CA certificates
# concatenated together.
CA_CERTS_PATH = None
# Insert certifi CA bundle path to the front of Libcloud CA bundle search
# path if certifi is available
try:
import certifi
except ImportError:
has_certifi = False
else:
has_certifi = True
if has_certifi and USE_CERTIFI:
certifi_ca_bundle_path = certifi.where()
CA_CERTS_PATH = certifi_ca_bundle_path
# Allow user to explicitly specify which CA bundle to use, using an environment
# variable
environment_cert_file = os.getenv('SSL_CERT_FILE', None)
if environment_cert_file is not None:
# Make sure the file exists
if not os.path.exists(environment_cert_file):
raise ValueError('Certificate file %s doesn\'t exist' %
(environment_cert_file))
if not os.path.isfile(environment_cert_file):
raise ValueError('Certificate file can\'t be a directory')
# If a provided file exists we ignore other common paths because we
# don't want to fall-back to a potentially less restrictive bundle
CA_CERTS_PATH = [environment_cert_file]
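# Illustrative usage sketch (added; the paths below are hypothetical):
#
#     SSL_CERT_FILE=/etc/ssl/my-ca-bundle.pem python my_libcloud_script.py
#
# or, from Python, after importing this module:
#
#     import libcloud.security
#     libcloud.security.VERIFY_SSL_CERT = True
#     libcloud.security.CA_CERTS_PATH = ['/etc/ssl/my-ca-bundle.pem']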
CA_CERTS_UNAVAILABLE_ERROR_MSG = (
'No CA Certificates were found in CA_CERTS_PATH. For information on '
'how to get required certificate files, please visit '
'https://libcloud.readthedocs.org/en/latest/other/'
'ssl-certificate-validation.html'
)
VERIFY_SSL_DISABLED_MSG = (
'SSL certificate verification is disabled, this can pose a '
'security risk. For more information how to enable the SSL '
'certificate verification, please visit the libcloud '
'documentation.'
)
|
samuelchong/libcloud
|
libcloud/security.py
|
Python
|
apache-2.0
| 2,950
|
[
"VisIt"
] |
c1bf1a0451b32ffba7b3c7356cf7a70e9a06cf055d0e5dd0189c175eb9e7cbf0
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2008-2015
# Christian Kohlöffel
# Vinzenz Schulz
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from __future__ import absolute_import
from core.point import Point
from dxfimport.spline_convert import Spline2Arcs
from dxfimport.classes import PointsClass, ContourClass
import globals.globals as g
class GeoentSpline:
def __init__(self, Nr=0, caller=None):
self.Typ = 'Spline'
self.Nr = Nr
# Initialisieren der Werte
# Initialise the values
self.Layer_Nr = 0
self.Spline_flag = []
self.degree = 1
self.Knots = []
self.Weights = []
self.CPoints = []
self.geo = []
self.length = 0.0
# Lesen der Geometrie
# Read the geometry
self.Read(caller)
        # Zuweisen der Toleranz fürs Fitting
# Assign the fitting tolerance
tol = g.config.fitting_tolerance
check = g.config.vars.Import_Parameters['spline_check']
# Umwandeln zu einem ArcSpline
        # Convert to an ArcSpline
Spline2ArcsClass = Spline2Arcs(degree=self.degree, Knots=self.Knots,
Weights=self.Weights, CPoints=self.CPoints, tol=tol, check=check)
self.geo = Spline2ArcsClass.Curve
for geo in self.geo:
self.length += geo.length
def __str__(self):
# how to print the object
s = "\nTyp: Spline" +\
"\nNr: %i" % self.Nr +\
"\nLayer Nr: %i" % self.Layer_Nr +\
"\nSpline flag: %i" % self.Spline_flag +\
"\ndegree: %i" % self.degree +\
"\nlength: %0.3f" % self.length +\
"\nGeo elements: %i" % len(self.geo) +\
"\nKnots: %s" % self.Knots +\
"\nWeights: %s" % self.Weights +\
"\nCPoints: "
for Point in self.CPoints:
s = s + "\n" + str(Point)
s += "\ngeo: "
return s
def reverse(self):
"""
reverse()
"""
self.geo.reverse()
for geo in self.geo:
geo.reverse()
def App_Cont_or_Calc_IntPts(self, cont, points, i, tol, warning):
"""
App_Cont_or_Calc_IntPts()
"""
        # Geschlossene Splines direkt als Kontur anhängen, sonst die Endpunkte vormerken
        # Append closed splines directly as a contour, otherwise record their end points
if self.CPoints[0].within_tol(self.CPoints[-1], tol):
self.analyse_and_opt()
cont.append(ContourClass(len(cont), 1, [[i, 0]], self.length))
else:
points.append(PointsClass(point_nr=len(points), geo_nr=i,
Layer_Nr=self.Layer_Nr,
be=self.geo[0].Ps,
en=self.geo[-1].Pe,
be_cp=[], en_cp=[]))
return warning
def analyse_and_opt(self):
"""
analyse_and_opt()
"""
summe = 0
# Richtung in welcher der Anfang liegen soll (unten links)
        # Direction in which the start point should lie (bottom left)
Popt = Point(-1e3, -1e6)
        # Calculate the orientation using the Gaussian (shoelace) formula
# Positive value means CW, negative value indicates CCW
# closed polygon
for Line in self.geo:
summe += Line.Ps.x * Line.Pe.y - Line.Pe.x * Line.Ps.y
if summe > 0.0:
self.reverse()
# Find the smallest starting point from bottom left X (Must be new loop!)
# logger.debug(self.geo)
min_distance = self.geo[0].Ps.distance(Popt)
min_geo_nr = 0
for geo_nr in range(1, len(self.geo)):
if self.geo[geo_nr].Ps.distance(Popt) < min_distance:
min_distance = self.geo[geo_nr].Ps.distance(Popt)
min_geo_nr = geo_nr
# Order contour so the new starting point is at the beginning
self.geo = self.geo[min_geo_nr:len(self.geo)] + self.geo[0:min_geo_nr]
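    # Added illustrative note (not part of the original source): the 'summe'
    # loop above is the shoelace formula,
    #     summe = sum(Ps.x * Pe.y - Pe.x * Ps.y) over all segments,
    # i.e. twice the signed area of the closed contour; its sign encodes the
    # winding direction, and a positive value makes analyse_and_opt reverse
    # the contour.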
def Read(self, caller):
"""
Read()
"""
# Assign short name
lp = caller.line_pairs
e = lp.index_code(0, caller.start + 1)
# Assign layer
s = lp.index_code(8, caller.start + 1)
self.Layer_Nr = caller.Get_Layer_Nr(lp.line_pair[s].value)
        # Spline-Flag zuweisen
        # Assign the spline flag
s = lp.index_code(70, s + 1)
self.Spline_flag = int(lp.line_pair[s].value)
# Spline Ordnung zuweisen
# Spline order to assign
s = lp.index_code(71, s + 1)
self.degree = int(lp.line_pair[s].value)
# Number of CPts
st = lp.index_code(73, s + 1)
        nCPts = int(lp.line_pair[st].value)
s = st
# Read the node (knot)
while True:
            # Node (knot) value
sk = lp.index_code(40, s + 1, e)
if sk is None:
break
self.Knots.append(float(lp.line_pair[sk].value))
s = sk
# Read the weights
s = st
while True:
# Node (knot) weights
sg = lp.index_code(41, s + 1, e)
if sg is None:
break
self.Weights.append(float(lp.line_pair[sg].value))
s = sg
# Read the control points
s = st
while True:
# X value
s = lp.index_code(10, s + 1, e)
# Wenn kein neuer Punkt mehr gefunden wurde abbrechen ...
# Cancel if no new item was detected
if s is None:
break
x = float(lp.line_pair[s].value)
# Y value
s = lp.index_code(20, s + 1, e)
y = float(lp.line_pair[s].value)
self.CPoints.append(Point(x, y))
if len(self.Weights) == 0:
for nr in range(len(self.CPoints)):
self.Weights.append(1)
caller.start = e
# print nCPts
# print len(self.Knots)
# print len(self.Weights)
# print len(self.CPoints)
# print self
def get_start_end_points(self, direction=0):
"""
get_start_end_points()
"""
if not direction:
punkt, angle = self.geo[0].get_start_end_points(direction)
else:
punkt, angle = self.geo[-1].get_start_end_points(direction)
return punkt, angle
|
Poofjunior/dxf2gcode
|
dxfimport/geoent_spline.py
|
Python
|
gpl-3.0
| 7,412
|
[
"Gaussian"
] |
228fd1aa870ab6e3f94e9b431243f4245722ec34d162849eb769fa9a9a1ce125
|
from __future__ import division
import random, sys, pygame, asteroid, explosion, player
from random import randint
class Game(object):
width, height = 800, 600
asteroids = pygame.sprite.Group()
shoots = pygame.sprite.Group()
explosions = []
points = 0
level = 5
levelAsteroidFrequency = {
1: 80,
2: 60,
3: 40,
4: 30,
5: 25,
6: 10,
}
levelUpdateFrequency = {
2: 25 * 1000,
3: 50 * 1000,
4: 75 * 1000,
5: 110 * 1000,
6: 150 * 1000,
}
asteroidProbability = {
1: [1],
2: [1] + [2] * 3,
3: [1] * 2 + [2] * 3 + [3],
        4: [1] * 3 + [2] * 3 + [3] * 3,
5: [1] * 3 + [2] * 1 + [3] * 2 + [4] * 5,
6: [1] * 3 + [2] * 3 + [4] * 2 + [5] * 5,
}
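    # Added note (not in the original source): each list above is a simple
    # weighted-choice table. At level 3, for example, random.choice over
    # [1, 1, 2, 2, 2, 3] spawns an asteroid of level 1 with probability 2/6,
    # level 2 with probability 3/6 and level 3 with probability 1/6.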
def __init__(self):
pass
def randomAsteroids(self):
if randint(1, self.levelAsteroidFrequency[self.level])==1:
astLevel = random.choice(self.asteroidProbability[self.level])
newasteroid = asteroid.Asteroid(astLevel)
randx = randint(0, self.screen.get_width() - 50)
randy = newasteroid.image.get_height() * -1
newasteroid.rect.x, newasteroid.rect.y = randx, randy
newasteroid.speedx = randint(-2, 2)
newasteroid.speedy = randint(1, 2)
self.asteroids.add(newasteroid)
def initialState(self):
pygame.init()
self.font = pygame.font.SysFont(u"monospace", 35)
self.asteroids.empty()
self.shoots.empty()
self.points = 0
self.level = 1
self.paused = 0
self.time = 0
def gameover(self):
while 1:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
self.run()
if event.key == pygame.K_q:
sys.exit()
self.screen.fill((0, 0, 0))
gameoverLabel = self.font.render(u"Game over", 1, (255, 0, 0))
pointsLabel = self.font.render(u"Score: " + unicode(self.points),
1, (255, 255, 255))
restartLabel = self.font.render(u"R to restart", 1, (255, 255, 255))
quitLabel = self.font.render(u"Q to quit", 1, (255, 255, 255))
self.screen.blit(gameoverLabel, (((self.screen.get_width() - gameoverLabel.get_width()) / 2), 50))
self.screen.blit(pointsLabel, (((self.screen.get_width() - pointsLabel.get_width()) / 2), 100))
self.screen.blit(restartLabel, (((self.screen.get_width() - restartLabel.get_width()) / 2), 150))
self.screen.blit(quitLabel, (((self.screen.get_width() - quitLabel.get_width()) / 2), 200))
pygame.display.flip()
print u"Game Over\nPontuacao:", self.points
sys.exit()
def checkCollision(self):
#if pygame.sprite.spritecollide(self.p1, self.asteroids, False, pygame.sprite.collide_mask):
#print('collide', randint(1, 100))
for shoot in self.shoots:
collide = pygame.sprite.spritecollide(shoot, self.asteroids,
False, pygame.sprite.collide_mask)
for c in collide:
self.shoots.remove(shoot)
c.life -= shoot.damage
if c.life > 0:
expl = explosion.Explosion((shoot.rect.x, shoot.rect.y),
(25, 25))
#expl.rect.y -= expl.rect.height / 2
#expl.rect.x -= expl.rect.width / 2
self.explosions.append(expl)
break
expl = explosion.Explosion((c.rect.x, c.rect.y),
(c.rect.height, c.rect.width))
self.explosions.append(expl)
self.points += c.points
self.asteroids.remove(c)
for e in self.explosions:
done = e.explode()
if done:
self.explosions.remove(e)
e = None
if pygame.sprite.spritecollide(self.p1, self.asteroids,
True, pygame.sprite.collide_mask):
self.running = False
#if pygame.sprite.collide_mask(self.p1, self.a):
#print('collide', randint(1, 100))
#if pygame.sprite.groupcollide(self.shoots, self.asteroids, False, pygame.sprite.collide_mask):
#print("collide")
def score(self):
self.scoreLabel = self.font.render(u"Score: " + unicode(self.points), 1,
(255, 255, 200))
self.levelLabel = self.font.render(u"Level: " + unicode(self.level), 1,
(255, 255, 200))
def updateLevel(self):
tick = self.time
if self.level+1 in self.levelUpdateFrequency and\
tick > self.levelUpdateFrequency[self.level+1]:
self.level += 1
def pause(self):
#self.screen.fill((0, 0, 0))
pauseLabel = self.font.render(u"Paused", 1, (255, 255, 255))
self.screen.blit(pauseLabel, ((self.screen.get_width() - pauseLabel.get_width()) / 2, 100))
pygame.display.flip()
while self.paused:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_p:
self.paused = 0
def run(self):
self.initialState()
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption("Galaxy Destroyer")
self.p1 = player.Player(self)
self.bg = pygame.image.load(u'img/bg.png')
self.clock = pygame.time.Clock()
self.running = True
while self.running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
shoot = self.p1.moveKeyDown(event)
if shoot:
self.shoots.add(shoot)
if event.key == pygame.K_p:
self.paused = 1
if self.paused: self.pause()
self.score()
            self.updateLevel()
key = pygame.key.get_pressed()
self.randomAsteroids()
self.p1.move(key, self.shoots)
self.checkCollision()
self.screen.blit(self.bg, (0, 0))
self.screen.blit(self.p1.image, self.p1.rect)
for asteroid in self.asteroids:
asteroid.update()
self.screen.blit(asteroid.image, asteroid.rect)
if asteroid.rect.top > self.screen.get_height() or\
(asteroid.rect.right < 0 and asteroid.speedx < 0) or\
(asteroid.rect.left > self.screen.get_width() and asteroid.speedx > 0):
self.asteroids.remove(asteroid)
for shoot in self.shoots:
shoot.update()
self.screen.blit(shoot.image, shoot.rect)
if shoot.rect.bottom < 0:
self.shoots.remove(shoot)
for e in self.explosions:
if hasattr(e, u'image'):
self.screen.blit(e.image, e.rect)
self.screen.blit(self.levelLabel, (10, 40))
self.screen.blit(self.scoreLabel, (10, 10))
self.clock.tick(40)
pygame.display.flip()
self.time += self.clock.get_time()
self.gameover()
|
sollidsnake/galaxy-destroyer
|
game.py
|
Python
|
gpl-2.0
| 7,638
|
[
"Galaxy"
] |
a5d7a85ea783e6c7b5a0deef15c087a6533327fdfa0eb23dbdcb746ac75f3cda
|
# read_cm1.py
class readcm1(object): ####################################################################################
'''
The binary output option in cm1 returns files in a very unusable and inconvenient
output structure, unlike some nicer output options such as netCDF. This class is
designed to easily and nicely unpack the cm1 binary datafiles into a usable
dictionary format, designed to emulate netCDF output.
Currently, this class is equipped with methods to handle:
Scalar output files (_s.dat, _w.dat)
Parcel output files
Statistics output files
from cm1r18
^ While the vector output files are arranged in a very similar fashion to the
scalar output, there is not a reading method for them implemented at this time.
Initial publishing: Oct 20,2015
Update April 5, 2016: Implemented a method 'mesh' which produces a meshgrid of
any 2 dimensions from the 3 dimensional cm1r18 output,
as long as there exists a '_s.ctl' file.
Update July 7, 2016: Implemented a method 'w_read' which reads '_w.dat' output
in the same fashion as the '_s.dat' output files.
'''
###########################################################################################################
def __init__(self,filetag):
'''
Init only requires the filetag that is common to all of your model's output.
eg: '/lustre/scratch/jsmith/testrun'
'''
self.filetag = filetag
self.statsctl = filetag+'_stats.ctl'
self.statsfile = filetag+'_stats.dat'
self.scalarctl = filetag+'_s.ctl'
self.wctl = filetag+'_w.ctl'
# Scalar .dat files are defined in 'scalar read' below.
self.pfile = filetag+'_pdata.dat'
self.pctl = filetag+'_pdata.ctl'
###########################################################################################################
def scalar_read(self,filenumber,):
'''
Reads the _s.dat files from cm1. Note that this is specifically for cm1r18
or earlier. The only input necessary is the integer value of the scalar
data file. Output is a dictionary of all of the data stored in the scalar
file.
        NOTE: This code requires the scalar data to be output at EACH TIMESTEP
        in a SEPARATE FILE (namelist: output_format = 1, output_filetype = 2).
'''
import numpy as np
number_svars = 0
number_twodvars = 0
number_threedvars = 0
skeys =[]
start_var_key_list = 99999
# Determine the size of the model domain.
f = open(self.scalarctl,'r')
for i,line in enumerate(f):
linewords = line.split()
if linewords[0] == 'xdef':
nx = int(linewords[1])
elif linewords[0] == 'ydef':
ny = int(linewords[1])
elif linewords[0] == 'zdef':
nz = int(linewords[1])
elif linewords[0] == 'vars':
number_svars = int(linewords[1])
start_var_key_list = i
elif linewords[0] == 'tdef':
number_stimes = int(linewords[1])
if i > start_var_key_list and i < start_var_key_list+number_svars+1:
skeys.append(linewords[0])
if linewords[1] == '0':
number_twodvars = number_twodvars+1
if linewords[0] == 'endvars': # Head off a common error here.
raise SystemExit('\n\nError reading scalar file....reached EOF.\n\n')
f.close()
# Read the data file.
sdata = []
number_threedvars = number_svars-number_twodvars # This will differentiate between 2d and 3d vars.
scalarfile = self.filetag+'_%06d_s.dat' %filenumber
f = open(scalarfile,'r')
# Read the 2D variables at the start of the file.
twodvar_data = np.fromfile(f,dtype='float32',count=nx*ny*number_twodvars,sep='')
twodvar_data = twodvar_data.reshape(number_twodvars,ny,nx)
for i in range(number_twodvars):
sdata.append(twodvar_data[i,:,:])
# Read the 3D variables afterward.
threedvar_data = np.fromfile(f,dtype='float32',count=nx*ny*nz*number_threedvars,sep='')
threedvar_data = threedvar_data.reshape(number_threedvars,nz,ny,nx)
for i in range(number_threedvars):
sdata.append(threedvar_data[i,:,:,:])
print('\n'+scalarfile+'-> 2D vars: '+str(number_twodvars)+' 3D vars: '+str(number_threedvars)+'\n')
return dict(zip(skeys,sdata))
###########################################################################################################
def w_read(self,filenumber,):
'''
Reads the _w.dat files from cm1. Note that this is specifically for cm1r18
        or earlier. The only input necessary is the integer value of the w
        data file. Output is a dictionary of all of the data stored in the w
        file.
        NOTE: This code requires the w data to be output at EACH TIMESTEP
        in a SEPARATE FILE (namelist: output_format = 1, output_filetype = 2).
'''
import numpy as np
number_svars = 0
number_twodvars = 0
number_threedvars = 0
skeys =[]
start_var_key_list = 99999
# Determine the size of the model domain.
f = open(self.wctl,'r')
for i,line in enumerate(f):
linewords = line.split()
if linewords[0] == 'xdef':
nx = int(linewords[1])
elif linewords[0] == 'ydef':
ny = int(linewords[1])
elif linewords[0] == 'zdef':
nz = int(linewords[1])
elif linewords[0] == 'vars':
number_svars = int(linewords[1])
start_var_key_list = i
elif linewords[0] == 'tdef':
number_stimes = int(linewords[1])
if i > start_var_key_list and i < start_var_key_list+number_svars+1:
skeys.append(linewords[0])
if linewords[1] == '0':
number_twodvars = number_twodvars+1
if linewords[0] == 'endvars': # Head off a common error here.
raise SystemExit('\n\nError reading scalar file....reached EOF.\n\n')
f.close()
# Read the data file.
sdata = []
number_threedvars = number_svars-number_twodvars # This will differentiate between 2d and 3d vars.
wfile = self.filetag+'_%06d_w.dat' %filenumber
f = open(wfile,'r')
# Read the 2D variables at the start of the file.
twodvar_data = np.fromfile(f,dtype='float32',count=nx*ny*number_twodvars,sep='')
twodvar_data = twodvar_data.reshape(number_twodvars,ny,nx)
for i in range(number_twodvars):
sdata.append(twodvar_data[i,:,:])
# Read the 3D variables afterward.
threedvar_data = np.fromfile(f,dtype='float32',count=nx*ny*nz*number_threedvars,sep='')
threedvar_data = threedvar_data.reshape(number_threedvars,nz,ny,nx)
for i in range(number_threedvars):
sdata.append(threedvar_data[i,:,:,:])
print('\n'+wfile+'-> 2D vars: '+str(number_twodvars)+' 3D vars: '+str(number_threedvars)+'\n')
return dict(zip(skeys,sdata))
###########################################################################################################
def stats_read(self,):
'''
Read the binary statistics file output from cm1, and return a dictionary
of the data contained therein, much like netCDF output. No arguments
are necessary so long as an instance of readcm1 is extant.
'''
import os
import numpy as np
# Read the CTL file and determine keys, timesteps, and number of variables:
keys = [] # Initialize the key list.
f = open(self.statsctl,'r')
for i,line in enumerate(f):
linewords = line.split()
if i == 7:
statsvars = int(linewords[1])
elif i > 7 and linewords[0] != 'endvars':
keys.append(linewords[0])
f.close()
# NEW METHOD TO DETERMINE TIMES INCLUDED THAT ISN'T AS KLUDGY.
# Determine the times included in a new method with the filesize.
statstimes = os.path.getsize(self.statsfile)/(4*statsvars)
print('\n'+self.statsfile+': '+str(statsvars)+' stats variables with '+str(statstimes)+' timesteps.\n')
# Read the statistics outfile.
stats_data = [] # Initialize the data list.
total = statsvars*statstimes
read_stats_data = np.fromfile(self.statsfile,dtype='float32',count=total,sep='')
#
# A note how the stats are organized:
#
# 32 bit (4 byte) per value.
# Printed time sequentially, such that
# all the variables for a time are
# printed before the next time starts.
#
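        #
        # Added worked example (not in the original file): with 3 variables
        # and 2 timesteps the flat array is ordered
        #     [v0(t0), v1(t0), v2(t0), v0(t1), v1(t1), v2(t1)],
        # so reshape(statstimes, statsvars) puts times on the rows, and
        # read_stats_data[:, i] below is the full timeseries of variable i.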
read_stats_data = read_stats_data.reshape(statstimes,statsvars)
for i in range(statsvars):
stats_data.append(read_stats_data[:,i])
return dict(zip(keys,stats_data))
###########################################################################################################
def parcel_read(self,parcel_desired,):
'''
Reads the timeseries data along a single parcel trajectory from a cm1 _pdata.dat
file. Please be aware that this method reads the pdata CTL file to obtain info
on the contents of the pdata.dat file, so if there are modifications to the cm1
source code to output different variables such that they are not recorded in the
pdata CTL file, you will need to modify the pdata CTL file accordingly in order
for this method to work properly.
Input: integer callvalue of parcel.
Output: dictionary of timeseries data along that parcel's trajectory.
'''
import os
import numpy as np
# Determine the number of parcels, timesteps, and variables written to the pdata file.
keys =[]
f = open(self.pctl,'r')
for i,line in enumerate(f):
linewords = line.split()
if i == 3:
number_parcels = int(linewords[1])-1 # on xdef line, for some reason.
if i == 6:
ptimes = int(linewords[1])
elif i == 7:
pvars = int(linewords[1])
elif i > 7 and linewords[0] != 'endvars':
keys.append(linewords[0])
f.close()
if parcel_desired >= number_parcels:
raise SystemExit('\n\nError: Desired parcel must be between 0 and '+str(number_parcels-1)+'.\n\n')
print('\n'+self.pfile+': Reading '+str(pvars)+' variables from parcel # '+str(parcel_desired)+'.\n')
# Read the parcel data. NOTE: cm1r18 outputs a pdata file much larger than just
# the parcel data here, however, this still works because the actual parcel
# variable data is contained at the front of the pdata file.
parcel_data = []
pdata = np.zeros((ptimes,pvars))
h = open(self.pfile,'r')
for var in range(pvars):
for time in range(ptimes):
h.seek(time*number_parcels*pvars*4+number_parcels*var*4+parcel_desired*4)
a = np.fromfile(h,dtype='float32',count=1,sep='')
#print(a,time,var)
pdata[time,var] = a
for i in range(pvars):
parcel_data.append(pdata[:,i])
return dict(zip(keys,parcel_data))
###########################################################################################################
def mesh(self,dimensions):
'''
Creates and returns a meshgrid for plotting any 2 dimensions of the 3 dimensional
model output.
Input: 2 dimensions (x,y, or z) with which to create the meshgrid (NOTE:
THE ARGUMENTS MUST BE IN HORIZONTAL, VERTICAL AXES ORDER, AND
LOWER CASE, LIKE ['x','y']).
Output: 2D meshgrid of input dimensions in the form X,Y (two outputs to unpack).
'''
import numpy as np
# Assert that the proper type of argument was received by the 'mesh' method.
assert isinstance(dimensions, list), '\n\nERROR: Wrong argument...readcm1.mesh requires a list.\n\n'
# Determine the size of the model domain and define the model horizontal resolution.
f = open(self.scalarctl,'r')
for i,line in enumerate(f):
linewords = line.split()
if linewords[0] == 'xdef':
nx = int(linewords[1])
x0 = float(linewords[3])
dx = float(linewords[4])
elif linewords[0] == 'ydef':
ny = int(linewords[1])
y0 = float(linewords[3])
dy = float(linewords[4])
elif linewords[0] == 'zdef':
nz = int(linewords[1])
start_z_levels = i + 1
end_z_levels = start_z_levels + nz
mesh_dims = dict()
# Create the meshgrid as specified by the 'dimensions' input argument.
for i,dimension in enumerate(dimensions):
if dimension == 'x':
mesh_dims[i] = np.arange(x0,x0+nx*dx,dx)
elif dimension == 'y':
mesh_dims[i] = np.arange(y0,y0+ny*dy,dy)
elif dimension == 'z':
zs = []
f.seek(0)
for j,line in enumerate(f):
linewords = line.split()
if j >= start_z_levels and j < end_z_levels:
zs.append(float(linewords[0]))
mesh_dims[i] = np.array(zs) # Not necessary to make array, but will for consistency's sake.
else:
raise SystemExit('\n\nERROR: readcm1.mesh requires dimensions x,y,or z in a 2 member list\n\n')
return(np.meshgrid(mesh_dims[0],mesh_dims[1]))
###########################################################################################################
if __name__=='__main__': # For testing purposes:
test = readcm1('/lustre/scratch/avandegu/extrapLBC/idd1')
pdata = test.parcel_read(240000)
x = pdata['x']
y = pdata['y']
z = pdata['z']
print('X: %s, Y: %s, Z: %s' % (x[0],y[0],z[0]))
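    # Added usage sketch (not in the original file; the file tag and file
    # number below are hypothetical):
    #     run = readcm1('/path/to/another_run')
    #     sdata = run.scalar_read(1)      # dict of 2D/3D scalar fields
    #     X, Z = run.mesh(['x', 'z'])     # meshgrid for plotting x-z slices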
|
vandegu/umich
|
read_cm1.py
|
Python
|
mit
| 14,904
|
[
"NetCDF"
] |
e3d31da5da5b8d76eaa10bc12d27329c6ea5e1f7a3564b48a5c6e13e79b27ab8
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Observe the environment to produce object sequences given a path."""
import abc
import collections
import functools
import math
import random
from typing import Dict, Sequence, TypeVar
import attr
from crafty import data
from crafty import hmm
from crafty import mp3d
from crafty import util
from crafty.appraise import Appraiser
from valan.datasets.common import graph_utils
from valan.r2r import house_utils as r2r_utils
Tag = TypeVar('Tag')
_GAUSSIAN_MEAN_FOR_ALL_NORMALIZATION = 0.0
# Gives higher values for near distances that aren't in the agent's face.
_GAMMA_SHAPE_DISTANCE = 2.0
# For sweep and interest, we want higher values the closer the magnitude is to
# zero, which is accomplished by values less than one.
_GAMMA_SHAPE_SWEEP = 0.9
_GAMMA_SHAPE_INTEREST = 0.9
@attr.s
class Observer(metaclass=abc.ABCMeta):
"""An agent that follows paths and observes an object sequence.
`mp_data`: MatterportData object enabling access to simulator information.
`appraiser`: Appraiser object that gives a prominence score to each category
of objects in the environment.
`magnitude_normalization`: For calculating probability of magnitude.
`distance_dev`: Parameter governing how far this Observer looks. Smaller
values will give preference to objects near the Observer's position.
`sweep_dev`: Parameter governing how likely the Observer is to notice objects
given its current heading. Larger values will increase the likelihood of
fixating on objects in its peripheral vision.
`interest_dev`: Parameter governing how much the Observer cares about the
Appraiser's scores. Larger values increase the impact of the Appraiser's
scores on the Observer's preferences.
`fixation_boost`: Parameter governing how much the Observer tends to remain
fixated on a given object. The value itself is used as a boost to
self-transitions in the HMM.
"""
mp_data: mp3d.MatterportData = attr.ib()
appraiser: Appraiser = attr.ib()
magnitude_normalization: str = attr.ib()
distance_dev: float = attr.ib()
sweep_dev: float = attr.ib()
interest_dev: float = attr.ib()
fixation_boost: float = attr.ib()
def __attrs_post_init__(self):
"""Selects a magnitude normalizer on initialization.
Source: https://www.attrs.org/en/stable/init.html
"""
# Note: *_loc are the location params for the chosen distribution.
# For Gaussian, it is the mean; for Gamma, gamma shape.
self.magnitude_normalizer = functools.partial(
util.MagnitudeNormalizer.create, self.magnitude_normalization)
if self.magnitude_normalization == 'normal':
self.dist_loc = _GAUSSIAN_MEAN_FOR_ALL_NORMALIZATION
self.sweep_loc = _GAUSSIAN_MEAN_FOR_ALL_NORMALIZATION
self.interest_loc = _GAUSSIAN_MEAN_FOR_ALL_NORMALIZATION
elif self.magnitude_normalization == 'gamma':
self.dist_loc = _GAMMA_SHAPE_DISTANCE
self.sweep_loc = _GAMMA_SHAPE_SWEEP
self.interest_loc = _GAMMA_SHAPE_INTEREST
# Stores cached taggers for EM HMM.
# Note: unlike PathSpecificHMM, where taggers are made on the fly,
# Hard EM based HMM are trained on per-house stats and cached
# for repeated use.
self.scan_id_to_hmm = dict()
@abc.abstractmethod
def __call__(self):
return
def dist_prob(
self,
distance: float,
modifier: float = 1.0,
) -> float:
"""Gives probability of a distance given the observer's distance deviation.
Args:
distance: the distance in meters to convert into a probability.
modifier: a multiplicative parameter to modify the distance deviation of
the observer for obtaining this probability. This allows one to scale
the viewing distance in some cases without having another distance_dev
parameter.
Returns:
A probability of the given distance obtained by normalizing its
magnitude.
"""
return self.magnitude_normalizer(self.dist_loc,
self.distance_dev * modifier)(
distance)
def interest_prob(self, score: float) -> float:
"""Probability of an object being interesting given its score.
Args:
score: the interestingness score of the object.
Returns:
A probability of the given interestingness score obtained by normalizing
its magnitude.
"""
    return 1 - self.magnitude_normalizer(self.interest_loc,
                                         self.interest_dev)(score)
def view_prob(
self,
view_diff: float,
modifier: float = 1.0,
) -> float:
"""Gives probability of a given change in view, both up/down and sideways.
Think of this as "how willing is the observer to swivel its head up, down,
    or around to observe objects within a given panorama?"
Args:
view_diff: the radians difference in view between two directions, e.g.
either side-to-side heading difference or up-to-down pitch difference.
This mainly captures how much objects deviate from a level point-of-view
in a given direction within a panorama.
modifier: a multiplicative parameter to modify the sweep deviation of the
observer for obtaining this probability. This allows one to scale the
sweep change in some cases without having another sweep_dev parameter.
Returns:
A probability of the given distance obtained by normalizing its
magnitude.
"""
    return self.magnitude_normalizer(self.sweep_loc,
                                     self.sweep_dev * modifier)(view_diff)
def get_object_pano_affinity(
self,
object_pano_distances: Dict[data.ObjectKey, Dict[str, float]],
) -> Dict[data.ObjectKey, Dict[str, float]]:
"""Computes distance-based affinity of objects to each pano.
Args:
object_pano_distances: dictionary containing the distances from each
object to all the panoramas in the scan.
Returns:
A dictionary containing normalized distance scores for each object to all
the panoramas in the scan. Note: these are not a probability distribution
per object over panoramas, but instead each score for each object,
panorama pair is [0,1].
"""
object_pano_affinity = collections.defaultdict(
lambda: collections.defaultdict(float))
for obj, pano_distances in object_pano_distances.items():
for pano, distance in pano_distances.items():
object_pano_affinity[obj][pano] = self.dist_prob(distance)
return object_pano_affinity
def get_prominence(
self,
pano_context: data.PanoContext,
heading: float,
path_category_counts: Dict[str, int],
) -> Dict[data.ObjectKey, float]:
"""Gets the visual prominence of each object in the given panorama.
Args:
pano_context: PanoContext object containing all the information for the
panorama of interest.
heading: The direction the agent is looking within the panorama.
path_category_counts: The number of times a given category (e.g. 'couch')
has been observed on this path.
Returns:
A dictionary giving a prominence probability distribution over all objects
in the panorama.
"""
pano_center = pano_context.center
scores = []
objects = []
for obj in pano_context.objects:
obj_heading = r2r_utils.compute_heading_angle(pano_center, obj.center)
obj_pitch = r2r_utils.compute_pitch_angle(pano_center, obj.center)
ppitch = self.view_prob(abs(obj_pitch), 0.5)
pview = self.view_prob(abs(heading - obj_heading))
pdist = self.dist_prob(obj.distance)
path_prominence = path_prominence_factor(
path_category_counts.get(obj.clean_category, 1))
interestingness = self.appraiser(obj.clean_category,
1.0 / path_prominence)
pinterest = self.interest_prob(interestingness)
scores.append(pview * pdist * pinterest * ppitch)
objects.append(data.get_object_key(obj))
total = sum(scores)
if total:
probs = [x / total for x in scores]
else:
probs = [1.0 / len(scores) for _ in scores]
return dict(zip(objects, probs))
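  # Added illustrative note (not part of the original module): get_prominence
  # scores each object as p(view) * p(distance) * p(interest) * p(pitch) and
  # then renormalizes over the panorama, so the returned values form a
  # probability distribution over the objects the observer could fixate on.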
def get_transitions_for_motion(
self,
motion: data.Motion,
path_category_counts: Dict[str, int],
) -> Dict[data.ObjectKey, Dict[data.ObjectKey, float]]:
"""Gets object-object transition information between pano/heading pairs.
Args:
motion: Motion object representing the transition from one panorama to
another.
path_category_counts: The number of times a given category (e.g. 'couch')
has been observed on this path.
Returns:
A transition pseudo-count matrix, represented as a dictionary from objects
to a dictionary of scores for all other objects present in the panoramas
represented in this motion.
"""
prominence = self.get_prominence(motion.goal, motion.heading,
path_category_counts)
scores = collections.defaultdict(lambda: collections.defaultdict(float))
for o1 in motion.source.objects:
so1 = data.get_object_key(o1)
for o2 in motion.goal.objects:
so2 = data.get_object_key(o2)
head_o1_o2 = r2r_utils.compute_heading_angle(o1.center, o2.center)
head_o1_c2 = r2r_utils.compute_heading_angle(motion.source.center,
o2.center)
dist_o1_to_c2 = r2r_utils.compute_distance(o1.center,
motion.goal.center)
dist_o2_to_c2 = r2r_utils.compute_distance(o2.center,
motion.goal.center)
pscan = (1 + math.cos((head_o1_o2 - motion.heading))) / 2
pdist = self.dist_prob(dist_o1_to_c2 + dist_o2_to_c2, 2.0)
pview = self.view_prob(abs(motion.heading - head_o1_c2), 2.0)
scores[so1][so2] = pscan * pdist * pview * prominence[so2]
return scores
def get_step_transition_counts(
self, motions: Sequence[data.Motion], path_category_counts: Dict[str, int]
) -> Sequence[Dict[Tag, Dict[Tag, float]]]:
"""Extracts transition information from motions (of a single path).
Args:
motions: A sequence of Motion object which represents the transition from
one panorama to another.
path_category_counts: The number of times a given category (e.g. 'couch')
has been observed on this path.
Returns:
A sequence of transitions.
"""
all_transitions = []
for motion in motions:
all_transitions.append(
self.get_transitions_for_motion(motion, path_category_counts))
return all_transitions
def get_merged_transition_counts(
self, motions_list: Sequence[Sequence[data.Motion]],
path_category_counts_list: Sequence[Dict[str, int]]
) -> Sequence[Dict[Tag, Dict[Tag, float]]]:
"""Extracts transition information from motions (of a multiple paths).
Args:
motions_list: A list of `motions` sequences where each such sequence
corresponds to a path.
path_category_counts_list: A list of path_category_counts info, where each
corresponds to a path.
Returns:
Merged transition information from all the paths in a single house.
"""
merged_transition_dict = dict()
for motions, path_category_counts in zip(motions_list,
path_category_counts_list):
step_transitions = self.get_step_transition_counts(
motions, path_category_counts)
for step_transition in step_transitions:
merged_transition_dict.update(step_transition)
return [merged_transition_dict]
class PathSpecificObserver(Observer):
"""An Observer agent that works off of path-specific HMM transitions.
The class constructs an agent on the fly for each path based on the stats.
"""
def __call__(
self,
motions: Sequence[data.Motion],
scan_data: mp3d.ScanData,
) -> Sequence[data.ActionObservation]:
"""Traverses a path and produces an object observation sequence.
Args:
motions: The Motion sequence provided by a Walker.
scan_data: Cached information about the scan, especially which objects are
visible in each panorama.
Returns:
A sequence of ActionObservations that indicate actions (where each action
is one or more pano-to-pano steps) and the objects the observer has
fixated on during each action. For example, a single action could be two
steps forward and one step to the left, fixating on a television, such
that the multi-step instruction could be "head left before the TV" instead
of the more verbose "go forward. go forward to the TV. turn left."
"""
path_pano_objects = set()
for motion in motions:
for obj in motion.source.objects:
path_pano_objects.add(data.get_object_key(obj))
path_category_instances = [so.category for so in path_pano_objects]
path_category_counts = collections.Counter(path_category_instances)
raw_emission_counts = self.get_object_pano_affinity(
scan_data.object_pano_distances)
raw_transitions_counts = self.get_step_transition_counts(
motions, path_category_counts)
start_prominence = self.get_prominence(
motions[0].source, motions[0].source.heading_change.init,
path_category_counts)
end_scores = {}
final_objects = motions[-1].goal.objects
for obj in final_objects:
obj_key = data.get_object_key(obj)
prob_of_distance = self.dist_prob(obj.distance)
obj_category_count = path_category_counts.get(obj_key.category, 1)
path_prominence = path_prominence_factor(obj_category_count)
interestingness = self.appraiser(obj.category, 1.0 / path_prominence)
prob_of_interest = self.interest_prob(interestingness)
end_scores[obj_key] = prob_of_distance * prob_of_interest
tagger = hmm.PathSpecificHMM(raw_emission_counts, start_prominence,
raw_transitions_counts, end_scores,
self.fixation_boost, 1e-10, 1e-10,
scan_data.pano_index)
path = [motion.source.pano for motion in motions]
tag_sequence = tagger(path, path_pano_objects)
observations = []
for motion, tag in zip(motions, tag_sequence):
observations.append(data.Observation(motion.source, motion.heading, tag))
return build_action_observations(observations)
class HardEMObserver(Observer):
"""An Observer agent that works off of per-house stats.
An agent here processes a path P with an HMM built from the path samples
obtained for the house from which P is constructed. The per-house HMMs
are only built once then used on all the individual paths from that house.
"""
def hard_em_trainer(self, motions_list: Sequence[Sequence[data.Motion]],
scan_data: mp3d.ScanData) -> hmm.HardEMHMM:
"""Train an HMM with per-house stats and Hard EM.
HardEM: https://ttic.uchicago.edu/~dmcallester/ttic101-07/lectures/em/em.pdf
Args:
motions_list: A list of Motion sequences provided by a Walker.
scan_data: Cached information about the scan, especially which objects are
visible in each panorama.
Returns:
Trained hmm.HardEMHMM tagger.
"""
# If a tagger is computed for this scan data, return it.
# Otherwise compute one and cache.
scan_id = scan_data.scan_id
if scan_id in self.scan_id_to_hmm:
return self.scan_id_to_hmm[scan_id]
path_category_counts_list = []
path_pano_objects_list = []
end_scores = {}
for motions in motions_list:
path_pano_objects = set()
for motion in motions:
for obj in motion.source.objects:
path_pano_objects.add(data.get_object_key(obj))
path_category_instances = [so.category for so in path_pano_objects]
path_category_counts = collections.Counter(path_category_instances)
path_category_counts_list.append(path_category_counts)
path_pano_objects_list.append(path_pano_objects)
final_objects = motions[-1].goal.objects
for obj in final_objects:
obj_key = data.get_object_key(obj)
prob_of_distance = self.dist_prob(obj.distance)
obj_category_count = path_category_counts.get(obj_key.category, 1)
# TODO(wangsu) if EM across all paths were proven to work out well
# later, then drop this.
path_prominence = path_prominence_factor(obj_category_count)
interestingness = self.appraiser(obj.category, 1.0 / path_prominence)
prob_of_interest = self.interest_prob(interestingness)
end_scores[obj_key] = prob_of_distance * prob_of_interest
raw_emission_counts = self.get_object_pano_affinity(
scan_data.object_pano_distances)
raw_transitions_counts = self.get_merged_transition_counts(
motions_list, path_category_counts_list)
start_prominence = self.get_prominence(
motions_list[0][0].source,
motions_list[0][0].source.heading_change.init, path_category_counts)
# TODO(wangsu) address Jason's comments below in comparison experiment:
# Open question: perhaps we can get away from these initializations
# when we use EM? I was thinking we could seed all the emission
# distributions as P(pano | object) being proportional to
# distance(pano_i, object), and similarly P(pano_next | pano_prev)
# is proportional to distance(pano_i, pano_prev). (Possibly using
# the Gamma normalization in there.)
tagger = hmm.HardEMHMM(raw_emission_counts, start_prominence,
raw_transitions_counts, end_scores,
self.fixation_boost, 1e-10, 1e-10,
scan_data.pano_index)
# Initializes transitions and emissions with EM.
paths = [
[motion.source.pano for motion in motions] for motions in motions_list
]
tagger.train(paths, path_pano_objects_list)
    self.scan_id_to_hmm[scan_id] = tagger
    return tagger
def __call__(
self,
motions: Sequence[data.Motion],
scan_data: mp3d.ScanData,
) -> Sequence[data.ActionObservation]:
"""Traverses a path and produces an object observation sequence.
Args:
motions: The Motion sequence provided by a Walker.
scan_data: Cached information about the scan, especially which objects are
visible in each panorama.
Returns:
A sequence of ActionObservations that indicate actions (same format as
the output of `path_specific_call`).
"""
assert scan_data.scan_id in self.scan_id_to_hmm
tagger = self.scan_id_to_hmm[scan_data.scan_id]
path = [motion.source.pano for motion in motions]
path_pano_objects = set()
for motion in motions:
for obj in motion.source.objects:
path_pano_objects.add(data.get_object_key(obj))
tag_sequence = tagger(path, path_pano_objects)
observations = []
for motion, tag in zip(motions, tag_sequence):
observations.append(data.Observation(motion.source, motion.heading, tag))
return build_action_observations(observations)
class RandomSampleObserver(Observer):
"""An Observer agent that works off of randomly sampled landmark objects.
The class randomly samples an object at each pano/path-point from the
objects visible. To keep interface consistency with PathSpecific & HardEM
Observers, the constructor here takes in the same args but do not act upon
all of them.
"""
def __call__(
self,
motions: Sequence[data.Motion],
scan_data: mp3d.ScanData,
) -> Sequence[data.ActionObservation]:
"""Traverses a path and produces an object observation sequence.
Args:
motions: The Motion sequence provided by a Walker.
scan_data: Cached information about the scan, especially which objects are
visible in each panorama.
Returns:
A sequence of ActionObservations that indicate actions (where each action
is one or more pano-to-pano steps) and the objects the observer has
fixated on during each action. For example, a single action could be two
steps forward and one step to the left, fixating on a television, such
that the multi-step instruction could be "head left before the TV" instead
of the more verbose "go forward. go forward to the TV. turn left."
"""
tag_sequence = []
for motion in motions:
candidate_objects = []
for obj in motion.source.objects:
candidate_objects.append(data.get_object_key(obj))
if candidate_objects:
sample_object = random.choice(candidate_objects)
tag_sequence.append(sample_object)
observations = []
for motion, tag in zip(motions, tag_sequence):
observations.append(data.Observation(motion.source, motion.heading, tag))
return build_action_observations(observations)
def path_prominence_factor(category_count):
"""Simply returns base-2 log of a category count."""
return math.log(1 + category_count, 2)
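# Added illustrative note (not in the original module): path_prominence_factor
# grows slowly with repetition, e.g. log2(1 + 1) = 1.0 for a category seen once
# and log2(1 + 3) = 2.0 for one seen three times, so frequently repeated
# categories are progressively down-weighted via the 1 / path_prominence factor
# passed to the appraiser.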
def build_action_observations(
observations: Sequence[data.Observation]
) -> Sequence[data.ActionObservation]:
"""Given observations, creates the actions associated with them.
Args:
observations: a sequence of Observation objects capturing a PanoContext, a
heading, and the object the observer fixated on in that moment.
Returns:
A sequence of ActionObservations that contextualize each Observation in the
panorama-to-panorama movements required to go from start to finish.
"""
action_observations = []
# Create the first ActionObservation.
obs = observations[0]
obj_connection = mp3d.get_connection_info(obs.pano_context.center,
obs.object_key.location)
obj_direction = mp3d.get_heading_change_type(obj_connection.heading,
obs.heading)
action_observations.append(
data.ActionObservation(data.DirectionType.STOP, 'intra', obj_direction,
obs))
# Create intermediate ActionObservations.
prev_heading = obs.heading
prev_pano_context = obs.pano_context
prev_pano_center = prev_pano_context.center
for obs in observations[1:]:
pano_center = obs.pano_context.center
panos_connection = graph_utils.ConnectionInfo(
distance=r2r_utils.compute_distance(prev_pano_center, pano_center),
heading=prev_heading,
pitch=r2r_utils.compute_pitch_angle(prev_pano_center, pano_center))
move_direction = mp3d.get_direction_type(panos_connection, obs.heading)
obj_connection = mp3d.get_connection_info(pano_center,
obs.object_key.location)
obj_direction = mp3d.get_heading_change_type(obj_connection.heading,
obs.heading)
move_type = 'intra' if obs.pano_context.pano == prev_pano_context.pano else 'inter'
action_observations.append(
data.ActionObservation(move_direction, move_type, obj_direction, obs))
prev_heading = obs.heading
prev_pano_context = obs.pano_context
prev_pano_center = pano_center
return action_observations
|
google-research/crafty
|
observe.py
|
Python
|
apache-2.0
| 23,915
|
[
"Gaussian"
] |
81e24a307b53d61517acba6f885cbc15c94db2c6162a9e3b05e2e4b955fbefef
|
import sys, os
sys.path.insert(1, os.path.join("..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def weights_and_distributions():
htable = h2o.upload_file(pyunit_utils.locate("smalldata/gbm_test/moppe.csv"))
htable["premiekl"] = htable["premiekl"].asfactor()
htable["moptva"] = htable["moptva"].asfactor()
htable["zon"] = htable["zon"]
# gamma
dl = H2ODeepLearningEstimator(distribution="gamma")
dl.train(x=range(3),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# gaussian
dl = H2ODeepLearningEstimator(distribution="gaussian")
dl.train(x=range(3),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# poisson
dl = H2ODeepLearningEstimator(distribution="poisson")
dl.train(x=range(3),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# tweedie
dl = H2ODeepLearningEstimator(distribution="tweedie")
dl.train(x=range(3),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
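    # Added note (not in the original test): each block above fits the same
    # network with a different 'distribution' (gamma, gaussian, poisson,
    # tweedie), using the first three columns (x=range(3)) as predictors,
    # 'medskad' as the response and 'antskad' as the observation weights.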
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_and_distributions)
else:
weights_and_distributions()
|
madmax983/h2o-3
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_weights_and_distributions_deeplearning.py
|
Python
|
apache-2.0
| 1,284
|
[
"Gaussian"
] |
a6c772e7400548c10a04c8ab5f871188116b3f13f5497a4e6b515cc60a8c21dd
|
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate imagination function in a multimodal VAE.
A good imagination function has the following desiderata:
1. Correctness: q(z| y) should lead to a point in 'z' which corresponds
to the concept "y".
2. Coverage: q(z| y) should cover the variation of the concept 'y'
based on how specific the concept is.
3. Compositionality: We should be able to generalize to unseen/novel
label combinations.
4. Comprehensibility: We should be able to generate an image that
   captures the essence of what we conditioned on.
NOTE: Currently this eval is specific to deepmind 2d shapes and labels.
Author: vrama@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from collections import namedtuple
import cPickle as pickle
import math
import os
import numpy as np
from scipy import stats
import tensorflow as tf
from datasets import label_map
from experiments import image_utils
from experiments import configuration
from experiments import vae_eval # pylint: disable=unused-import
from experiments.convolutional_multi_vae import ConvolutionalMultiVae
from experiments import comprehensibility
from joint_vae import utils
from third_party.interpolate import interpolate
flags = tf.flags
gfile = tf.gfile
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('iclr_results_path', '/tmp/results',
'Directory where all the NIPS\'17 Imagination results will '
'be stored.')
tf.app.flags.DEFINE_integer('max_global_step', 500000, 'Maximum global step')
tf.app.flags.DEFINE_string('distance_metric', 'kl', 'Options: \'kl\' or \'l2\'. '
'Metric to use for retrieval.')
tf.app.flags.DEFINE_string('input_queries', '',
'A set of queries and associated masks for eval. If not '
'specified, the labels from validation are used to query.')
tf.app.flags.DEFINE_boolean(
'visualize_means', True,
'If true, visualize the mean images, if false visualize all images.')
tf.app.flags.DEFINE_boolean(
'run_interpolation', True,
'If true, run interpolation between pairs of queries.')
tf.app.flags.DEFINE_boolean(
'evaluate_once', False, 'If true just evaluate once and break.')
tf.app.flags.DEFINE_integer('num_images_comprehension_eval', 10,
'Number of samples to draw '
'from the model for performing comprehension evaluation.')
tf.app.flags.DEFINE_integer('interp_samples', 100,
'Number of pairs of queries to interpolate between.')
tf.app.flags.DEFINE_integer('interp_steps', 9,
'Number of interpolation steps between queries.')
Query = namedtuple('Query', ['label', 'mask'])
RetrievalDatapoint = namedtuple('RetrievalDatapoint',
['gaussian', 'label', 'latent'])
QueryDatapoint = namedtuple('QueryDatapoint', ['gaussian', 'label'])
FeatureExtractionOps = namedtuple('FeatureExtractionOps', [
'latent_conditioned_image', 'labels', 'true_latents', 'saver'
])
InferenceOps = namedtuple('InferenceOps', [
'inference_label_ph', 'ignore_label_mask_ph', 'latent_conditioned_label',
'images_generation_op', 'z_ph', 'p_x_z_mean', 'p_x_z_sample', 'saver'
])
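# Added illustrative note (not part of the original module): these namedtuples
# are light-weight records used to pass groups of tensors/ops around, e.g. a
# hypothetical query
#     q = Query(label=label_vector, mask=ignore_mask)
# bundles a label with the mask marking which label slots should be ignored
# during inference (the field semantics are inferred here, not documented in
# the original source).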
def construct_feature_extraction_graph(config):
"""Construct the graph for the model in feature extration mode.
We set the model in validation mode, which means that we turn off random
shuffling. We then read from a FIFO queue, extracting representations for each
  image in, say, the validation set.
Args:
config: An object of class configuration.get_configuration()
Returns:
g_features: A tf.Graph() object.
vae: A ConvolutionalMultiVae/ConvolutionalVae/KroneckerMultiVae object.
temp_saver: A tf.train.Saver() object
num_iter: Int, number of iterations to run the graph for.
"""
g_features = tf.Graph()
with g_features.as_default():
tf.set_random_seed(123)
if FLAGS.model_type == 'multi':
vae = ConvolutionalMultiVae(
config,
mode=FLAGS.split_name,
split_name=FLAGS.split_name,
add_summary=False)
    elif FLAGS.model_type in ('single', 'kronecker'):
raise NotImplementedError
vae.build_model()
latent_conditioned_image = vae.latent_conditioned_image
gt_label = vae.labels
true_latents = vae.true_latents
temp_saver = tf.train.Saver(
var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
feature_extraction_ops = FeatureExtractionOps(
latent_conditioned_image, gt_label, true_latents, temp_saver)
print(vae.num_samples)
num_iter = int(math.ceil(vae.num_samples / FLAGS.batch_size))
return g_features, feature_extraction_ops, temp_saver, num_iter
def construct_inference_graph(config):
"""Construct the graph for the model in inference mode.
Inference mode provides utilities to pass in inputs to the model as
placeholders, enabling us to interact with the model freely.
Args:
config: An object of class configuration.get_configuration()
Returns:
g_inference: A tf.Graph() object.
    inference_ops: An InferenceOps namedtuple with the placeholders and ops
      needed to interact with the model in inference mode.
"""
g_inference = tf.Graph()
with g_inference.as_default():
tf.set_random_seed(123)
if FLAGS.model_type == 'multi':
vae = ConvolutionalMultiVae(
config, mode='inference', split_name=FLAGS.split_name)
    elif FLAGS.model_type in ('single', 'kronecker'):
      raise NotImplementedError
    else:
      raise ValueError('Unknown model_type: %s' % FLAGS.model_type)
vae.build_model()
inference_label_ph = vae.inference_label_ph
ignore_label_mask_ph = vae.ignore_label_mask_ph
latent_conditioned_label = vae.latent_conditioned_label
mean_or_sample = 'sample'
if FLAGS.visualize_means:
mean_or_sample = 'both'
images_generation_op = vae.generate_images_conditioned_label(
FLAGS.num_images_comprehension_eval, mean_or_sample=mean_or_sample)
z_ph = tf.placeholder(dtype=tf.float32, shape=[None, config.num_latent])
p_x_z, _ = vae.model.predict(z_ph)
saver = tf.train.Saver()
inference_ops = InferenceOps(inference_label_ph, ignore_label_mask_ph,
latent_conditioned_label, images_generation_op,
z_ph, p_x_z.mean(), p_x_z.sample(), saver)
return g_inference, inference_ops
def extract_features(feature_ops, inference_ops, g_features, g_inference, saver,
num_iter, checkpoint_path):
"""Extract attribute and image features for retrieval.
  The function first constructs a graph to process the validation or test set
  as specified and extracts the features for each image. It then processes
  either a set of queries provided externally (if available) or the labels from
  the validation set as the queries, extracts the representations for them, and
  finally returns the image representations and the label/query representations.
Args:
feature_ops: An object of FeatureExtractionOps which contains some ops to
run during feature extraction.
inference_ops: An object of InferenceOps which contains some ops to
run during feature extraction.
g_features: A tf.Graph() object in which the feature extraction has been
instantiated.
g_inference: A tf.Graph() object in which the vae_inference model has been
instantiated.
    saver: A tf.train.Saver() object used to save back the checkpoint we
      loaded, since checkpoints get deleted periodically and we reload it
      later for the inference graph.
num_iter: int, number of iterations to go through model evaluation when
extracting features for the whole dataset.
checkpoint_path: The path to the checkpoint to load the model parameters
from.
Returns:
    image_latents_and_labels: A list of RetrievalDatapoint objects (set to
      None before returning in the current code).
    query_latents_and_labels: A list of QueryDatapoint objects.
    queries_and_generated_images: A list of (Query, generated images) tuples.
    queries_and_interpolated_images: Output of interpolate_queries, or None.
    global_step: The global step of the evaluated checkpoint.
"""
with g_features.as_default() as g:
latent_conditioned_image = feature_ops.latent_conditioned_image
gt_label = feature_ops.labels
true_latents = feature_ops.true_latents
init_fn, global_step = utils.create_restore_fn(checkpoint_path,
feature_ops.saver)
sv = tf.train.Supervisor(
graph=g,
init_fn=init_fn,
logdir=FLAGS.eval_dir,
saver=None,
summary_op=None,
summary_writer=None)
with sv.managed_session(start_standard_services=True) as sess:
image_latents_and_labels = []
if not FLAGS.input_queries:
for batch_iter in range(num_iter):
tf.logging.info('Extracting latent representations for %d of %d',
batch_iter + 1, num_iter)
gaussian_means, gaussian_stds, labels, latents = sess.run([
latent_conditioned_image.density.loc,
latent_conditioned_image.density.scale, gt_label, true_latents
])
gaussian_means = np.split(gaussian_means, len(gaussian_means), axis=0)
gaussian_stds = np.split(gaussian_stds, len(gaussian_stds), axis=0)
labels = utils.unbatchify_list(labels)
labels = [tuple(x) for x in labels]
latents = np.split(latents, len(latents), axis=0)
for gauss_mean, gauss_std, label, latent in zip(
gaussian_means, gaussian_stds, labels, latents):
gaussian = utils.Gaussian(gauss_mean, gauss_std)
image_latents_and_labels.append(
RetrievalDatapoint(gaussian, label, latent))
# Save back the checkpoint we loaded because it might have vanished, as
# TF deletes checkpoints which are too old.
temp_checkpoint_path = os.path.join(FLAGS.eval_dir,
'eval_model.ckpt-%s' % (global_step))
saver.save(
sess,
save_path=temp_checkpoint_path,
latest_filename='checkpoint_temp')
# The query generator produces output in the following format:
# {
# 'queries': list of queries, each an np.array of [1, num_attributes]
# with each entry in the array setting the label value for the
# attribute.
# 'masks': list of masks, each an np.array of [1, num_attributes]
# with 0/1 values specifying attributes to select and which to
# ignore.
# }
  # Each entry in masks[i] corresponds to the entry in queries[i].
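  # For example (hypothetical values for a dataset with 3 attributes):
  #   queries = [np.array([[2, 0, 1]]), ...]
  #   masks = [np.array([[1, 0, 1]]), ...]  # attributes 0 and 2 are observed.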
if FLAGS.input_queries:
tf.logging.info('****Using input query file %s.****', FLAGS.input_queries)
with tf.gfile.Open(FLAGS.input_queries, 'r') as f:
queries_and_masks = pickle.load(f)
queries = queries_and_masks['queries']
masks = queries_and_masks['masks']
# If no query file is specified, use the labels from the validation set as
# the queries.
else:
tf.logging.info('Defaulting to using queries from SSTables.')
queries = set([x.label for x in image_latents_and_labels])
queries = [np.expand_dims(np.array(x), axis=0) for x in queries]
masks = np.ones((len(queries), queries[0].shape[1]))
masks = np.split(masks, len(masks), axis=0)
with g_inference.as_default() as g:
inference_label_ph = inference_ops.inference_label_ph
ignore_label_mask_ph = inference_ops.ignore_label_mask_ph
latent_conditioned_label = inference_ops.latent_conditioned_label
images_generation_op = inference_ops.images_generation_op
z_ph = inference_ops.z_ph
pxz_mu = inference_ops.p_x_z_mean
pxz_sample = inference_ops.p_x_z_sample
init_fn, global_step = utils.create_restore_fn(temp_checkpoint_path,
inference_ops.saver)
sv = tf.train.Supervisor(
graph=g,
init_fn=init_fn,
logdir=FLAGS.eval_dir,
saver=None,
summary_op=None,
summary_writer=None,)
with sv.managed_session(start_standard_services=True) as sess:
query_latents_and_labels = []
tf.logging.info('Extracting representations for queries and computing '
'posterior entropies.')
queries_and_generated_images = []
entropies_for_queries = []
for query, mask in zip(queries, masks):
gaussian_mean, gaussian_std, generated_images_for_query = sess.run(
[
latent_conditioned_label.density.loc,
latent_conditioned_label.density.scale,
images_generation_op,
],
feed_dict={inference_label_ph: query,
ignore_label_mask_ph: mask})
gaussian_query = utils.Gaussian(gaussian_mean, gaussian_std)
        # Entropy of a diagonal Gaussian: 0.5 * sum(log(2 * pi * e * sigma^2)).
        entropy_for_query = 0.5 * np.log(
            np.prod(2 * np.pi * np.e * np.square(gaussian_std)))
current_query = Query(query, mask)
entropies_for_queries.append({'Query': current_query,
'Entropy': entropy_for_query})
# FIX Decoder Size Error: If we misspecified the decoder size, then
# we subsample the channels for further evaluation.
if not isinstance(generated_images_for_query, list):
generated_images_for_query = [generated_images_for_query]
        if (FLAGS.dataset == 'affine_mnist' and
            generated_images_for_query[0].shape[-1] == 6):
use_gen_images_for_query = []
for gen_image in generated_images_for_query:
use_gen_images_for_query.append(gen_image[:, :, :, [0, 3]])
generated_images_for_query = use_gen_images_for_query
# We typically draw multiple samples for each image to get robustness.
queries_and_generated_images.append((current_query,
generated_images_for_query))
query_latents_and_labels.append(
QueryDatapoint(gaussian_query, current_query))
# Store / serialize the entropy results
pickle_name = (FLAGS.results_tag + '_' + FLAGS.split_name + '_' +
'_'.join(FLAGS.eval_dir.split('/')[-2:]) +
FLAGS.distance_metric +'_entropy_' + '_%s.p' % global_step)
output_file = os.path.join(FLAGS.iclr_results_path, pickle_name)
tf.logging.info('Writing the entropy file to %s.' % (output_file))
with tf.gfile.Open(output_file, 'w') as f:
pickle.dump(entropies_for_queries, f)
if FLAGS.run_interpolation:
queries_and_interpolated_images = interpolate_queries(
sess, query_latents_and_labels, z_ph, pxz_mu, pxz_sample)
else:
queries_and_interpolated_images = None
tf.logging.info('Extracted representations for all %d queries',
len(queries))
image_latents_and_labels = None
return (image_latents_and_labels, query_latents_and_labels,
queries_and_generated_images, queries_and_interpolated_images,
global_step)
def interpolate_queries(sess,
query_latents_and_labels,
z_ph,
mean_images_op,
sample_images_op,
also_sample=True,
interp_samples=FLAGS.interp_samples,
interp_steps=FLAGS.interp_steps):
"""Interpolate between all pairs of queries and generate images."""
qs_and_imgs = []
interps = []
rng = np.random.RandomState(1)
if len(query_latents_and_labels) > 24:
queries = rng.choice(
len(query_latents_and_labels),
size=min(len(query_latents_and_labels), interp_samples * 2),
replace=False)
start_queries = queries[:len(queries) // 2]
end_queries = queries[len(queries) // 2:]
for start_q_i, end_q_i in zip(start_queries, end_queries):
start_q = query_latents_and_labels[start_q_i]
end_q = query_latents_and_labels[end_q_i]
interps.append(((start_q, end_q), interpolate.do_interpolation(
interpolate.slerp, start_q.gaussian.mean[0], end_q.gaussian.mean[0],
interp_steps)))
if also_sample:
for _ in range(3):
start_v = rng.normal(start_q.gaussian.mean[0],
start_q.gaussian.std[0])
end_v = rng.normal(end_q.gaussian.mean[0], end_q.gaussian.std[0])
interps.append(((start_q, end_q), interpolate.do_interpolation(
interpolate.slerp, start_v, end_v, interp_steps)))
else:
start_queries = []
end_queries = []
for i, start_q in enumerate(query_latents_and_labels):
for j, end_q in enumerate(query_latents_and_labels[i + 1:]):
start_queries.append(i)
end_queries.append(i + j + 1)
interps.append(((start_q, end_q), interpolate.do_interpolation(
interpolate.slerp, start_q.gaussian.mean[0], end_q.gaussian.mean[0],
interp_steps)))
if also_sample:
for _ in range(3):
start_v = rng.normal(start_q.gaussian.mean[0],
start_q.gaussian.std[0])
end_v = rng.normal(end_q.gaussian.mean[0], end_q.gaussian.std[0])
interps.append(((start_q, end_q), interpolate.do_interpolation(
interpolate.slerp, start_v, end_v, interp_steps)))
for i, (queries, interpolated_means) in enumerate(interps):
if i % 100 == 0:
print(i, 'Running interpolation')
z_means, z_samples = sess.run(
[mean_images_op, sample_images_op],
feed_dict={z_ph: interpolated_means})
# Add a grey line between each image.
z_means[:-1, :, -1, :] = 0.5
z_samples[:-1, :, -1, :] = 0.5
query_index = i // 4 if also_sample else i
start_label = query_latents_and_labels[start_queries[query_index]].label
start_label = ':'.join(
str(l) if start_label.mask[0][j] == 1 else '_'
for j, l in enumerate(start_label.label[0]))
end_label = query_latents_and_labels[end_queries[query_index]].label
end_label = ':'.join(
str(l) if end_label.mask[0][j] == 1 else '_'
for j, l in enumerate(end_label.label[0]))
labels = [[dict(label='', color='#000000')]
for _ in range(interp_steps + 1)]
labels[0][0]['label'] = start_label
labels[-1][0]['label'] = end_label
filename_suffix = ('%04d_sl%s_el%s' %
(i, start_label.replace(':', '_').replace('?', 'q'),
end_label.replace(':', '_').replace('?', 'q')))
image_utils.plot_images(
z_means,
n=len(z_means),
annotations=labels,
filename=os.path.join(FLAGS.iclr_results_path, '%s_interp_mean_%s' %
(FLAGS.results_tag, filename_suffix)))
# image_utils.plot_images(
# z_samples,
# n=len(z_samples),
# annotations=labels,
# filename=os.path.join(FLAGS.iclr_results_path, '%s_interp_samp_%s' %
# (FLAGS.results_tag, filename_suffix)))
qs_and_imgs.append((queries, z_means, z_samples))
return qs_and_imgs
def evaluate_comprehensibility(queries_and_gen_images,
comprehensibility_eval,
global_step,
num_classes_per_attribute,
metric_results,
summary_writer=None):
"""Evaluate comprehensibility of generated images.
  Comprehensibility is evaluated by passing generated images through a
  pretrained classifier, which checks whether the generated images yield
  the same predictions as the attributes we conditioned on.
Args:
queries_and_gen_images: Set of queries and generated images, a tuple of
namedtuple query and list of np.array images.
comprehensibility_eval: an instance of class Comprehensibility, which
is a helper for evaluating comprehensibility.
global_step: Global step at which we are doing the evaluation.
num_classes_per_attribute: List of ints.
metric_results: Stores the results of all the metrics indexed by
query_label, and metric name. For example (4, cluster_recall_100) means
we are asking for results at the leaf node (for MNISTa) for cluster recall
@100 metric.
summary_writer: An object of tf.summary.FileWriter to write summaries.
Returns:
    metric_results: metric results with the comprehensibility numbers.
"""
tf.logging.info('Starting comprehensibility eval.')
all_query_data = []
for query, gen_image_list in queries_and_gen_images:
comprehensibility_scores = []
visualization_images = []
# Each query can have multiple images drawn for it, which is stored in
# a list.
all_predicted_labels = []
# Compute the reference uniform distributions.
empirical_histograms = []
for attribute in num_classes_per_attribute:
empirical_histograms.append(np.zeros(attribute))
for gen_image in gen_image_list:
compre_eval, predicted_label, vis_image = comprehensibility_eval.evaluate(
gen_image, query.label, query.mask)
visualization_images.append(vis_image)
all_predicted_labels.append(predicted_label)
for attribute in xrange(predicted_label.shape[-1]):
empirical_histograms[attribute][predicted_label[attribute]] += 1
comprehensibility_scores.append(compre_eval.sum())
    # Normalize the empirical histogram into a distribution, and compute the KL
    # divergence against a reference uniform distribution.
all_kld = []
all_jsd_sim = []
    # Consolidated JSD is the Jensen-Shannon divergence computed across all
    # attributes, observed as well as unobserved.
all_consolidated_jsd_sim = []
for index, _ in enumerate(empirical_histograms):
attribute_histogram = empirical_histograms[index] / len(gen_image_list)
# Unspecified dimensions should be ignored.
if query.mask[0, index] == 0:
        # For unobserved attributes the reference distribution is uniform;
        # KL(histogram || uniform) = -H(histogram) + log(num_classes).
        kld = -1 * stats.entropy(attribute_histogram) + np.log(
            num_classes_per_attribute[index])
reference_distribution = np.ones(
num_classes_per_attribute[index]) / num_classes_per_attribute[index]
elif query.mask[0, index] == 1:
# For observed queries the reference distribution is a one hot vector
# with the observed bit on.
reference_distribution = np.zeros(num_classes_per_attribute[index])
reference_distribution[query.label[0, index]] = 1
jenson_shannon_m = (
0.5 * attribute_histogram + 0.5 * reference_distribution)
      # Note: stats.entropy computes the KL divergence when given two inputs.
jsd = 0.5 * (stats.entropy(attribute_histogram, jenson_shannon_m) +
stats.entropy(reference_distribution, jenson_shannon_m))
jsd_sim = 1 - jsd
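      # With natural logarithms the Jensen-Shannon divergence is at most
      # ln(2) ~= 0.693, so jsd lies in [0, 1] and jsd_sim in [1 - ln(2), 1].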
assert jsd >= 0 and jsd <= 1, 'Invalid value for JSD.'
if query.mask[0, index] == 0:
assert kld >= 0, 'Invalid value for KL divergence.'
all_kld.append(kld)
all_jsd_sim.append(jsd_sim)
all_consolidated_jsd_sim.append(jsd_sim)
assert len(all_kld) == query.mask.shape[-1] - np.sum(
query.mask), 'Check the KLD array, wrong dimensions.'
assert len(all_jsd_sim) == query.mask.shape[-1] - np.sum(
query.mask), 'Check the JSD array, wrong dimensions.'
if not all_kld:
overall_kld = 0
else:
overall_kld = np.mean(all_kld)
if not all_jsd_sim:
overall_jsd_sim = 1
else:
overall_jsd_sim = np.mean(all_jsd_sim)
overall_consolidated_jsd_sim = np.mean(all_consolidated_jsd_sim)
    # overall_kld above is the mean of KL(prediction histogram || uniform) over
    # the unobserved attributes, i.e. how far the predictions are from uniform.
metric_results[(
query.mask.sum(),
'comprehensibility')].append(np.mean(comprehensibility_scores))
metric_results[(query.mask.sum(),
'parametric_coverage')].append(overall_kld)
metric_results[(query.mask.sum(),
'parametric_jsd_sim')].append(overall_jsd_sim)
metric_results[(query.mask.sum(),
'parametric_consolidated_jsd_sim')].append(
overall_consolidated_jsd_sim)
# Only store the first 10 images to disk.
subset_predicted_labels = all_predicted_labels[:10]
visualization_images = visualization_images[:10]
comprehensibility_scores = comprehensibility_scores[:10]
# Serialize information about the query to disk.
all_query_data.append(
(query.label, query.mask, subset_predicted_labels, visualization_images,
comprehensibility_scores, overall_kld, overall_jsd_sim, overall_consolidated_jsd_sim))
for k, v in metric_results.iteritems():
    if k[1] in ('comprehensibility', 'parametric_coverage',
                'parametric_jsd_sim', 'parametric_consolidated_jsd_sim'):
      tf.logging.info('%s: %f', str(k[0]) + '_' + k[1], np.mean(v))
if summary_writer:
utils.add_simple_summary(summary_writer,
np.mean(v), str(k[0]) + '_' + k[1],
global_step)
if len(all_query_data) > 1000:
# Only store results for a maximum of 1000 queries.
    subset = np.random.choice(len(all_query_data), 1000, replace=False)
all_query_data = [all_query_data[idx] for idx in subset]
# Serialize the results to disk.
tf.logging.info('Writing evaluation metadata to disk.')
pickle_name = (FLAGS.results_tag + '_' + FLAGS.split_name + '_' +
'_'.join(FLAGS.eval_dir.split('/')[-2:]) +
'parametric_eval_metadata_%s.p' % global_step)
full_result_filepath = os.path.join(FLAGS.iclr_results_path, pickle_name)
with tf.gfile.Open(full_result_filepath, 'w') as f:
pickle.dump(all_query_data, f)
return metric_results
def extract_features_and_eval_imagination(summary_writer):
"""Extract features for retrieval and perform imagination evals.
Args:
summary_writer: an instance of tf.summary.FileWriter
"""
config = configuration.get_configuration()
config.batch_size = FLAGS.batch_size
label_mapping = label_map.LabelMap(config.label_map_json)
g_features, feature_ops, temp_saver, num_iter = (
construct_feature_extraction_graph(config))
g_inference, inference_ops = construct_inference_graph(config)
comprehensibility_eval = comprehensibility.Comprehensibility(
config.comprehensibility_ckpt,
config,
config.num_classes_per_attribute,
config.image_size,
FLAGS.visualize_means,
attribute_names=label_mapping.attributes,
hidden_units=config.comprehensibility_hidden_units)
for checkpoint_path in slim.evaluation.checkpoints_iterator(
FLAGS.checkpoint_dir, FLAGS.eval_interval_secs):
(image_latents_and_labels, query_latents_and_labels, queries_and_gen_images,
queries_and_interp_images, global_step) = (extract_features(
feature_ops, inference_ops, g_features, g_inference, temp_saver,
num_iter, checkpoint_path))
# Stores the results of all the metrics indexed by query_label, and
# metric name. For example (4, cluster_recall_100) means that we are
# asking for results at the leaf node (for MNISTa) for cluster recall
# @100 metric.
all_metric_results = defaultdict(list)
#tf.logging.info('Performing Retrieval.')
#all_metric_results = retrieval_utils.text_to_image_retrieval(
# image_latents_and_labels,
# query_latents_and_labels,
# global_step,
# FLAGS.eval_dir,
# all_metric_results,
# distance_metric=FLAGS.distance_metric,
# summary_writer=summary_writer)
tf.logging.info('Evaluating Comprehensibility.')
all_metric_results = evaluate_comprehensibility(
queries_and_gen_images,
comprehensibility_eval,
global_step,
config.num_classes_per_attribute,
all_metric_results,
summary_writer=summary_writer)
    if all_metric_results is None:  # Intentionally never True: interpolation images are not added to the pickle.
all_metric_results['queries_and_interpolated_images'] = (
queries_and_interp_images)
# Dump the results into a Pickle.
pickle_name = (FLAGS.results_tag + '_' + FLAGS.split_name + '_' +
'_'.join(FLAGS.eval_dir.split('/')[-2:]) +
FLAGS.distance_metric + '_%s.p' % global_step)
full_result_filepath = os.path.join(FLAGS.iclr_results_path, pickle_name)
tf.logging.info('Storing results at %s.', full_result_filepath)
with tf.gfile.Open(full_result_filepath, 'w') as f:
pickle.dump(all_metric_results, f)
if global_step >= FLAGS.max_global_step or FLAGS.evaluate_once:
print(global_step, 'No longer waiting for a new checkpoint.')
break
def main(_):
if not FLAGS.iclr_results_path:
raise ValueError('iclr_results_path must be specified!')
iclr_results_path = FLAGS.iclr_results_path
split_names = ['val', 'test']
for i, split_name in enumerate(split_names):
np.random.seed(42)
FLAGS.split_name = split_name
print(FLAGS.split_name)
if FLAGS.input_queries:
FLAGS.input_queries = FLAGS.input_queries.replace(
'_' + split_names[1 - i] + '_', '_' + split_name + '_')
print(FLAGS.input_queries)
FLAGS.iclr_results_path = iclr_results_path + '_' + FLAGS.split_name
print(FLAGS.iclr_results_path)
if not tf.gfile.Exists(FLAGS.iclr_results_path):
tf.logging.info('Creating the ICLR results directory %s',
FLAGS.iclr_results_path)
tf.gfile.MakeDirs(FLAGS.iclr_results_path)
assert FLAGS.checkpoint_dir is not None, ('Please specify a checkpoint '
'directory.')
assert FLAGS.eval_dir is not None, 'Please specify an evaluation directory.'
summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)
extract_features_and_eval_imagination(summary_writer)
if __name__ == '__main__':
tf.app.run()
|
google/joint_vae
|
experiments/vae_imagination_eval.py
|
Python
|
apache-2.0
| 30,719
|
[
"Gaussian"
] |
22f6ff167666a16dc269c7e79085a56372ad80ae133b7dd71b789e753affac20
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
import espressomd.observables
def calc_com_x(system, x, id_list):
"""Mass-weighted average, skipping virtual sites"""
masses = system.part[id_list].mass
# Filter out virtual particles by using mass=0 for them
virtual = system.part[id_list].virtual
for i in range(len(masses)):
if virtual[i]:
masses[i] = 0.
com_x = np.average(
getattr(system.part[id_list], x), weights=masses, axis=0)
return com_x
class Observables(ut.TestCase):
N_PART = 200
# Handle for espresso system
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
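    # Particles are created with gapped, non-contiguous ids (3, 5, 7, ...), so
    # the observables are also exercised on a sparse id range.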
system.part.add(
id=np.arange(3, 3 + 2 * N_PART, 2),
pos=np.random.random((N_PART, 3)) * system.box_l,
v=np.random.random((N_PART, 3)) * 3.2 - 1,
f=np.random.random((N_PART, 3)))
if espressomd.has_features(["MASS"]):
system.part[:].mass = np.random.random(N_PART)
if espressomd.has_features(["DIPOLES"]):
system.part[:].dip = np.random.random((N_PART, 3)) - .3
if espressomd.has_features(["ROTATION"]):
system.part[:].omega_body = np.random.random((N_PART, 3)) - .5
system.part[:].torque_lab = np.random.random((N_PART, 3)) - .5
system.part[:].quat = np.random.random((N_PART, 4))
if espressomd.has_features("DIPOLES"):
system.part[:].dipm = np.random.random(N_PART) + 2
if espressomd.has_features("ELECTROSTATICS"):
system.part[:].q = np.random.random(N_PART)
if espressomd.has_features("VIRTUAL_SITES"):
p = system.part[system.part[:].id[8]]
p.virtual = True
def generate_test_for_pid_observable(
_obs_name, _pprop_name, _agg_type=None):
"""Generates test cases for observables working on particle id lists.
"""
pprop_name = _pprop_name
obs_name = _obs_name
agg_type = _agg_type
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
# Randomly pick a subset of the particles
id_list = sorted(
np.random.choice(
self.system.part[:].id,
size=int(
self.N_PART * .9),
replace=False))
for id in id_list:
assert(self.system.part.exists(id))
# Get data from particles
if pprop_name == "f":
for p_id in id_list:
if self.system.part[p_id].virtual:
id_list.remove(p_id)
part_data = getattr(self.system.part[id_list], pprop_name)
# Reshape and aggregate to linear array
if len(part_data.shape) > 1:
if agg_type == "average":
part_data = np.average(part_data, 0)
if agg_type == "sum":
part_data = np.sum(part_data, 0)
if agg_type == 'com':
part_data = calc_com_x(self.system, pprop_name, id_list)
# Data from observable
observable = obs_name(ids=id_list)
obs_data = observable.calculate()
# Check
self.assertEqual(obs_data.shape, part_data.shape)
np.testing.assert_equal(id_list, observable.ids)
np.testing.assert_array_almost_equal(
obs_data,
part_data, err_msg="Data did not agree for observable " +
str(obs_name) +
" and particle property " +
pprop_name, decimal=11)
# Test setters and getters
self.assertEqual(observable.ids, id_list)
new_pids = [id_list[0]]
observable.ids = new_pids
self.assertEqual(observable.ids, new_pids)
return func
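    # Instantiate the generated functions at class scope so that unittest
    # discovers them as regular test_* methods, one per observable.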
test_pos = generate_test_for_pid_observable(
espressomd.observables.ParticlePositions, "pos")
test_v = generate_test_for_pid_observable(
espressomd.observables.ParticleVelocities, "v")
test_f = generate_test_for_pid_observable(
espressomd.observables.ParticleForces, "f")
test_com_position = generate_test_for_pid_observable(
espressomd.observables.ComPosition, 'pos', 'com')
test_com_velocity = generate_test_for_pid_observable(
espressomd.observables.ComVelocity, 'v', 'com')
if espressomd.has_features(["DIPOLES"]):
test_mag_dip = generate_test_for_pid_observable(
espressomd.observables.MagneticDipoleMoment, "dip", "sum")
if espressomd.has_features(["ROTATION"]):
test_body_angular_velocity = generate_test_for_pid_observable(
espressomd.observables.ParticleBodyAngularVelocities, "omega_body")
test_lab_angular_velocity = generate_test_for_pid_observable(
espressomd.observables.ParticleAngularVelocities, "omega_lab")
@utx.skipIfMissingFeatures(['ROTATION'])
def test_particle_body_velocities(self):
obs = espressomd.observables.ParticleBodyVelocities(
ids=self.system.part[:].id)
obs_data = obs.calculate()
part_data = np.array([p.convert_vector_space_to_body(p.v)
for p in self.system.part])
self.assertEqual(obs_data.shape, part_data.shape)
np.testing.assert_array_almost_equal(part_data, obs_data,
err_msg="Data did not agree for observable ParticleBodyVelocities and particle derived values.",
decimal=9)
def test_energy(self):
s = self.system.analysis.energy()["total"]
obs_data = espressomd.observables.Energy().calculate()
self.assertEqual(obs_data.shape, (1,))
np.testing.assert_array_almost_equal(
obs_data,
s,
err_msg="Energy from analysis and observable did not agree",
decimal=9)
def test_pressure(self):
s = self.system.analysis.pressure()["total"]
obs_data = espressomd.observables.Pressure().calculate()
self.assertEqual(obs_data.shape, (1,))
np.testing.assert_array_almost_equal(
obs_data,
s,
err_msg="Pressure from analysis and observable did not agree",
decimal=9)
def test_pressure_tensor(self):
s = self.system.analysis.pressure_tensor()["total"]
obs_data = espressomd.observables.PressureTensor().calculate()
self.assertEqual(obs_data.shape, s.shape)
np.testing.assert_array_almost_equal(
obs_data,
s,
err_msg="Pressure tensor from analysis and observable did not agree",
decimal=9)
@utx.skipIfMissingFeatures('ELECTROSTATICS')
def test_current(self):
obs_data = espressomd.observables.Current(
ids=self.system.part[:].id).calculate()
part_data = self.system.part[:].q.dot(
self.system.part[:].v)
self.assertEqual(obs_data.shape, part_data.shape)
np.testing.assert_array_almost_equal(
obs_data, np.copy(part_data), err_msg="Data did not agree for observable 'Current'", decimal=9)
@utx.skipIfMissingFeatures('ELECTROSTATICS')
def test_dipolemoment(self):
obs = espressomd.observables.DipoleMoment(ids=self.system.part[:].id)
obs_data = obs.calculate()
part_data = self.system.part[:].q.dot(self.system.part[:].pos)
self.assertEqual(obs_data.shape, part_data.shape)
np.testing.assert_array_almost_equal(
obs_data, part_data, err_msg="Data did not agree for observable 'DipoleMoment'", decimal=9)
def test_com_force(self):
id_list = sorted(
np.random.choice(
self.system.part[:].id,
size=int(
self.N_PART * .9),
replace=False))
particles = self.system.part.select(
lambda p: p.id in id_list and not p.virtual)
np.testing.assert_allclose(
np.sum(particles.f, axis=0),
espressomd.observables.TotalForce(ids=id_list).calculate())
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/observables.py
|
Python
|
gpl-3.0
| 9,091
|
[
"ESPResSo"
] |
929f9da72c459be00783849081af79c12576d4425ff8bebe58f0d83ec14897f1
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: David
#
# Created: 13/03/2017
# Copyright: (c) David 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
import cv2
import numpy as np
import os
import sys
class Portrait:
"""A simple portrait class"""
originalImage = None
imageWithMargins = None
headerHeightPerc = 0.20
marginPerc = 0.02
m_font = cv2.FONT_HERSHEY_COMPLEX
m_thickness = 1
m_filtersize = 5
m_name = "Original"
def __init__ (self, imageLocation):
if not os.path.isfile(imageLocation):
raise IOError("File " + imageLocation + " not found")
self.originalImage = cv2.imread(imageLocation)
self.AddMargins()
self.WriteSecondLineText()
self.imageWithMargins = self.DoFilter(self.imageWithMargins)
        if len(self.imageWithMargins.shape) == 2:
self.imageWithMargins = self.ToColor(self.imageWithMargins)
self.WriteFirstLineText()
def __del__(self):
pass
def AddMargins(self):
marginpx = int(self.originalImage.shape[1] * self.marginPerc)
headerpx = int(self.originalImage.shape[1] * self.headerHeightPerc)
        self.imageWithMargins = np.zeros(
            [self.originalImage.shape[0] + marginpx + headerpx,
             self.originalImage.shape[1] + 2 * marginpx,
             self.originalImage.shape[2]],
            dtype=np.uint8)
        self.imageWithMargins[:, :, :] = 255
        self.imageWithMargins[headerpx:headerpx + self.originalImage.shape[0],
                              marginpx:marginpx + self.originalImage.shape[1]] = self.originalImage
def WriteFirstLineText(self):
headerpx = int(self.originalImage.shape[1] * self.headerHeightPerc)
fontsize = headerpx / 100
thickness = self.m_thickness
textsize = cv2.getTextSize(self.m_name, self.m_font, fontsize, thickness)
cv2.putText(self.imageWithMargins, self.m_name,
(self.imageWithMargins.shape[1] / 2 - textsize[0][0] / 2, headerpx / 3), self.m_font, fontsize,
(255, 255, 255), thickness+1)
cv2.putText(self.imageWithMargins, self.m_name,
(self.imageWithMargins.shape[1]/2-textsize[0][0]/2, headerpx / 3), self.m_font, fontsize,
(0, 0, 0), thickness)
def WriteSecondLineText(self):
headerpx = int(self.originalImage.shape[1] * self.headerHeightPerc)
fontsize = headerpx / 100
thickness = self.m_thickness
textsize = cv2.getTextSize(self.m_name, self.m_font, fontsize, thickness)
cv2.putText(self.imageWithMargins, self.m_name,
(self.imageWithMargins.shape[1] / 2 - textsize[0][0] / 2, headerpx/2 + headerpx / 3), self.m_font, fontsize,
(255, 255, 255), thickness+1)
cv2.putText(self.imageWithMargins, self.m_name,
(self.imageWithMargins.shape[1]/2-textsize[0][0]/2, headerpx/2 + headerpx / 3), self.m_font, fontsize,
(0, 0, 0), thickness)
def DoFilter(self, image):
"""Returns filtered image"""
return image
def GetImage(self):
return self.imageWithMargins
def ToGray(self, image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def Blur(self, image):
return cv2.blur(image,(self.m_filtersize,self.m_filtersize))
def ToColor(self, image):
return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
def Theshold(self, image):
        startThreshold = 10
        if len(image.shape) == 3:
image = self.ToGray(image)
multiplier = 1
mean = 0
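        # Lower the threshold step by step until at least half of the pixels
        # are white (mean >= 128), so the binary image is not mostly black.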
while mean < 128:
ret, threshImage = cv2.threshold(image, int(255 - startThreshold * multiplier), 255, cv2.THRESH_BINARY)
mean = np.mean(threshImage)
multiplier += 1
if startThreshold * multiplier > 255:
                break
image = threshImage
return image
class SobelXFilter(Portrait):
m_name = "SobelX"
def DoFilter(self, image):
image = self.ToGray(image)
image = self.Blur(image)
image = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize = self.m_filtersize)
image = np.absolute(image)
        max_value = np.max(image)
        image = np.uint8(image * 255 / max_value)
image = self.ToColor(image)
return image
class SobelYFilter(Portrait):
m_name = "SobelY"
def DoFilter(self, image):
image = self.ToGray(image)
image = self.Blur(image)
image = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize = self.m_filtersize)
image = np.absolute(image)
        max_value = np.max(image)
        image = np.uint8(image * 255 / max_value)
image = self.ToColor(image)
return image
class SobelFilter(Portrait):
m_name = "Sobel"
def DoFilter(self, image):
image = self.ToGray(image)
image = self.Blur(image)
imageX = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize = self.m_filtersize)
imageX = np.absolute(imageX)
imageY = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize = self.m_filtersize)
imageY = np.absolute(imageY)
image = imageX + imageY
        max_value = np.max(image)
        image = np.uint8(image * 255 / max_value)
image = self.ToColor(image)
return image
class LaPlacianFilter(Portrait):
m_name = "Laplacian"
def DoFilter(self, image):
image = self.ToGray(image)
image = self.Blur(image)
image = cv2.Laplacian(image,cv2.CV_64F,ksize = self.m_filtersize)
image = np.absolute(image)
        max_value = np.max(image)
        image = np.uint8(image * 255 / max_value)
image = self.ToColor(image)
return image
class CannyEdgeFilter(Portrait):
m_name = "Canny Edge"
def DoFilter(self, image):
minVal = 25
maxVal = 100
        expectedmean = 13  # Used to make Canny produce enough edges.
image = self.ToGray(image)
image = self.Blur(image)
#optimize canny
mean = 0
multiplier = 255/maxVal
imageCan = None
while mean < expectedmean:
imageCan = cv2.Canny(image, int(minVal * multiplier), int(maxVal * multiplier))
mean = np.mean(imageCan)
multiplier -= 0.5
#print "mean: " + str(mean)
#print "minVal: " + str(minVal * multiplier)
#cv2.imshow("test",imageCan)
#cv2.waitKey()
if minVal * multiplier <= 0:
break
image = imageCan
image = self.ToColor(image)
return image
class BlurFilter(Portrait):
m_name = "Blur"
def DoFilter(self, image):
return cv2.blur(image,(self.m_filtersize*3,self.m_filtersize*3))
class GaussianBlurFilter(Portrait):
m_name = "Gaussian Blur"
def DoFilter(self, image):
return cv2.GaussianBlur(image,(self.m_filtersize*7,self.m_filtersize*7),0)
class ThresholdFilter(Portrait):
m_name = "Threshold"
def DoFilter(self, image):
return self.Theshold(image)
class ErodeFilter(Portrait):
m_name = "Erode"
def DoFilter(self, image):
image = self.Theshold(image)
size = 5
image = cv2.erode(image, (self.m_filtersize * size, self.m_filtersize * size))
return cv2.erode(image,(self.m_filtersize*size,self.m_filtersize*size))
class DilateFilter(Portrait):
m_name = "Dilate"
def DoFilter(self, image):
image = self.Theshold(image)
size = 5
image = cv2.dilate(image, (self.m_filtersize * size, self.m_filtersize * size))
return cv2.dilate(image,(self.m_filtersize*size,self.m_filtersize*size))
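# Example usage (hypothetical; assumes an image file 'face.jpg' exists and a
# display is available):
#   portrait = CannyEdgeFilter('face.jpg')
#   cv2.imshow(portrait.m_name, portrait.GetImage())
#   cv2.waitKey()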
|
davidvdp/CVFilterOverview
|
Portrait.py
|
Python
|
apache-2.0
| 7,688
|
[
"Gaussian"
] |
5a1302be39c7ce0b8643f71867cc2e6d91fc11b3fbf0759ffd374ef85eaf5b64
|
"""
View for Courseware Index
"""
# pylint: disable=attribute-defined-outside-init
from datetime import datetime
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils.decorators import method_decorator
from django.utils.timezone import UTC
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import View
from django.shortcuts import redirect
from courseware.url_helpers import get_redirect_url_for_global_staff
from edxmako.shortcuts import render_to_response, render_to_string
import logging
import newrelic.agent
import urllib
from xblock.fragment import Fragment
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference
from shoppingcart.models import CourseRegistrationCode
from student.models import CourseEnrollment
from student.views import is_course_blocked
from student.roles import GlobalStaff
from util.views import ensure_valid_course_key
from xmodule.modulestore.django import modulestore
from xmodule.x_module import STUDENT_VIEW
from survey.utils import must_answer_survey
from ..access import has_access, _adjust_start_date_for_beta_testers
from ..access_utils import in_preview_mode
from ..courses import get_studio_url, get_course_with_access
from ..entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_has_passed_entrance_exam,
user_must_complete_entrance_exam,
)
from ..exceptions import Redirect
from ..masquerade import setup_masquerade
from ..model_data import FieldDataCache
from ..module_render import toc_for_course, get_module_for_descriptor
from .views import get_current_child, registered_for_course
from attendance.views import track_attendance
log = logging.getLogger("edx.courseware.views.index")
TEMPLATE_IMPORTS = {'urllib': urllib}
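# Depth to which the course tree is prefetched: the course's chapters and
# their sections.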
CONTENT_DEPTH = 2
class CoursewareIndex(View):
"""
View class for the Courseware page.
"""
@method_decorator(login_required)
@method_decorator(ensure_csrf_cookie)
@method_decorator(cache_control(no_cache=True, no_store=True, must_revalidate=True))
@method_decorator(ensure_valid_course_key)
def get(self, request, course_id, chapter=None, section=None, position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right
chapter.
        If neither chapter nor section is specified, displays the user's most
recent chapter, or the first chapter if this is the user's first visit.
Arguments:
request: HTTP request
course_id (unicode): course id
chapter (unicode): chapter url_name
section (unicode): section url_name
            position (unicode): position in the module, e.g. of a <sequential> module
"""
self.course_key = CourseKey.from_string(course_id)
self.request = request
self.original_chapter_url_name = chapter
self.original_section_url_name = section
self.chapter_url_name = chapter
self.section_url_name = section
self.position = position
self.chapter, self.section = None, None
self.url = request.path
track_attendance(request)
try:
self._init_new_relic()
self._clean_position()
with modulestore().bulk_operations(self.course_key):
self.course = get_course_with_access(request.user, 'load', self.course_key, depth=CONTENT_DEPTH)
self.is_staff = has_access(request.user, 'staff', self.course)
self._setup_masquerade_for_effective_user()
return self._get()
except Redirect as redirect_error:
return redirect(redirect_error.url)
except UnicodeEncodeError:
raise Http404("URL contains Unicode characters")
except Http404:
# let it propagate
raise
except Exception: # pylint: disable=broad-except
return self._handle_unexpected_error()
def _setup_masquerade_for_effective_user(self):
"""
Setup the masquerade information to allow the request to
be processed for the requested effective user.
"""
self.real_user = self.request.user
self.masquerade, self.effective_user = setup_masquerade(
self.request,
self.course_key,
self.is_staff,
reset_masquerade_data=True
)
# Set the user in the request to the effective user.
self.request.user = self.effective_user
def _get(self):
"""
Render the index page.
"""
self._redirect_if_needed_to_access_course()
self._prefetch_and_bind_course()
if self.course.has_children_at_depth(CONTENT_DEPTH):
self._reset_section_to_exam_if_required()
self.chapter = self._find_chapter()
self.section = self._find_section()
if self.chapter and self.section:
self._redirect_if_not_requested_section()
self._save_positions()
self._prefetch_and_bind_section()
return render_to_response('courseware/courseware.html', self._create_courseware_context())
def _redirect_if_not_requested_section(self):
"""
If the resulting section and chapter are different from what was initially
requested, redirect back to the index page, but with an updated URL that includes
the correct section and chapter values. We do this so that our analytics events
and error logs have the appropriate URLs.
"""
if (
self.chapter.url_name != self.original_chapter_url_name or
(self.original_section_url_name and self.section.url_name != self.original_section_url_name)
):
raise Redirect(
reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course_key),
'chapter': self.chapter.url_name,
'section': self.section.url_name,
},
)
)
def _init_new_relic(self):
"""
Initialize metrics for New Relic so we can slice data in New Relic Insights
"""
newrelic.agent.add_custom_parameter('course_id', unicode(self.course_key))
newrelic.agent.add_custom_parameter('org', unicode(self.course_key.org))
def _clean_position(self):
"""
Verify that the given position is an integer. If it is not positive, set it to 1.
"""
if self.position is not None:
try:
self.position = max(int(self.position), 1)
except ValueError:
raise Http404(u"Position {} is not an integer!".format(self.position))
def _redirect_if_needed_to_access_course(self):
"""
Verifies that the user can enter the course.
"""
self._redirect_if_needed_to_pay_for_course()
self._redirect_if_needed_to_register()
self._redirect_if_needed_for_prereqs()
self._redirect_if_needed_for_course_survey()
def _redirect_if_needed_to_pay_for_course(self):
"""
Redirect to dashboard if the course is blocked due to non-payment.
"""
self.real_user = User.objects.prefetch_related("groups").get(id=self.real_user.id)
redeemed_registration_codes = CourseRegistrationCode.objects.filter(
course_id=self.course_key,
registrationcoderedemption__redeemed_by=self.real_user
)
if is_course_blocked(self.request, redeemed_registration_codes, self.course_key):
            # Registration codes may be generated via a bulk purchase scenario;
            # for invoice-generated registration codes we also have to check
            # whether their invoice is still valid.
log.warning(
u'User %s cannot access the course %s because payment has not yet been received',
self.real_user,
unicode(self.course_key),
)
raise Redirect(reverse('dashboard'))
def _redirect_if_needed_to_register(self):
"""
Verify that the user is registered in the course.
"""
if not registered_for_course(self.course, self.effective_user):
log.debug(
u'User %s tried to view course %s but is not enrolled',
self.effective_user,
unicode(self.course.id)
)
user_is_global_staff = GlobalStaff().has_user(self.effective_user)
user_is_enrolled = CourseEnrollment.is_enrolled(self.effective_user, self.course_key)
if user_is_global_staff and not user_is_enrolled:
redirect_url = get_redirect_url_for_global_staff(self.course_key, _next=self.url)
raise Redirect(redirect_url)
raise Redirect(reverse('about_course', args=[unicode(self.course.id)]))
def _redirect_if_needed_for_prereqs(self):
"""
See if all pre-requisites (as per the milestones app feature) have been
fulfilled. Note that if the pre-requisite feature flag has been turned off
(default) then this check will always pass.
"""
if not has_access(self.effective_user, 'view_courseware_with_prerequisites', self.course):
# Prerequisites have not been fulfilled.
# Therefore redirect to the Dashboard.
log.info(
u'User %d tried to view course %s '
u'without fulfilling prerequisites',
self.effective_user.id, unicode(self.course.id))
raise Redirect(reverse('dashboard'))
def _redirect_if_needed_for_course_survey(self):
"""
Check to see if there is a required survey that must be taken before
the user can access the course.
"""
if must_answer_survey(self.course, self.effective_user):
raise Redirect(reverse('course_survey', args=[unicode(self.course.id)]))
def _reset_section_to_exam_if_required(self):
"""
Check to see if an Entrance Exam is required for the user.
"""
if (
course_has_entrance_exam(self.course) and
user_must_complete_entrance_exam(self.request, self.effective_user, self.course)
):
exam_chapter = get_entrance_exam_content(self.effective_user, self.course)
if exam_chapter and exam_chapter.get_children():
exam_section = exam_chapter.get_children()[0]
if exam_section:
self.chapter_url_name = exam_chapter.url_name
self.section_url_name = exam_section.url_name
def _get_language_preference(self):
"""
Returns the preferred language for the actual user making the request.
"""
language_preference = get_user_preference(self.real_user, LANGUAGE_KEY)
if not language_preference:
language_preference = settings.LANGUAGE_CODE
return language_preference
def _is_masquerading_as_student(self):
"""
Returns whether the current request is masquerading as a student.
"""
return self.masquerade and self.masquerade.role == 'student'
def _is_masquerading_as_specific_student(self):
"""
        Returns whether the current request is masquerading as a specific student.
"""
return self._is_masquerading_as_student() and self.masquerade.user_name
def _find_block(self, parent, url_name, block_type, min_depth=None):
"""
Finds the block in the parent with the specified url_name.
If not found, calls get_current_child on the parent.
"""
child = None
if url_name:
child = parent.get_child_by(lambda m: m.location.name == url_name)
if not child:
# User may be trying to access a child that isn't live yet
if not self._is_masquerading_as_student():
raise Http404('No {block_type} found with name {url_name}'.format(
block_type=block_type,
url_name=url_name,
))
elif min_depth and not child.has_children_at_depth(min_depth - 1):
child = None
if not child:
child = get_current_child(parent, min_depth=min_depth, requested_child=self.request.GET.get("child"))
return child
def _find_chapter(self):
"""
Finds the requested chapter.
"""
return self._find_block(self.course, self.chapter_url_name, 'chapter', CONTENT_DEPTH - 1)
def _find_section(self):
"""
Finds the requested section.
"""
if self.chapter:
return self._find_block(self.chapter, self.section_url_name, 'section')
def _prefetch_and_bind_course(self):
"""
        Prefetches all descendant data for the course and sets up the runtime,
        which binds the request user to the course.
"""
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_key, self.effective_user, self.course, depth=CONTENT_DEPTH,
)
self.course = get_module_for_descriptor(
self.effective_user,
self.request,
self.course,
self.field_data_cache,
self.course_key,
course=self.course,
)
def _prefetch_and_bind_section(self):
"""
Prefetches all descendant data for the requested section and
sets up the runtime, which binds the request user to the section.
"""
# Pre-fetch all descendant data
self.section = modulestore().get_item(self.section.location, depth=None, lazy=False)
self.field_data_cache.add_descriptor_descendents(self.section, depth=None)
# Bind section to user
self.section = get_module_for_descriptor(
self.effective_user,
self.request,
self.section,
self.field_data_cache,
self.course_key,
self.position,
course=self.course,
)
def _save_positions(self):
"""
Save where we are in the course and chapter.
"""
save_child_position(self.course, self.chapter_url_name)
save_child_position(self.chapter, self.section_url_name)
def _create_courseware_context(self):
"""
Returns and creates the rendering context for the courseware.
Also returns the table of contents for the courseware.
"""
courseware_context = {
'csrf': csrf(self.request)['csrf_token'],
'COURSE_TITLE': self.course.display_name_with_default_escaped,
'course': self.course,
'init': '',
'fragment': Fragment(),
'staff_access': self.is_staff,
'studio_url': get_studio_url(self.course, 'course'),
'masquerade': self.masquerade,
'real_user': self.real_user,
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
'bookmarks_api_url': reverse('bookmarks'),
'language_preference': self._get_language_preference(),
'disable_optimizely': True,
}
table_of_contents = toc_for_course(
self.effective_user,
self.request,
self.course,
self.chapter_url_name,
self.section_url_name,
self.field_data_cache,
)
courseware_context['accordion'] = render_accordion(
self.request,
self.course,
table_of_contents['chapters'],
)
# entrance exam data
if course_has_entrance_exam(self.course):
if getattr(self.chapter, 'is_entrance_exam', False):
courseware_context['entrance_exam_current_score'] = get_entrance_exam_score(self.request, self.course)
courseware_context['entrance_exam_passed'] = user_has_passed_entrance_exam(self.request, self.course)
# staff masquerading data
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(self.effective_user, self.course, self.course_key)
if not in_preview_mode() and self.is_staff and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
courseware_context['disable_student_access'] = True
if self.section:
# chromeless data
if self.section.chrome:
chrome = [s.strip() for s in self.section.chrome.lower().split(",")]
if 'accordion' not in chrome:
courseware_context['disable_accordion'] = True
if 'tabs' not in chrome:
courseware_context['disable_tabs'] = True
# default tab
if self.section.default_tab:
courseware_context['default_tab'] = self.section.default_tab
# section data
courseware_context['section_title'] = self.section.display_name_with_default_escaped
section_context = self._create_section_context(
table_of_contents['previous_of_active_section'],
table_of_contents['next_of_active_section'],
)
courseware_context['fragment'] = self.section.render(STUDENT_VIEW, section_context)
return courseware_context
def _create_section_context(self, previous_of_active_section, next_of_active_section):
"""
Returns and creates the rendering context for the section.
"""
def _compute_section_url(section_info, requested_child):
"""
Returns the section URL for the given section_info with the given child parameter.
"""
return "{url}?child={requested_child}".format(
url=reverse(
'courseware_section',
args=[unicode(self.course_key), section_info['chapter_url_name'], section_info['url_name']],
),
requested_child=requested_child,
)
section_context = {
'activate_block_id': self.request.GET.get('activate_block_id'),
'requested_child': self.request.GET.get("child"),
'progress_url': reverse('progress', kwargs={'course_id': unicode(self.course_key)}),
}
if previous_of_active_section:
section_context['prev_url'] = _compute_section_url(previous_of_active_section, 'last')
if next_of_active_section:
section_context['next_url'] = _compute_section_url(next_of_active_section, 'first')
# sections can hide data that masquerading staff should see when debugging issues with specific students
section_context['specific_masquerade'] = self._is_masquerading_as_specific_student()
return section_context
def _handle_unexpected_error(self):
"""
Handle unexpected exceptions raised by View.
"""
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
log.exception(
u"Error in index view: user=%s, effective_user=%s, course=%s, chapter=%s section=%s position=%s",
self.real_user,
self.effective_user,
unicode(self.course_key),
self.chapter_url_name,
self.section_url_name,
self.position,
)
try:
return render_to_response('courseware/courseware-error.html', {
'staff_access': self.is_staff,
'course': self.course
})
except:
# Let the exception propagate, relying on global config to
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
def render_accordion(request, course, table_of_contents):
"""
Returns the HTML that renders the navigation for the given course.
Expects the table_of_contents to have data on each chapter and section,
including which ones are active.
"""
context = dict(
[
('toc', table_of_contents),
('course_id', unicode(course.id)),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format),
] + TEMPLATE_IMPORTS.items()
)
return render_to_string('courseware/accordion.html', context)
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, child in enumerate(seq_module.get_display_items(), start=1):
if child.location.name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
"""
Recurses up the course tree starting from a leaf
Saving the position property based on the previous node as it goes
"""
current_module = xmodule
while current_module:
parent_location = modulestore().get_parent_location(current_module.location)
parent = None
if parent_location:
parent_descriptor = modulestore().get_item(parent_location)
parent = get_module_for_descriptor(
user,
request,
parent_descriptor,
field_data_cache,
current_module.location.course_key,
course=course
)
if parent and hasattr(parent, 'position'):
save_child_position(parent, current_module.location.name)
current_module = parent
|
synergeticsedx/deployment-wipro
|
lms/djangoapps/courseware/views/index.py
|
Python
|
agpl-3.0
| 22,684
|
[
"VisIt"
] |
b298f10cbb65d8281cdc02c61fbead7166d5f75ca1bfbdb8df996befbbdfefd8
|
"""Options manager for :class:`Poly` and public API functions. """
from __future__ import print_function, division
__all__ = ["Options"]
from sympy.core import S, Basic, sympify
from sympy.core.compatibility import string_types, with_metaclass
from sympy.utilities import numbered_symbols, topological_sort, public
from sympy.utilities.iterables import has_dups
from sympy.polys.polyerrors import GeneratorsError, OptionError, FlagError
import sympy.polys
import re
class Option(object):
"""Base class for all kinds of options. """
option = None
is_Flag = False
requires = []
excludes = []
after = []
before = []
@classmethod
def default(cls):
return None
@classmethod
def preprocess(cls, option):
return None
@classmethod
def postprocess(cls, options):
pass
class Flag(Option):
"""Base class for all kinds of flags. """
is_Flag = True
class BooleanOption(Option):
"""An option that must have a boolean value or equivalent assigned. """
@classmethod
def preprocess(cls, value):
if value in [True, False]:
return bool(value)
else:
raise OptionError("'%s' must have a boolean value assigned, got %s" % (cls.option, value))
class OptionType(type):
"""Base type for all options that does registers options. """
def __init__(cls, *args, **kwargs):
@property
def getter(self):
try:
return self[cls.option]
except KeyError:
return cls.default()
setattr(Options, cls.option, getter)
Options.__options__[cls.option] = cls
@public
class Options(dict):
"""
Options manager for polynomial manipulation module.
Examples
========
>>> from sympy.polys.polyoptions import Options
>>> from sympy.polys.polyoptions import build_options
>>> from sympy.abc import x, y, z
>>> Options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
>>> build_options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
**Options**
* Expand --- boolean option
* Gens --- option
* Wrt --- option
* Sort --- option
* Order --- option
* Field --- boolean option
* Greedy --- boolean option
* Domain --- option
* Split --- boolean option
* Gaussian --- boolean option
* Extension --- option
* Modulus --- option
* Symmetric --- boolean option
* Strict --- boolean option
**Flags**
* Auto --- boolean flag
* Frac --- boolean flag
* Formal --- boolean flag
* Polys --- boolean flag
* Include --- boolean flag
* All --- boolean flag
* Gen --- flag
* Series --- boolean flag
"""
__order__ = None
__options__ = {}
def __init__(self, gens, args, flags=None, strict=False):
dict.__init__(self)
if gens and args.get('gens', ()):
raise OptionError(
"both '*gens' and keyword argument 'gens' supplied")
elif gens:
args = dict(args)
args['gens'] = gens
defaults = args.pop('defaults', {})
def preprocess_options(args):
for option, value in args.items():
try:
cls = self.__options__[option]
except KeyError:
raise OptionError("'%s' is not a valid option" % option)
if issubclass(cls, Flag):
if flags is None or option not in flags:
if strict:
raise OptionError("'%s' flag is not allowed in this context" % option)
if value is not None:
self[option] = cls.preprocess(value)
preprocess_options(args)
for key, value in dict(defaults).items():
if key in self:
del defaults[key]
else:
for option in self.keys():
cls = self.__options__[option]
if key in cls.excludes:
del defaults[key]
break
preprocess_options(defaults)
for option in self.keys():
cls = self.__options__[option]
for require_option in cls.requires:
if self.get(require_option) is None:
raise OptionError("'%s' option is only allowed together with '%s'" % (option, require_option))
for exclude_option in cls.excludes:
if self.get(exclude_option) is not None:
raise OptionError("'%s' option is not allowed together with '%s'" % (option, exclude_option))
for option in self.__order__:
self.__options__[option].postprocess(self)
@classmethod
def _init_dependencies_order(cls):
"""Resolve the order of options' processing. """
if cls.__order__ is None:
vertices, edges = [], set([])
for name, option in cls.__options__.items():
vertices.append(name)
for _name in option.after:
edges.add((_name, name))
for _name in option.before:
edges.add((name, _name))
try:
cls.__order__ = topological_sort((vertices, list(edges)))
except ValueError:
raise RuntimeError(
"cycle detected in sympy.polys options framework")
def clone(self, updates={}):
"""Clone ``self`` and update specified options. """
obj = dict.__new__(self.__class__)
for option, value in self.items():
obj[option] = value
for option, value in updates.items():
obj[option] = value
return obj
def __setattr__(self, attr, value):
if attr in self.__options__:
self[attr] = value
else:
super(Options, self).__setattr__(attr, value)
@property
def args(self):
args = {}
for option, value in self.items():
if value is not None and option != 'gens':
cls = self.__options__[option]
if not issubclass(cls, Flag):
args[option] = value
return args
@property
def options(self):
options = {}
for option, cls in self.__options__.items():
if not issubclass(cls, Flag):
options[option] = getattr(self, option)
return options
@property
def flags(self):
flags = {}
for option, cls in self.__options__.items():
if issubclass(cls, Flag):
flags[option] = getattr(self, option)
return flags
class Expand(with_metaclass(OptionType, BooleanOption)):
"""``expand`` option to polynomial manipulation functions. """
option = 'expand'
requires = []
excludes = []
@classmethod
def default(cls):
return True
class Gens(with_metaclass(OptionType, Option)):
"""``gens`` option to polynomial manipulation functions. """
option = 'gens'
requires = []
excludes = []
@classmethod
def default(cls):
return ()
@classmethod
def preprocess(cls, gens):
if isinstance(gens, Basic):
gens = (gens,)
elif len(gens) == 1 and hasattr(gens[0], '__iter__'):
gens = gens[0]
if gens == (None,):
gens = ()
elif has_dups(gens):
raise GeneratorsError("duplicated generators: %s" % str(gens))
elif any(gen.is_commutative is False for gen in gens):
raise GeneratorsError("non-commutative generators: %s" % str(gens))
return tuple(gens)
class Wrt(with_metaclass(OptionType, Option)):
"""``wrt`` option to polynomial manipulation functions. """
option = 'wrt'
requires = []
excludes = []
_re_split = re.compile(r"\s*,\s*|\s+")
@classmethod
def preprocess(cls, wrt):
if isinstance(wrt, Basic):
return [str(wrt)]
elif isinstance(wrt, str):
wrt = wrt.strip()
if wrt.endswith(','):
raise OptionError('Bad input: missing parameter.')
if not wrt:
return []
return [ gen for gen in cls._re_split.split(wrt) ]
elif hasattr(wrt, '__getitem__'):
return list(map(str, wrt))
else:
raise OptionError("invalid argument for 'wrt' option")
class Sort(with_metaclass(OptionType, Option)):
"""``sort`` option to polynomial manipulation functions. """
option = 'sort'
requires = []
excludes = []
@classmethod
def default(cls):
return []
@classmethod
def preprocess(cls, sort):
if isinstance(sort, str):
return [ gen.strip() for gen in sort.split('>') ]
elif hasattr(sort, '__getitem__'):
return list(map(str, sort))
else:
raise OptionError("invalid argument for 'sort' option")
class Order(with_metaclass(OptionType, Option)):
"""``order`` option to polynomial manipulation functions. """
option = 'order'
requires = []
excludes = []
@classmethod
def default(cls):
return sympy.polys.orderings.lex
@classmethod
def preprocess(cls, order):
return sympy.polys.orderings.monomial_key(order)
class Field(with_metaclass(OptionType, BooleanOption)):
"""``field`` option to polynomial manipulation functions. """
option = 'field'
requires = []
excludes = ['domain', 'split', 'gaussian']
class Greedy(with_metaclass(OptionType, BooleanOption)):
"""``greedy`` option to polynomial manipulation functions. """
option = 'greedy'
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Composite(with_metaclass(OptionType, BooleanOption)):
"""``composite`` option to polynomial manipulation functions. """
option = 'composite'
@classmethod
def default(cls):
return None
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Domain(with_metaclass(OptionType, Option)):
"""``domain`` option to polynomial manipulation functions. """
option = 'domain'
requires = []
excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']
after = ['gens']
_re_realfield = re.compile(r"^(R|RR)(_(\d+))?$")
_re_complexfield = re.compile(r"^(C|CC)(_(\d+))?$")
_re_finitefield = re.compile(r"^(FF|GF)\((\d+)\)$")
_re_polynomial = re.compile(r"^(Z|ZZ|Q|QQ)\[(.+)\]$")
_re_fraction = re.compile(r"^(Z|ZZ|Q|QQ)\((.+)\)$")
_re_algebraic = re.compile(r"^(Q|QQ)\<(.+)\>$")
@classmethod
def preprocess(cls, domain):
if isinstance(domain, sympy.polys.domains.Domain):
return domain
elif hasattr(domain, 'to_domain'):
return domain.to_domain()
elif isinstance(domain, string_types):
if domain in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ
if domain in ['Q', 'QQ']:
return sympy.polys.domains.QQ
if domain == 'EX':
return sympy.polys.domains.EX
r = cls._re_realfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return sympy.polys.domains.RR
else:
return sympy.polys.domains.RealField(int(prec))
r = cls._re_complexfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return sympy.polys.domains.CC
else:
return sympy.polys.domains.ComplexField(int(prec))
r = cls._re_finitefield.match(domain)
if r is not None:
return sympy.polys.domains.FF(int(r.groups()[1]))
r = cls._re_polynomial.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.poly_ring(*gens)
else:
return sympy.polys.domains.QQ.poly_ring(*gens)
r = cls._re_fraction.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.frac_field(*gens)
else:
return sympy.polys.domains.QQ.frac_field(*gens)
r = cls._re_algebraic.match(domain)
if r is not None:
gens = list(map(sympify, r.groups()[1].split(',')))
return sympy.polys.domains.QQ.algebraic_field(*gens)
raise OptionError('expected a valid domain specification, got %s' % domain)
@classmethod
def postprocess(cls, options):
if 'gens' in options and 'domain' in options and options['domain'].is_Composite and \
(set(options['domain'].symbols) & set(options['gens'])):
raise GeneratorsError(
"ground domain and generators interfere together")
elif ('gens' not in options or not options['gens']) and \
'domain' in options and options['domain'] == sympy.polys.domains.EX:
raise GeneratorsError("you have to provide generators because EX domain was requested")
class Split(with_metaclass(OptionType, BooleanOption)):
"""``split`` option to polynomial manipulation functions. """
option = 'split'
requires = []
excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension',
'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'split' in options:
raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(with_metaclass(OptionType, BooleanOption)):
"""``gaussian`` option to polynomial manipulation functions. """
option = 'gaussian'
requires = []
excludes = ['field', 'greedy', 'domain', 'split', 'extension',
'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'gaussian' in options and options['gaussian'] is True:
options['extension'] = set([S.ImaginaryUnit])
Extension.postprocess(options)
class Extension(with_metaclass(OptionType, Option)):
"""``extension`` option to polynomial manipulation functions. """
option = 'extension'
requires = []
excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus',
'symmetric']
@classmethod
def preprocess(cls, extension):
if extension == 1:
return bool(extension)
elif extension == 0:
raise OptionError("'False' is an invalid argument for 'extension'")
else:
if not hasattr(extension, '__iter__'):
extension = set([extension])
else:
if not extension:
extension = None
else:
extension = set(extension)
return extension
@classmethod
def postprocess(cls, options):
if 'extension' in options and options['extension'] is not True:
options['domain'] = sympy.polys.domains.QQ.algebraic_field(
*options['extension'])
class Modulus(with_metaclass(OptionType, Option)):
"""``modulus`` option to polynomial manipulation functions. """
option = 'modulus'
requires = []
excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']
@classmethod
def preprocess(cls, modulus):
modulus = sympify(modulus)
if modulus.is_Integer and modulus > 0:
return int(modulus)
else:
raise OptionError(
"'modulus' must a positive integer, got %s" % modulus)
@classmethod
def postprocess(cls, options):
if 'modulus' in options:
modulus = options['modulus']
symmetric = options.get('symmetric', True)
options['domain'] = sympy.polys.domains.FF(modulus, symmetric)
class Symmetric(with_metaclass(OptionType, BooleanOption)):
"""``symmetric`` option to polynomial manipulation functions. """
option = 'symmetric'
requires = ['modulus']
excludes = ['greedy', 'domain', 'split', 'gaussian', 'extension']
class Strict(with_metaclass(OptionType, BooleanOption)):
"""``strict`` option to polynomial manipulation functions. """
option = 'strict'
@classmethod
def default(cls):
return True
class Auto(with_metaclass(OptionType, BooleanOption, Flag)):
"""``auto`` flag to polynomial manipulation functions. """
option = 'auto'
after = ['field', 'domain', 'extension', 'gaussian']
@classmethod
def default(cls):
return True
@classmethod
def postprocess(cls, options):
if ('domain' in options or 'field' in options) and 'auto' not in options:
options['auto'] = False
class Frac(with_metaclass(OptionType, BooleanOption, Flag)):
"""``auto`` option to polynomial manipulation functions. """
option = 'frac'
@classmethod
def default(cls):
return False
class Formal(with_metaclass(OptionType, BooleanOption, Flag)):
"""``formal`` flag to polynomial manipulation functions. """
option = 'formal'
@classmethod
def default(cls):
return False
class Polys(with_metaclass(OptionType, BooleanOption, Flag)):
"""``polys`` flag to polynomial manipulation functions. """
option = 'polys'
class Include(with_metaclass(OptionType, BooleanOption, Flag)):
"""``include`` flag to polynomial manipulation functions. """
option = 'include'
@classmethod
def default(cls):
return False
class All(with_metaclass(OptionType, BooleanOption, Flag)):
"""``all`` flag to polynomial manipulation functions. """
option = 'all'
@classmethod
def default(cls):
return False
class Gen(with_metaclass(OptionType, Flag)):
"""``gen`` flag to polynomial manipulation functions. """
option = 'gen'
@classmethod
def default(cls):
return 0
@classmethod
def preprocess(cls, gen):
if isinstance(gen, (Basic, int)):
return gen
else:
raise OptionError("invalid argument for 'gen' option")
class Series(with_metaclass(OptionType, BooleanOption, Flag)):
"""``series`` flag to polynomial manipulation functions. """
option = 'series'
@classmethod
def default(cls):
return False
class Symbols(with_metaclass(OptionType, Flag)):
"""``symbols`` flag to polynomial manipulation functions. """
option = 'symbols'
@classmethod
def default(cls):
return numbered_symbols('s', start=1)
@classmethod
def preprocess(cls, symbols):
if hasattr(symbols, '__iter__'):
return iter(symbols)
else:
raise OptionError("expected an iterator or iterable container, got %s" % symbols)
class Method(with_metaclass(OptionType, Flag)):
"""``method`` flag to polynomial manipulation functions. """
option = 'method'
@classmethod
def preprocess(cls, method):
if isinstance(method, str):
return method.lower()
else:
raise OptionError("expected a string, got %s" % method)
def build_options(gens, args=None):
"""Construct options from keyword arguments or ... options. """
if args is None:
gens, args = (), gens
if len(args) != 1 or 'opt' not in args or gens:
return Options(gens, args)
else:
return args['opt']
def allowed_flags(args, flags):
"""
Allow specified flags to be used in the given context.
Examples
========
>>> from sympy.polys.polyoptions import allowed_flags
>>> from sympy.polys.domains import ZZ
>>> allowed_flags({'domain': ZZ}, [])
>>> allowed_flags({'domain': ZZ, 'frac': True}, [])
Traceback (most recent call last):
...
FlagError: 'frac' flag is not allowed in this context
>>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
"""
flags = set(flags)
for arg in args.keys():
try:
if Options.__options__[arg].is_Flag and not arg in flags:
raise FlagError(
"'%s' flag is not allowed in this context" % arg)
except KeyError:
raise OptionError("'%s' is not a valid option" % arg)
def set_defaults(options, **defaults):
"""Update options with default values. """
if 'defaults' not in options:
options = dict(options)
options['defaults'] = defaults
return options
Options._init_dependencies_order()
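# --- Added usage sketch (illustration only, not part of the original module).
# --- It exercises only the public helpers defined above; ``x`` and ``y`` are
# --- the usual symbols from sympy.abc.
if __name__ == '__main__':
    from sympy.abc import x, y

    # Keyword arguments are preprocessed into domain objects, generators, etc.
    opt = build_options((x, y), {'domain': 'ZZ'})
    print(opt)            # e.g. {'auto': False, 'domain': ZZ, 'gens': (x, y)}
    print(opt.domain)     # ZZ (attribute access is wired up by OptionType)

    # allowed_flags() passes silently when no disallowed flag is present ...
    allowed_flags({'domain': 'ZZ'}, [])
    # ... and set_defaults() stores fallback values under the 'defaults' key.
    print(set_defaults({'domain': 'ZZ'}, polys=False))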
|
wxgeo/geophar
|
wxgeometrie/sympy/polys/polyoptions.py
|
Python
|
gpl-2.0
| 21,096
|
[
"Gaussian"
] |
4e1c96f58d57abcc723c4807778e703941d190e930bf0c7efcba111675b59a96
|
# Include the Dropbox SDK libraries
from dropbox import client, rest, session
# Get your app key and secret from the Dropbox developer website
APP_KEY = 'INSERT_APP_KEY_HERE'  # removed for security reasons
APP_SECRET = 'INSERT_APP_SECRET_HERE'  # removed for security reasons
# ACCESS_TYPE should be 'dropbox' or 'app_folder' as configured for your app
ACCESS_TYPE = 'app_folder'
sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
# Make the user sign in and authorize this token
print "url:", url
print "Please visit this website and press the 'Allow' button, then hit 'Enter' here."
raw_input()
# This will fail if the user didn't visit the above URL and hit 'Allow'
access_token = sess.obtain_access_token(request_token)
#Okay, now we are ready to save the access_token
TOKENS = 'dropbox_token.txt'
token_file = open(TOKENS, 'w')
token_file.write("%s|%s" % (access_token.key,access_token.secret))
token_file.close()
print "you are now ready to use the token in your application"
|
levibostian/VSAS
|
docs/Risk assessment/dropbox_requestToken.py
|
Python
|
mit
| 1,044
|
[
"VisIt"
] |
020e19f1c522775dbabcb29bdc0b855ce0d406bcaa42f531c12ee11f7de363f3
|
import numpy as np
from .util import spatial_frequencies
import scipy.ndimage
from skimage.filters import gaussian
import matplotlib.pyplot as plt
import warnings
def MTF_a(k,a1,a2,a3,a4):
return (a1 - a2)/(1 + (k/(2*a3))**np.abs(a4)) + a2
def detect(img,sampling,dose=None,MTF_param=None,MTF_func=None,blur=None,resample=None):
if resample is not None:
if not isinstance(resample, (list, tuple)):
resample=(resample,)*2
zoom=(sampling[0]/resample[0],sampling[1]/resample[1])
sampling=resample
warnings.filterwarnings('ignore')
img = scipy.ndimage.interpolation.zoom(img, zoom)
warnings.filterwarnings('always')
if blur is not None:
img=gaussian(img,blur)
if dose is not None:
img = img/np.sum(img)*dose*np.product(sampling)*np.product(img.shape)
img[img<0]=0
#vals = len(np.unique(img))
#vals = 2**np.ceil(np.log2(vals))
#img = np.random.poisson(img * vals) / float(vals)
img = np.random.poisson(img).astype(np.int64)
if MTF_param is not None:
if MTF_func is None:
MTF_func=MTF_a
kx,ky,k2=spatial_frequencies(img.shape,sampling)
k=np.sqrt(k2)
mtf=MTF_func(k,*MTF_param)
img=np.fft.ifft2(np.fft.fft2(img)*np.sqrt(mtf))
img=(img.real+img.imag)/2
return img
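# --- Added usage sketch (illustration only, not part of the original module).
# --- Run it as part of the package, e.g. `python -m pyqstem.detection`, so the
# --- relative import of spatial_frequencies above resolves; parameter values
# --- below are arbitrary placeholders.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    image = rng.rand(64, 64)                      # synthetic intensity image
    sampling = (0.1, 0.1)                         # image-plane sampling (assumed units)
    out = detect(image, sampling, dose=1.0e4,     # add shot noise for this dose
                 MTF_param=(1.0, 0.0, 0.5, 2.0))  # hypothetical MTF_a parameters
    print(out.shape, out.dtype)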
|
jacobjma/PyQSTEM
|
pyqstem/detection.py
|
Python
|
gpl-3.0
| 1,407
|
[
"Gaussian"
] |
0821be9a87d6ac5581632ebbaa7e2cde080c902e09d17c176edbe674526a28dc
|
"""The WaveBlocks Project
This file contains code for a sparsity oracle looking
at the Gaussian integral of both packets.
@author: R. Bourquin
@copyright: Copyright (C) 2013, 2014 R. Bourquin
@license: Modified BSD License
"""
from WaveBlocksND.SparsityOracle import SparsityOracle
from WaveBlocksND.GaussianIntegral import GaussianIntegral
from WaveBlocksND.InhomogeneousInnerProduct import InhomogeneousInnerProduct
__all__ = ["SparsityOracleGIHAWP"]
class SparsityOracleGIHAWP(SparsityOracle):
r"""This class implements an oracle by looking at Gaussian integrals.
"""
def __init__(self, threshold=1e-8):
r"""Initialize an oracle for estimating if a specific overlap integral
:math:`\langle \Psi_k | O | \Psi_l \rangle` is approximately zero. The
oracle works by approximating :math:`\langle \Psi_k | \Psi_l \rangle`
with a Gaussian integral. If
.. math::
\langle \Psi_k | \Psi_l \rangle \approx \langle \Psi_k^G | \Psi_l^G \rangle \leq \tau
the value :math:`\langle \Psi_k | O | \Psi_l \rangle` is considered
to be zero. Of course this may fail depending on the form of the
operator :math:`O` or the basis shape :math:`\mathfrak{K}`.
.. warning::
This code is highly experimental.
:param threshold: The threshold :math:`\tau` in the Gaussian integral criterion.
The default value of :math:`10^{-8}` should be reasonable in most cases.
"""
self._threshold = threshold
self._ip = InhomogeneousInnerProduct(GaussianIntegral())
def is_not_zero(self, pacbra, packet, component=None):
r"""Try to estimate if the overlap integral :math:`\langle \Psi_k | \Psi_l \rangle`
is zero or at least negligible.
:param pacbra: The packet :math:`\Psi_k` that is used for the 'bra' part.
:param packet: The packet :math:`\Psi_l` that is used for the 'ket' part.
:param component: The component of the packet that is considered.
:return: ``True`` or ``False`` whether the inner product is negligible.
"""
Q = self._ip.quadrature(pacbra, packet, diag_component=component, summed=True)
return abs(Q) > self._threshold
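# Added usage sketch (illustration only; assumes two Hagedorn wavepacket
# instances `pacbra` and `packet` have already been built with the usual
# WaveBlocksND machinery):
#
#   oracle = SparsityOracleGIHAWP(threshold=1e-10)
#   if oracle.is_not_zero(pacbra, packet, component=0):
#       pass  # only then evaluate the expensive exact inner product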
|
WaveBlocks/WaveBlocksND
|
WaveBlocksND/SparsityOracleGIHAWP.py
|
Python
|
bsd-3-clause
| 2,260
|
[
"Gaussian"
] |
d832481eb96272f6d0949bbd811d90ce5a44d1059cafa378e3d0e0fcd00fbaa4
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# *HybridCaburst_stochCaP.py : A hybrid calcium burst model with stochastic P-type
# calcium channels and everything else deterministic.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# USAGE
#
# $ python HybridCaburst_stochCaP.py *mesh* *root* *iter_n*
#
# *mesh* is the tetrahedral mesh (10um to 160um cylinder)
# *root* is the path to the location for data storage
# *iter_n* (intended to be an integer) is an identifier number for each
# simulation iteration.
#
# E.g: python HybridCaburst_stochCaP.py Cylinder2_dia2um_L10um_outer0_3um_0.3shell_0.3size_19156tets_adaptive.inp ~/stochcasims/ 1
#
#
# OUTPUT
#
# In (root)/data/HybridCaburst_stochCaP/(mesh)/(iter_n+time) directory
# 3 data files will be recorded. Each file contains one row for every
# time-point at which data is recorded, organised into the following columns:
#
# currents.dat
# Time (ms), P-type current, T-type current, BK current, SK current
# (current units are Amps/m^2)
#
# voltage.dat
# Time (ms), voltage at mesh centre (mV)
#
# calcium.dat
# Time (ms), deterministic calcium concentration in submembrane (micromolar),
# stochastic calcium concentration in submembrane (micromolar),
# number of calcium ions in submembrane in deterministic solver,
# number of calcium ions in submembrane in stochastic solver.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from __future__ import print_function
import math
import time
from random import *
import steps.model as smodel
import steps.geom as sgeom
import steps.rng as srng
import steps.utilities.meshio as meshio
import steps.solver as ssolver
import os
import meshes.gettets as gettets
from extra.constants import *
import extra.curr_funcs as cf
import sys
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
meshfile_ab, root, iter_n = sys.argv[1], sys.argv[2], sys.argv[3]
if meshfile_ab == 'Cylinder2_dia2um_L160um_outer0_0.3shell_0.3size_279152tets_adaptive.inp': cyl160=True
else: cyl160=False
########################### BIOCHEMICAL MODEL ###############################
# Two models required: Stochastic and deterministic
mdl_stoch = smodel.Model()
mdl_det = smodel.Model()
# Calcium
Ca_det = smodel.Spec('Ca_det', mdl_det)
Ca_det.setValence(2)
Ca_stoch = smodel.Spec('Ca_stoch', mdl_stoch)
Ca_stoch.setValence(2)
# Pump
Pump = smodel.Spec('Pump', mdl_det)
# CaPump
CaPump = smodel.Spec('CaPump', mdl_det)
# iCBsf
iCBsf = smodel.Spec('iCBsf', mdl_det)
# iCBsCa
iCBsCa = smodel.Spec('iCBsCa', mdl_det)
# iCBCaf
iCBCaf = smodel.Spec('iCBCaf', mdl_det)
# iCBCaCa
iCBCaCa = smodel.Spec('iCBCaCa', mdl_det)
# CBsf
CBsf = smodel.Spec('CBsf', mdl_det)
# CBsCa
CBsCa = smodel.Spec('CBsCa', mdl_det)
# CBCaf
CBCaf = smodel.Spec('CBCaf', mdl_det)
# CBCaCa
CBCaCa = smodel.Spec('CBCaCa', mdl_det)
# PV
PV = smodel.Spec('PV', mdl_det)
# PVMg
PVMg = smodel.Spec('PVMg', mdl_det)
# PVCa
PVCa = smodel.Spec('PVCa', mdl_det)
# Mg
Mg = smodel.Spec('Mg', mdl_det)
# Vol/surface systems
vsys_stoch = smodel.Volsys('vsys_stoch', mdl_stoch)
ssys_stoch = smodel.Surfsys('ssys_stoch', mdl_stoch)
vsys_det = smodel.Volsys('vsys_det', mdl_det)
ssys_det = smodel.Surfsys('ssys_det', mdl_det)
# Diffusions
diff_Ca = smodel.Diff('diff_Ca', vsys_det, Ca_det)
diff_Ca.setDcst(DCST)
diff_CBsf = smodel.Diff('diff_CBsf', vsys_det, CBsf)
diff_CBsf.setDcst(DCB)
diff_CBsCa = smodel.Diff('diff_CBsCa', vsys_det, CBsCa)
diff_CBsCa.setDcst(DCB)
diff_CBCaf = smodel.Diff('diff_CBCaf', vsys_det, CBCaf)
diff_CBCaf.setDcst(DCB)
diff_CBCaCa = smodel.Diff('diff_CBCaCa', vsys_det, CBCaCa)
diff_CBCaCa.setDcst(DCB)
diff_PV = smodel.Diff('diff_PV', vsys_det, PV)
diff_PV.setDcst(DPV)
diff_PVCa = smodel.Diff('diff_PVCa', vsys_det, PVCa)
diff_PVCa.setDcst(DPV)
diff_PVMg = smodel.Diff('diff_PVMg', vsys_det, PVMg)
diff_PVMg.setDcst(DPV)
#Pump
PumpD_f = smodel.SReac('PumpD_f', ssys_det, ilhs=[Ca_det], slhs=[Pump], srhs=[CaPump])
PumpD_f.setKcst(P_f_kcst)
PumpD_b = smodel.SReac('PumpD_b', ssys_det, slhs=[CaPump], irhs=[Ca_det], srhs=[Pump])
PumpD_b.setKcst(P_b_kcst)
PumpD_k = smodel.SReac('PumpD_k', ssys_det, slhs=[CaPump], srhs=[Pump])
PumpD_k.setKcst(P_k_kcst)
#iCBsf-fast
iCBsf1_f = smodel.Reac('iCBsf1_f', vsys_det, lhs=[Ca_det,iCBsf], rhs=[iCBsCa], kcst = iCBsf1_f_kcst)
iCBsf1_b = smodel.Reac('iCBsf1_b', vsys_det, lhs=[iCBsCa], rhs=[Ca_det, iCBsf], kcst = iCBsf1_b_kcst)
#iCBsCa
iCBsCa_f = smodel.Reac('iCBsCa_f', vsys_det, lhs=[Ca_det,iCBsCa], rhs=[iCBCaCa], kcst = iCBsCa_f_kcst)
iCBsCa_b = smodel.Reac('iCBsCa_b', vsys_det, lhs=[iCBCaCa], rhs=[Ca_det,iCBsCa], kcst = iCBsCa_b_kcst)
#iCBsf_slow
iCBsf2_f = smodel.Reac('iCBsf2_f', vsys_det, lhs=[Ca_det,iCBsf], rhs=[iCBCaf], kcst = iCBsf2_f_kcst)
iCBsf2_b = smodel.Reac('iCBsf2_b', vsys_det, lhs=[iCBCaf], rhs=[Ca_det,iCBsf], kcst = iCBsf2_b_kcst)
#iCBCaf
iCBCaf_f = smodel.Reac('iCBCaf_f', vsys_det, lhs=[Ca_det,iCBCaf], rhs=[iCBCaCa], kcst = iCBCaf_f_kcst)
iCBCaf_b = smodel.Reac('iCBCaf_b', vsys_det, lhs=[iCBCaCa], rhs=[Ca_det,iCBCaf], kcst = iCBCaf_b_kcst)
#CBsf-fast
CBsf1_f = smodel.Reac('CBsf1_f', vsys_det, lhs=[Ca_det,CBsf], rhs=[CBsCa], kcst = CBsf1_f_kcst)
CBsf1_b = smodel.Reac('CBsf1_b', vsys_det, lhs=[CBsCa], rhs=[Ca_det,CBsf], kcst = CBsf1_b_kcst)
#CBsCa
CBsCa_f = smodel.Reac('CBsCa_f', vsys_det, lhs=[Ca_det,CBsCa], rhs=[CBCaCa], kcst = CBsCa_f_kcst)
CBsCa_b = smodel.Reac('CBsCa_b', vsys_det, lhs=[CBCaCa], rhs=[Ca_det,CBsCa], kcst = CBsCa_b_kcst)
#CBsf_slow
CBsf2_f = smodel.Reac('CBsf2_f', vsys_det, lhs=[Ca_det,CBsf], rhs=[CBCaf], kcst = CBsf2_f_kcst)
CBsf2_b = smodel.Reac('CBsf2_b', vsys_det, lhs=[CBCaf], rhs=[Ca_det,CBsf], kcst = CBsf2_b_kcst)
#CBCaf
CBCaf_f = smodel.Reac('CBCaf_f', vsys_det, lhs=[Ca_det,CBCaf], rhs=[CBCaCa], kcst = CBCaf_f_kcst)
CBCaf_b = smodel.Reac('CBCaf_b', vsys_det, lhs=[CBCaCa], rhs=[Ca_det,CBCaf], kcst = CBCaf_b_kcst)
#PVca
PVca_f = smodel.Reac('PVca_f', vsys_det, lhs=[Ca_det,PV], rhs=[PVCa], kcst = PVca_f_kcst)
PVca_b = smodel.Reac('PVca_b', vsys_det, lhs=[PVCa], rhs=[Ca_det,PV], kcst = PVca_b_kcst)
#PVmg
PVmg_f = smodel.Reac('PVmg_f', vsys_det, lhs=[Mg,PV], rhs=[PVMg], kcst = PVmg_f_kcst)
PVmg_b = smodel.Reac('PVmg_b', vsys_det, lhs=[PVMg], rhs=[Mg,PV], kcst = PVmg_b_kcst)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # CHANNELS # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
###### CaP channel ##############
CaPchan = smodel.Chan('CaPchan', mdl_stoch)
CaP_m0 = smodel.ChanState('CaP_m0', mdl_stoch, CaPchan)
CaP_m1 = smodel.ChanState('CaP_m1', mdl_stoch, CaPchan)
CaP_m2 = smodel.ChanState('CaP_m2', mdl_stoch, CaPchan)
CaP_m3 = smodel.ChanState('CaP_m3', mdl_stoch, CaPchan)
CaPm0m1 = smodel.VDepSReac('CaPm0m1', ssys_stoch, slhs = [CaP_m0], srhs = [CaP_m1], k= lambda V: 1.0e3 *3.* alpha_cap(V*1.0e3)* Qt)
CaPm1m2 = smodel.VDepSReac('CaPm1m2', ssys_stoch, slhs = [CaP_m1], srhs = [CaP_m2], k= lambda V: 1.0e3 *2.* alpha_cap(V*1.0e3)* Qt)
CaPm2m3 = smodel.VDepSReac('CaPm2m3', ssys_stoch, slhs = [CaP_m2], srhs = [CaP_m3], k= lambda V: 1.0e3 *1.* alpha_cap(V*1.0e3)* Qt)
CaPm3m2 = smodel.VDepSReac('CaPm3m2', ssys_stoch, slhs = [CaP_m3], srhs = [CaP_m2], k= lambda V: 1.0e3 *3.* beta_cap(V*1.0e3)* Qt)
CaPm2m1 = smodel.VDepSReac('CaPm2m1', ssys_stoch, slhs = [CaP_m2], srhs = [CaP_m1], k= lambda V: 1.0e3 *2.* beta_cap(V*1.0e3)* Qt)
CaPm1m0 = smodel.VDepSReac('CaPm1m0', ssys_stoch, slhs = [CaP_m1], srhs = [CaP_m0], k= lambda V: 1.0e3 *1.* beta_cap(V*1.0e3)* Qt)
if cyl160:
OC_CaP = smodel.GHKcurr('OC_CaP', ssys_stoch, CaP_m3, Ca_stoch, virtual_oconc = Ca_oconc, computeflux = True)
else:
OC_CaP = smodel.GHKcurr('OC_CaP', ssys_stoch, CaP_m3, Ca_stoch, computeflux = True)
OC_CaP.setP(CaP_P)
######## CaT channel ##########
CaT_m0h0 = smodel.Spec('CaT_m0h0', mdl_det)
CaT_m0h1 = smodel.Spec('CaT_m0h1', mdl_det)
CaT_m1h0 = smodel.Spec('CaT_m1h0', mdl_det)
CaT_m1h1 = smodel.Spec('CaT_m1h1', mdl_det)
CaT_m2h0 = smodel.Spec('CaT_m2h0', mdl_det)
CaT_m2h1 = smodel.Spec('CaT_m2h1', mdl_det)
CaTm0h0_m1h0 = smodel.SReac('CaTm0h0_m1h0', ssys_det, slhs = [CaT_m0h0], srhs = [CaT_m1h0], kcst=0.0)
CaTm1h0_m2h0 = smodel.SReac('CaTm1h0_m2h0', ssys_det, slhs = [CaT_m1h0], srhs = [CaT_m2h0], kcst=0.0)
CaTm2h0_m1h0 = smodel.SReac('CaTm2h0_m1h0', ssys_det, slhs = [CaT_m2h0], srhs = [CaT_m1h0], kcst=0.0)
CaTm1h0_m0h0 = smodel.SReac('CaTm1h0_m0h0', ssys_det, slhs = [CaT_m1h0], srhs = [CaT_m0h0], kcst=0.0)
CaTm0h1_m1h1 = smodel.SReac('CaTm0h1_m1h1', ssys_det, slhs = [CaT_m0h1], srhs = [CaT_m1h1], kcst=0.0)
CaTm1h1_m2h1 = smodel.SReac('CaTm1h1_m2h1', ssys_det, slhs = [CaT_m1h1], srhs = [CaT_m2h1], kcst=0.0)
CaTm2h1_m1h1 = smodel.SReac('CaTm2h1_m1h1', ssys_det, slhs = [CaT_m2h1], srhs = [CaT_m1h1], kcst=0.0)
CaTm1h1_m0h1 = smodel.SReac('CaTm1h1_m0h1', ssys_det, slhs = [CaT_m1h1], srhs = [CaT_m0h1], kcst=0.0)
CaTm0h0_m0h1 = smodel.SReac('CaTm0h0_m0h1', ssys_det, slhs = [CaT_m0h0], srhs = [CaT_m0h1], kcst=0.0)
CaTm1h0_m1h1 = smodel.SReac('CaTm1h0_m1h1', ssys_det, slhs = [CaT_m1h0], srhs = [CaT_m1h1], kcst=0.0)
CaTm2h0_m2h1 = smodel.SReac('CaTm2h0_m2h1', ssys_det, slhs = [CaT_m2h0], srhs = [CaT_m2h1], kcst=0.0)
CaTm2h1_m2h0 = smodel.SReac('CaTm2h1_m2h0', ssys_det, slhs = [CaT_m2h1], srhs = [CaT_m2h0], kcst=0.0)
CaTm1h1_m1h0 = smodel.SReac('CaTm1h1_m1h0', ssys_det, slhs = [CaT_m1h1], srhs = [CaT_m1h0], kcst=0.0)
CaTm0h1_m0h0 = smodel.SReac('CaTm0h1_m0h0', ssys_det, slhs = [CaT_m0h1], srhs = [CaT_m0h0], kcst=0.0)
##### BK channel ####################
BK_C0 = smodel.Spec('BK_C0', mdl_det)
BK_C1 = smodel.Spec('BK_C1', mdl_det)
BK_C2 = smodel.Spec('BK_C2', mdl_det)
BK_C3 = smodel.Spec('BK_C3', mdl_det)
BK_C4 = smodel.Spec('BK_C4', mdl_det)
BK_O0 = smodel.Spec('BK_O0', mdl_det)
BK_O1 = smodel.Spec('BK_O1', mdl_det)
BK_O2 = smodel.Spec('BK_O2', mdl_det)
BK_O3 = smodel.Spec('BK_O3', mdl_det)
BK_O4 = smodel.Spec('BK_O4', mdl_det)
BKCAC0 = smodel.SReac('BKCAC0', ssys_det, slhs = [BK_C0], ilhs = [Ca_det], srhs = [BK_C1], kcst = c_01)
BKCAC1 = smodel.SReac('BKCAC1', ssys_det, slhs = [BK_C1], ilhs = [Ca_det], srhs = [BK_C2], kcst = c_12)
BKCAC2 = smodel.SReac('BKCAC2', ssys_det, slhs = [BK_C2], ilhs = [Ca_det], srhs = [BK_C3], kcst = c_23)
BKCAC3 = smodel.SReac('BKCAC3', ssys_det, slhs = [BK_C3], ilhs = [Ca_det], srhs = [BK_C4], kcst = c_34)
BKC0 = smodel.SReac('BKC0', ssys_det, slhs = [BK_C1], srhs = [BK_C0], irhs=[Ca_det], kcst = c_10)
BKC1 = smodel.SReac('BKC1', ssys_det, slhs = [BK_C2], srhs = [BK_C1], irhs=[Ca_det], kcst = c_21)
BKC2 = smodel.SReac('BKC2', ssys_det, slhs = [BK_C3], srhs = [BK_C2], irhs=[Ca_det], kcst = c_32)
BKC3 = smodel.SReac('BKC3', ssys_det, slhs = [BK_C4], srhs = [BK_C3], irhs=[Ca_det], kcst = c_43)
BKCAO0 = smodel.SReac('BKCAO0', ssys_det, slhs = [BK_O0], ilhs = [Ca_det], srhs = [BK_O1], kcst = o_01)
BKCAO1 = smodel.SReac('BKCAO1', ssys_det, slhs = [BK_O1], ilhs = [Ca_det], srhs = [BK_O2], kcst = o_12)
BKCAO2 = smodel.SReac('BKCAO2', ssys_det, slhs = [BK_O2], ilhs = [Ca_det], srhs = [BK_O3], kcst = o_23)
BKCAO3 = smodel.SReac('BKCAO3', ssys_det, slhs = [BK_O3], ilhs = [Ca_det], srhs = [BK_O4], kcst = o_34)
BKO0 = smodel.SReac('BKO0', ssys_det, slhs = [BK_O1], srhs = [BK_O0], irhs=[Ca_det], kcst = o_10)
BKO1 = smodel.SReac('BKO1', ssys_det, slhs = [BK_O2], srhs = [BK_O1], irhs=[Ca_det], kcst = o_21)
BKO2 = smodel.SReac('BKO2', ssys_det, slhs = [BK_O3], srhs = [BK_O2], irhs=[Ca_det], kcst = o_32)
BKO3 = smodel.SReac('BKO3', ssys_det, slhs = [BK_O4], srhs = [BK_O3], irhs=[Ca_det], kcst = o_43)
BKC0O0 = smodel.SReac('BKC0O0', ssys_det, slhs = [BK_C0], srhs = [BK_O0], kcst=0.0)
BKC1O1 = smodel.SReac('BKC1O1', ssys_det, slhs = [BK_C1], srhs = [BK_O1], kcst=0.0)
BKC2O2 = smodel.SReac('BKC2O2', ssys_det, slhs = [BK_C2], srhs = [BK_O2], kcst=0.0)
BKC3O3 = smodel.SReac('BKC3O3', ssys_det, slhs = [BK_C3], srhs = [BK_O3], kcst=0.0)
BKC4O4 = smodel.SReac('BKC4O4', ssys_det, slhs = [BK_C4], srhs = [BK_O4], kcst=0.0)
BKO0C0 = smodel.SReac('BKO0C0', ssys_det, slhs = [BK_O0], srhs = [BK_C0], kcst=0.0)
BKO1C1 = smodel.SReac('BKO1C1', ssys_det, slhs = [BK_O1], srhs = [BK_C1], kcst=0.0)
BKO2C2 = smodel.SReac('BKO2C2', ssys_det, slhs = [BK_O2], srhs = [BK_C2], kcst=0.0)
BKO3C3 = smodel.SReac('BKO3C3', ssys_det, slhs = [BK_O3], srhs = [BK_C3], kcst=0.0)
BKO4C4 = smodel.SReac('BKO4C4', ssys_det, slhs = [BK_O4], srhs = [BK_C4], kcst=0.0)
###### SK channel ################## DETERMINISTIC
SK_C1 = smodel.Spec('SK_C1', mdl_det)
SK_C2 = smodel.Spec('SK_C2', mdl_det)
SK_C3 = smodel.Spec('SK_C3', mdl_det)
SK_C4 = smodel.Spec('SK_C4', mdl_det)
SK_O1 = smodel.Spec('SK_O1', mdl_det)
SK_O2 = smodel.Spec('SK_O2', mdl_det)
SKCAC1 = smodel.SReac('SKCAC1', ssys_det, slhs = [SK_C1], ilhs = [Ca_det], srhs = [SK_C2], kcst = dirc2_t)
SKCAC2 = smodel.SReac('SKCAC2', ssys_det, slhs = [SK_C2], ilhs = [Ca_det], srhs = [SK_C3], kcst = dirc3_t)
SKCAC3 = smodel.SReac('SKCAC3', ssys_det, slhs = [SK_C3], ilhs = [Ca_det], srhs = [SK_C4], kcst = dirc4_t)
SKC1 = smodel.SReac('SKC1', ssys_det, slhs = [SK_C2], srhs = [SK_C1], irhs=[Ca_det], kcst = invc1_t)
SKC2 = smodel.SReac('SKC2', ssys_det, slhs = [SK_C3], srhs = [SK_C2], irhs=[Ca_det], kcst = invc2_t)
SKC3 = smodel.SReac('SKC3', ssys_det, slhs = [SK_C4], srhs = [SK_C3], irhs=[Ca_det], kcst = invc3_t)
SKC3O1 = smodel.SReac('SKC3O1', ssys_det, slhs = [SK_C3], srhs = [SK_O1], kcst = diro1_t)
SKC4O2 = smodel.SReac('SKC4O2', ssys_det, slhs = [SK_C4], srhs = [SK_O2], kcst = diro2_t)
SKO1C3 = smodel.SReac('SKO1C3', ssys_det, slhs = [SK_O1], srhs = [SK_C3], kcst = invo1_t)
SKO2C4 = smodel.SReac('SKO2C4', ssys_det, slhs = [SK_O2], srhs = [SK_C4], kcst = invo2_t)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
########### MESH & COMPARTMENTALIZATION #################
##########Import Mesh
# For stochastic sim:
mesh_stoch = meshio.loadMesh('./meshes/'+meshfile_ab)[0]
# For deterministic sim:
mesh_det = meshio.loadMesh('./meshes/'+meshfile_ab)[0]
outer_tets = range(mesh_stoch.ntets)
inner_tets = gettets.getcyl(mesh_stoch, 1e-6, -200e-6, 200e-6)[0]
for i in inner_tets: outer_tets.remove(i)
print(len(outer_tets), " tets in outer compartment")
print(len(inner_tets), " tets in inner compartment")
# Record voltage from the central tetrahedron
cent_tet = mesh_stoch.findTetByPoint([0.0,0.0,0.0])
########## Create an intracellular compartment i.e. cytosolic compartment
cyto_stoch = sgeom.TmComp('cyto_stoch', mesh_stoch, inner_tets)
cyto_stoch.addVolsys('vsys_stoch')
if not cyl160: outer_stoch = sgeom.TmComp('outer_stoch', mesh_stoch, outer_tets)
cyto_det = sgeom.TmComp('cyto_det', mesh_det, inner_tets)
cyto_det.addVolsys('vsys_det')
if cyl160:
# Ensure that we use points a small distance inside the boundary:
LENGTH = mesh_stoch.getBoundMax()[2] - mesh_stoch.getBoundMin()[2]
boundminz = mesh_stoch.getBoundMin()[2] + LENGTH/mesh_stoch.ntets
boundmaxz = mesh_stoch.getBoundMax()[2] - LENGTH/mesh_stoch.ntets
memb_tris = list(mesh_stoch.getSurfTris())
minztris = []
maxztris = []
for tri in memb_tris:
zminboundtri = True
zmaxboundtri = True
tritemp = mesh_stoch.getTri(tri)
trizs = [0.0, 0.0, 0.0]
trizs[0] = mesh_stoch.getVertex(tritemp[0])[2]
trizs[1] = mesh_stoch.getVertex(tritemp[1])[2]
trizs[2] = mesh_stoch.getVertex(tritemp[2])[2]
for j in range(3):
if (trizs[j]>boundminz): zminboundtri = False
if (zminboundtri):
minztris.append(tri)
continue
for j in range(3):
if (trizs[j]< boundmaxz): zmaxboundtri = False
if (zmaxboundtri):
maxztris.append(tri)
for t in minztris: memb_tris.remove(t)
for t in maxztris: memb_tris.remove(t)
else:
print('Finding connecting triangles...')
out_tris = set()
for i in outer_tets:
tritemp = mesh_stoch.getTetTriNeighb(i)
for j in range(4): out_tris.add(tritemp[j])
in_tris = set()
for i in inner_tets:
tritemp = mesh_stoch.getTetTriNeighb(i)
for j in range(4): in_tris.add(tritemp[j])
memb_tris = out_tris.intersection(in_tris)
memb_tris = list(memb_tris)
########## Find the submembrane tets
memb_tet_neighb = []
for i in memb_tris:
tettemp = mesh_stoch.getTriTetNeighb(i)
for j in tettemp:
memb_tet_neighb.append(j)
submemb_tets = []
for i in memb_tet_neighb:
if i in inner_tets:
submemb_tets.append(i)
print(len(submemb_tets))
vol = 0.0
for i in submemb_tets:
vol = vol + mesh_stoch.getTetVol(i)
print('Volume of submembrane region is', vol)
submemb_tets_surftris = dict()
for m in submemb_tets:
tris = mesh_stoch.getTetTriNeighb(m)
for t in tris:
if t in memb_tris:
submemb_tets_surftris[m] = t
break
assert(len(submemb_tets_surftris.values()) == len(submemb_tets))
########## Create a membrane as a surface mesh
# Stochastic sim:
if cyl160:
memb_stoch = sgeom.TmPatch('memb_stoch', mesh_stoch, memb_tris, cyto_stoch)
else:
memb_stoch = sgeom.TmPatch('memb_stoch', mesh_stoch, memb_tris, cyto_stoch, outer_stoch)
memb_stoch.addSurfsys('ssys_stoch')
# Deterministic sim:
memb_det = sgeom.TmPatch('memb_det', mesh_det, memb_tris, cyto_det)
memb_det.addSurfsys('ssys_det')
# For EField calculation
print("Creating membrane..")
membrane = sgeom.Memb('membrane', mesh_stoch, [memb_stoch])
print("Membrane created.")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # SIMULATION # # # # # # # # # # # # # # # # # # # # # #
r = srng.create_mt19937(512)
r.initialize(7)
r_dummy = srng.create_mt19937(512)
r_dummy.initialize(7)
print("Creating Tet exact solver")
#Creating two solvers
sim_stoch = ssolver.Tetexact(mdl_stoch, mesh_stoch, r, True)
print("Creating Tet ODE solver")
sim_det = ssolver.TetODE(mdl_det, mesh_det, r_dummy)
sim_det.setTolerances(1.0e-3, 1.0e-3)
print("Resetting simulation objects..")
sim_stoch.reset()
print("Injecting molecules..")
sim_stoch.setTemp(TEMPERATURE+273.15)
if not cyl160:
sim_stoch.setCompConc('outer_stoch', 'Ca_stoch', Ca_oconc)
sim_stoch.setCompClamped('outer_stoch', 'Ca_stoch', True)
sim_stoch.setCompConc('cyto_stoch', 'Ca_stoch', Ca_iconc)
print("Calcium concentration in stochastic simulation is: ", sim_stoch.getCompConc('cyto_stoch', 'Ca_stoch'))
print("No. of Ca molecules in stochastic simulation is: ", sim_stoch.getCompCount('cyto_stoch', 'Ca_stoch'))
sim_det.setCompConc('cyto_det', 'Ca_det', Ca_iconc)
print("Calcium concentration in deterministic simulation is: ", sim_det.getCompConc('cyto_det', 'Ca_det'))
print("No. of Ca molecules in deterministic simulation is: ", sim_det.getCompCount('cyto_det', 'Ca_det'))
sim_det.setCompConc('cyto_det', 'Mg', Mg_conc)
surfarea = sim_stoch.getPatchArea('memb_stoch')
pumpnbs = 6.022141e12*surfarea
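# (Added note) 6.022141e12 pumps per m^2 corresponds to a surface density of
# 1.0e-11 mol/m^2 multiplied by Avogadro's number (6.022141e23 / mol).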
sim_det.setPatchCount('memb_det', 'Pump', round(pumpnbs))
sim_det.setPatchCount('memb_det', 'CaPump', 0)
print("Injected ", sim_det.getPatchCount('memb_det', 'Pump'), "pumps")
sim_det.setCompConc('cyto_det', 'iCBsf', iCBsf_conc)
sim_det.setCompConc('cyto_det', 'iCBsCa', iCBsCa_conc)
sim_det.setCompConc('cyto_det', 'iCBCaf', iCBCaf_conc)
sim_det.setCompConc('cyto_det', 'iCBCaCa', iCBCaCa_conc)
sim_det.setCompConc('cyto_det', 'CBsf', CBsf_conc)
sim_det.setCompConc('cyto_det', 'CBsCa', CBsCa_conc)
sim_det.setCompConc('cyto_det', 'CBCaf', CBCaf_conc)
sim_det.setCompConc('cyto_det', 'CBCaCa', CBCaCa_conc)
sim_det.setCompConc('cyto_det', 'PV', PV_conc)
sim_det.setCompConc('cyto_det', 'PVCa', PVCa_conc)
sim_det.setCompConc('cyto_det', 'PVMg', PVMg_conc)
# CaP
sim_stoch.setPatchCount('memb_stoch', 'CaP_m0' , round(CaP_ro*surfarea*CaP_m0_p))
sim_stoch.setPatchCount('memb_stoch', 'CaP_m1' , round(CaP_ro*surfarea*CaP_m1_p))
sim_stoch.setPatchCount('memb_stoch', 'CaP_m2' , round(CaP_ro*surfarea*CaP_m2_p))
sim_stoch.setPatchCount('memb_stoch', 'CaP_m3' , round(CaP_ro*surfarea*CaP_m3_p))
print("CaP_m0 ", round(CaP_ro*surfarea*CaP_m0_p))
print("CaP_m1 ", round(CaP_ro*surfarea*CaP_m1_p))
print("CaP_m2 ", round(CaP_ro*surfarea*CaP_m2_p))
print("CaP_m3 ", round(CaP_ro*surfarea*CaP_m3_p))
print("Targeted Injection: ", round(CaP_ro*surfarea), "CaP channels")
# CaT
# From cstate: CaT_m2h0 conducting
sim_det.setPatchCount('memb_det', 'CaT_m0h0' , round(CaT_ro*surfarea*CaT_m0h0_p))
sim_det.setPatchCount('memb_det', 'CaT_m1h0' , round(CaT_ro*surfarea*CaT_m1h0_p))
sim_det.setPatchCount('memb_det', 'CaT_m2h0' , round(CaT_ro*surfarea*CaT_m2h0_p))
sim_det.setPatchCount('memb_det', 'CaT_m0h1' , round(CaT_ro*surfarea*CaT_m0h1_p))
sim_det.setPatchCount('memb_det', 'CaT_m1h1' , round(CaT_ro*surfarea*CaT_m1h1_p))
sim_det.setPatchCount('memb_det', 'CaT_m2h1' , round(CaT_ro*surfarea*CaT_m2h1_p))
print("Injected ", CaT_ro*surfarea, "CaT channels")
# BK
sim_det.setPatchCount('memb_det', 'BK_C0' , round(BK_ro*surfarea*BK_C0_p))
sim_det.setPatchCount('memb_det', 'BK_C1' , round(BK_ro*surfarea*BK_C1_p))
sim_det.setPatchCount('memb_det', 'BK_C2' , round(BK_ro*surfarea*BK_C2_p))
sim_det.setPatchCount('memb_det', 'BK_C3' , round(BK_ro*surfarea*BK_C3_p))
sim_det.setPatchCount('memb_det', 'BK_C4' , round(BK_ro*surfarea*BK_C4_p))
sim_det.setPatchCount('memb_det', 'BK_O0' , round(BK_ro*surfarea*BK_O0_p))
sim_det.setPatchCount('memb_det', 'BK_O1' , round(BK_ro*surfarea*BK_O1_p))
sim_det.setPatchCount('memb_det', 'BK_O2' , round(BK_ro*surfarea*BK_O2_p))
sim_det.setPatchCount('memb_det', 'BK_O3' , round(BK_ro*surfarea*BK_O3_p))
sim_det.setPatchCount('memb_det', 'BK_O4' , round(BK_ro*surfarea*BK_O4_p))
print("Injected ", BK_ro*surfarea, "BK channels")
# SK
sim_det.setPatchCount('memb_det', 'SK_C1' , round(SK_ro*surfarea*SK_C1_p))
sim_det.setPatchCount('memb_det', 'SK_C2' , round(SK_ro*surfarea*SK_C2_p))
sim_det.setPatchCount('memb_det', 'SK_C3' , round(SK_ro*surfarea*SK_C3_p))
sim_det.setPatchCount('memb_det', 'SK_C4' , round(SK_ro*surfarea*SK_C4_p))
sim_det.setPatchCount('memb_det', 'SK_O1' , round(SK_ro*surfarea*SK_O1_p))
sim_det.setPatchCount('memb_det', 'SK_O2' , round(SK_ro*surfarea*SK_O2_p))
print("Injected ", SK_ro*surfarea, "SK channels")
sim_stoch.setEfieldDT(EF_DT)
sim_stoch.setMembPotential('membrane', init_pot)
sim_stoch.setMembVolRes('membrane', Ra)
#cm = 1.5uF/cm2 -> 1.5e-6F/1e-4m2 ->1.5e-2 F/m2
sim_stoch.setMembCapac('membrane',memb_capac)
#### Recording #####
c=time.ctime()
dc = c.split()[1]+c.split()[2]+'_'+c.split()[3]+'_'+c.split()[4]
dc= dc.replace(':', '_')
try: os.mkdir(root+'data')
except: pass
try: os.mkdir(root+'data/' + 'HybridCaburst_stochCaP')
except: pass
try: os.mkdir(root+'data/' + 'HybridCaburst_stochCaP/'+meshfile_ab)
except: pass
os.mkdir(root+'data/' + 'HybridCaburst_stochCaP/'+meshfile_ab+'/'+iter_n+'__'+dc )
datfile = open(root+'data/' + 'HybridCaburst_stochCaP/'+meshfile_ab+'/'+iter_n+'__'+dc + '/currents.dat', 'w')
datfile2 = open(root+'data/' + 'HybridCaburst_stochCaP/'+meshfile_ab+'/'+iter_n+'__'+dc + '/voltage.dat', 'w')
datfile3 = open(root+'data/' + 'HybridCaburst_stochCaP/'+meshfile_ab+'/'+iter_n+'__'+dc + '/calcium.dat', 'w')
r.initialize(int(time.time()%1000))
btime = time.time()
for l in range(NTIMEPOINTS):
print("Tpnt: ", l)
#1) RUN STOCHASTIC SIMULATION i.e. compute currents and update stochastic calcium concentration
sim_stoch.run(TIMECONVERTER*l)
#2) READ STOCHASTIC CA and 3) SET DETERMINISTIC CA AND RATE CONSTANTS FOR DETERMINISTIC CHANNELS
for m in submemb_tets:
Si = sim_stoch.getTetConc(m,'Ca_stoch')
sim_det.setTetConc(m,'Ca_det',Si)
# Assuming V is not constant everywhere in this simulation
for m in submemb_tets:
ctriID = submemb_tets_surftris[m]
V = sim_stoch.getTriV(ctriID)
#3) Set the rate constants and RUN THE DETERMINISTIC SIMULATION
sim_det.setTriSReacK(ctriID, 'CaTm0h0_m1h0', 1.0e3 *2.* alpham_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h0_m2h0', 1.0e3 *1.* alpham_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm2h0_m1h0', 1.0e3 *2.* betam_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h0_m0h0', 1.0e3 *1.* betam_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm0h0_m0h1', 1.0e3 *1.* alphah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h0_m1h1', 1.0e3 *1.* alphah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm2h0_m2h1', 1.0e3 *1.* alphah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm2h1_m2h0', 1.0e3 *1.* betah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h1_m1h0', 1.0e3 *1.* betah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm0h1_m0h0', 1.0e3 *1.* betah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm0h1_m1h1', 1.0e3 *2.* alpham_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h1_m2h1', 1.0e3 *1.* alpham_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm2h1_m1h1', 1.0e3 *2.* betam_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h1_m0h1', 1.0e3 *1.* betam_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'BKC0O0', f_0(V))
sim_det.setTriSReacK(ctriID, 'BKC1O1', f_1(V))
sim_det.setTriSReacK(ctriID, 'BKC2O2', f_2(V))
sim_det.setTriSReacK(ctriID, 'BKC3O3', f_3(V))
sim_det.setTriSReacK(ctriID, 'BKC4O4', f_4(V))
sim_det.setTriSReacK(ctriID, 'BKO0C0', b_0(V))
sim_det.setTriSReacK(ctriID, 'BKO1C1', b_1(V))
sim_det.setTriSReacK(ctriID, 'BKO2C2', b_2(V))
sim_det.setTriSReacK(ctriID, 'BKO3C3', b_3(V))
sim_det.setTriSReacK(ctriID, 'BKO4C4', b_4(V))
#4) RUN DETERMINISTIC SIMULATION
sim_det.run(TIMECONVERTER*l)
# Now do the communication between the sims
#5)READ DETERMINISTIC CHANNELS & THEN COMPUTE CURRENT USING DETERMINISTIC GHK (could be stochastic)
So = Ca_oconc
# i) For each tet in submembrane, find the corresponding triID
# ii) For each tri, compute GHK current for each channel
# iii) Count the channel states / Spec in open states for each of the triID and compute the total current of that channel
tcur_CaP = 0.0
tcur_CaT = 0.0
tcur_BK = 0.0
tcur_SK = 0.0
tca_count_det = 0.0
tca_count_stoch = 0.0
for m in submemb_tets:
ctriID = submemb_tets_surftris[m]
V = sim_stoch.getTriV(ctriID)
Si = sim_det.getTetConc(m,'Ca_det')
cur_CaT_sc = cf.getGHKI(CaT_P, V, 2, TEMPERATURE+273.15, Si*1.0e3, So*1.0e3)
cur_BK_sc = cf.getOhmI(V, BK_rev, BK_G)
cur_SK_sc = cf.getOhmI(V, SK_rev, SK_G)
cur_L_sc = cf.getOhmI(V, L_rev, L_G)
cur_CaT = cur_CaT_sc*(sim_det.getTriCount(ctriID, 'CaT_m2h1'))
cur_BK = cur_BK_sc*(sim_det.getTriCount(ctriID, 'BK_O0') + sim_det.getTriCount(ctriID, 'BK_O1') + sim_det.getTriCount(ctriID, 'BK_O2') + sim_det.getTriCount(ctriID, 'BK_O3') + sim_det.getTriCount(ctriID, 'BK_O4'))
cur_SK = cur_SK_sc*(sim_det.getTriCount(ctriID, 'SK_O1') + sim_det.getTriCount(ctriID, 'SK_O2'))
#cur_L corresponding to each surftri has been corrected in the following script line
cur_L = cur_L_sc*(round(L_ro * sim_det.getPatchArea('memb_det')))*(sim_stoch.getTriArea(ctriID)/sim_det.getPatchArea('memb_det'))
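# (Added note) i.e. the total leak current of all round(L_ro * patch area) leak
# channels, at cur_L_sc each, is distributed over the membrane triangles in
# proportion to each triangle's share of the patch area.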
ca_count_inj = -1.0*((cur_CaT)*TIMECONVERTER)/(2*E_CHARGE)
sim_stoch.setTetCount(m, 'Ca_stoch', ca_count_inj+sim_det.getTetCount(m,'Ca_det'))
sim_stoch.setTriIClamp(ctriID, cur_CaT+cur_BK+cur_SK+cur_L)
tcur_CaP = tcur_CaP + sim_stoch.getTriGHKI(ctriID,'OC_CaP')
tcur_CaT = tcur_CaT + cur_CaT
tcur_BK = tcur_BK + cur_BK
tcur_SK = tcur_SK + cur_SK
tca_count_det = tca_count_det + sim_det.getTetCount(m,'Ca_det')
tca_count_stoch = tca_count_stoch + sim_stoch.getTetCount(m,'Ca_stoch')
datfile.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile.write('%.6g' %((tcur_CaP*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_CaT*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_BK*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_SK*1.0e-1)/surfarea) + ' ')
datfile.write('\n')
datfile2.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile2.write('%.6g' %(sim_stoch.getTetV(cent_tet)*1.0e3) + ' ')
datfile2.write('\n')
datfile3.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile3.write('%.6g' %(((tca_count_det/AVOGADRO)/(vol*1.0e3))*1.0e6) + ' ')
datfile3.write('%.6g' %(((tca_count_stoch/AVOGADRO)/(vol*1.0e3))*1.0e6) + ' ')
datfile3.write('%.6g' %tca_count_det + ' ')
datfile3.write('%.6g' %tca_count_stoch + ' ')
datfile3.write('\n')
datfile.close()
datfile2.close()
datfile3.close()
|
CNS-OIST/STEPS_Example
|
publication_models/API_1/Anwar_J Neurosci_2013/HybridCaburst_stochCaP.py
|
Python
|
gpl-2.0
| 29,332
|
[
"Avogadro"
] |
52e51b25c44ffc9ef0694a8df13c0fb80ee57fd4b4d34acaefc006a3312c610a
|
import numpy as np
def multiple_2d_circ_gauss_func(p_guess):
def f(p, fjac=None, data=None, err=None,return_models=False):
#p[0] = background
#p[1] = amplitude
#p[2] = x_offset
#p[3] = y_offset
#p[4] = sigma for both x and y
#And so on for the 2nd and 3rd gaussians etc...
#A+Be^-((x-xo)^2+(y-y0)^2)/2s^2 + Ce^-((x-x1)^2+(y-y1)^2)/2d^2 + ...
#print p_guess
#print p
#print range(len(p[1:])%4)
models=[p[0]*p_guess[0]*np.ones(np.asarray(data).shape)]
x=np.tile(range(len(data[0])),(len(data),1))
y=np.tile(range(len(data)),(len(data[0]),1)).transpose()
for i in range(len(p[1:]) // 4):
m = p[1+4*i]*p_guess[1+4*i] * np.exp( - (pow(x-p[2+i*4]*p_guess[2+i*4],2)+pow(y-p[3+i*4]*p_guess[3+i*4],2)) / (2 * pow(p[4+i*4]*p_guess[4+i*4],2)) )
models.append(m)
if return_models: return models
model = models[0]
for m in models[1:]: model+=m
status = 0
return([status,np.ravel((data-model)/err)])
return f
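# Added usage sketch (illustration only): the returned residual function takes
# parameters expressed as multipliers of p_guess, so an all-ones vector
# reproduces the initial guess exactly.
#
#   p_guess = [10.0, 50.0, 16.0, 16.0, 2.0]   # background + one circular Gaussian
#   f = multiple_2d_circ_gauss_func(p_guess)
#   models = f([1.0] * 5, data=np.zeros((32, 32)), err=1.0, return_models=True)
#   # models[0] is the flat background, models[1] the Gaussian component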
def benitez2(p, fjac=None, x=None, y=None, err=None):
model = pow(x, p[0]) * np.exp(-pow(x / p[1], p[2]))
status = 0
return([status, (y-model)/err])
def parabola_old(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = x_offset
#p[1] = y_offset
#p[2] = amplitude
model = p[2] * (pow( (x - p[0]), 2 )) + p[1]
if return_models:
return [model]
status = 0
return([status, (y-model)/err])
def parabola(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = constant term
#p[1] = linear term
#p[2] = quadratic term
model = p[0]+p[1]*x+p[2]*x**2
if return_models:
return [model]
status = 0
return([status, (y-model)/err])
def gaussian(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = sigma
#p[1] = x_offset
#p[2] = amplitude
#p[3] = y_offset
# model = p[3] + p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
model = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
if return_models:
return [model]
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return([status, (y-model)/err])
def twogaussian(p, fjac=None, x=None, y=None, err=None):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2
#p[5] = amplitude2
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]),2) / ( 2. * pow(p[3],2))))
model = gauss1 + gauss2
status = 0
return([status, (y-model)/err])
def twogaussianexp(p, fjac=None, x=None, y=None, err=None):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2
#p[5] = amplitude2
#p[6] = scalefactor
#p[7] = x_offset3
#p[8] = amplitude3
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]),2) / ( 2. * pow(p[3],2))))
expo = p[8] * np.exp(p[6] * (x - p[7]))
model = gauss1 + gauss2 + expo
status = 0
return([status, (y-model)/err])
def threegaussian(p, fjac=None, x=None, y=None, err=None):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2
#p[5] = amplitude2
#p[6] = sigma3
#p[7] = x_offset3
#p[8] = amplitude3
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]),2) / ( 2. * pow(p[3],2))))
gauss3 = p[8] * np.exp( - (pow(( x - p[7]),2) / ( 2. * pow(p[6],2))))
model = gauss1 + gauss2 + gauss3
status = 0
return([status, (y-model)/err])
def fourgaussian(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2-x_offset1
#p[5] = amplitude2
#p[6] = sigma3
#p[7] = x_offset3-xoffset2
#p[8] = amplitude3
#p[9] = sigma4
#p[10] = x_offset4
#p[11] = amplitude4
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]-p[1]),2) / ( 2. * pow(p[3],2))))
gauss3 = p[8] * np.exp( - (pow(( x - p[7]-p[4]-p[1]),2) / ( 2. * pow(p[6],2))))
gauss4 = p[11] * np.exp( - (pow(( x - p[10]),2) / ( 2. * pow(p[9],2))))
model = gauss1 + gauss2 + gauss3 + gauss4
if return_models:
return [gauss1, gauss2, gauss3, gauss4]
status = 0
return([status, (y-model)/err])
def fourgaussian_pow(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2-x_offset1
#p[5] = amplitude2
#p[6] = sigma3
#p[7] = x_offset3-xoffset2
#p[8] = amplitude3
#p[9] = sigma4
#p[10] = x_offset4
#p[11] = amplitude4
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]-p[1]),2) / ( 2. * pow(p[3],2))))
gauss3 = p[8] * np.exp( - (pow(( x - p[7]-p[4]-p[1]),2) / ( 2. * pow(p[6],2))))
gauss4 = p[11] * np.exp( - (pow(( x - p[10]),2) / ( 2. * pow(p[9],2))))
model = gauss1 + gauss2 + gauss3 + gauss4
if return_models:
return [gauss1, gauss2, gauss3, gauss4]
status = 0
return([status, (y-model)/err])
def threegaussian_exp(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2-x_offset1
#p[5] = amplitude2
#p[6] = sigma3
#p[7] = x_offset3-xoffset2
#p[8] = amplitude3
#p[9] = scale_factor
#p[10] = x_offset4
#p[11] = amplitude4
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]-p[1]),2) / ( 2. * pow(p[3],2))))
gauss3 = p[8] * np.exp( - (pow(( x - p[7]-p[4]-p[1]),2) / ( 2. * pow(p[6],2))))
expo = p[11] * np.exp(p[9] * (x - p[10]))
model = gauss1 + gauss2 + gauss3 + expo
if return_models:
return [gauss1, gauss2, gauss3, expo]
status = 0
return([status, (y-model)/err])
def threegaussian_exppow(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2
#p[5] = amplitude2
#p[6] = sigma3
#p[7] = x_offset3
#p[8] = amplitude3
#p[9] = scale_factor
#p[10] = x_offset4
#p[11] = amplitude4
#p[12] = power4
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]),2) / ( 2. * pow(p[3],2))))
gauss3 = p[8] * np.exp( - (pow(( x - p[7]),2) / ( 2. * pow(p[6],2))))
expo = p[11] * np.exp(p[9] * (-(p[10] - x)**p[12]))
model = gauss1 + gauss2 + gauss3 + expo
if return_models:
return [gauss1, gauss2, gauss3, expo]
status = 0
return([status, (y-model)/err])
def threegaussian_moyal(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2
#p[5] = amplitude2
#p[6] = sigma3
#p[7] = x_offset3
#p[8] = amplitude3
#p[9] = sigma4
#p[10] = x_offset4
#p[11] = amplitude4
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]),2) / ( 2. * pow(p[3],2))))
gauss3 = p[8] * np.exp( - (pow(( x - p[7]),2) / ( 2. * pow(p[6],2))))
moyal = p[11] * np.exp( - 0.5 * (np.exp( - (-x - p[10])/p[9]) + (-x - p[10])/p[9] + 1))
model = gauss1 + gauss2 + gauss3 + moyal
if return_models:
return [gauss1, gauss2, gauss3, moyal]
status = 0
return([status, (y-model)/err])
def threegaussian_power(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2-x_offset1
#p[5] = amplitude2
#p[6] = sigma3
#p[7] = x_offset3-xoffset2
#p[8] = amplitude3
#p[9] = scale_factor4
#p[10] = x_offset4
#p[11] = amplitude4
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]-p[1]),2) / ( 2. * pow(p[3],2))))
gauss3 = p[8] * np.exp( - (pow(( x - p[7]-p[4]-p[1]),2) / ( 2. * pow(p[6],2))))
power4 = p[11] * np.maximum((x - p[10]),0)**p[9]
model = gauss1 + gauss2 + gauss3 + power4
if return_models:
return [gauss1, gauss2, gauss3, power4]
status = 0
return([status, (y-model)/err])
def threegaussian_lorentzian(p, fjac=None, x=None, y=None, err=None,return_models=False):
#p[0] = sigma1
#p[1] = x_offset1
#p[2] = amplitude1
#p[3] = sigma2
#p[4] = x_offset2
#p[5] = amplitude2
#p[6] = sigma3
#p[7] = x_offset3
#p[8] = amplitude3
#p[9] = scale_factor4
#p[10] = x_offset4
#p[11] = amplitude4
gauss1 = p[2] * np.exp( - (pow(( x - p[1]),2) / ( 2. * pow(p[0],2))))
gauss2 = p[5] * np.exp( - (pow(( x - p[4]),2) / ( 2. * pow(p[3],2))))
gauss3 = p[8] * np.exp( - (pow(( x - p[7]),2) / ( 2. * pow(p[6],2))))
lorentz4 = p[11] / (1 + ((x - p[10])/p[9])**2)
model = gauss1 + gauss2 + gauss3 + lorentz4
if return_models:
return [gauss1, gauss2, gauss3, lorentz4]
status = 0
return([status, (y-model)/err])
model_list = {
'parabola': parabola,
'gaussian': gaussian,
'fourgaussian': fourgaussian,
'threegaussian_exp': threegaussian_exp,
'threegaussian_exppow': threegaussian_exppow,
'threegaussian_moyal': threegaussian_moyal,
'threegaussian_power': threegaussian_power,
'threegaussian_lorentzian': threegaussian_lorentzian,
'multiple_2d_circ_gauss_func': multiple_2d_circ_gauss_func}
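# --- Added usage sketch (illustration only, not part of the original module).
# --- It evaluates the gaussian() model directly, without any fitting engine
# --- such as mpfit, to show the (status, residuals) / return_models contract.
if __name__ == '__main__':
    xdata = np.linspace(-5.0, 5.0, 201)
    errors = np.ones_like(xdata)
    # synthetic data: one Gaussian (sigma=1, centre=0, amplitude=2) plus noise
    ydata = 2.0 * np.exp(-xdata ** 2 / 2.0) + 0.05 * np.random.randn(xdata.size)
    params = [1.0, 0.0, 2.0]                    # [sigma, x_offset, amplitude]
    status, residuals = gaussian(params, x=xdata, y=ydata, err=errors)
    model, = gaussian(params, x=xdata, return_models=True)
    print(status, residuals.shape, float(model.max()))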
|
bmazin/ARCONS-pipeline
|
util/fitFunctions.py
|
Python
|
gpl-2.0
| 10,022
|
[
"Gaussian"
] |
e6457e65eacfedfd7716dfec40f6e61fbe5ba0826489933f76ddb8252d61da71
|
##
# TRACK 4
# CALL & RESPONSE
# Brian Foo (brianfoo.com)
# This file builds the sequence file for use with ChucK from the data supplied
##
# Library dependencies
import csv
import json
import math
import os
import time
# Config
BPM = 60 # Beats per minute, e.g. 60, 75, 100, 120, 150
DIVISIONS_PER_BEAT = 16 # e.g. 4 = quarter notes, 8 = eighth notes, etc
BEATS_PER_PAIR = 1
VARIANCE_MS = 20 # +/- milliseconds an instrument note should be off by to give it a little more "natural" feel
GAIN = 0.5 # base gain
TEMPO = 1.0 # base tempo
# Files
INSTRUMENTS_INPUT_FILE = 'data/instruments.csv'
PAIRS_INPUT_FILE = 'data/pairs.csv'
REPORT_SUMMARY_OUTPUT_FILE = 'data/report_summary.csv'
REPORT_SEQUENCE_OUTPUT_FILE = 'data/report_sequence.csv'
INSTRUMENTS_OUTPUT_FILE = 'data/ck_instruments.csv'
SEQUENCE_OUTPUT_FILE = 'data/ck_sequence.csv'
VISUALIZATION_OUTPUT_FILE = 'visualization/data/pairs.json'
INSTRUMENTS_DIR = 'instruments/'
# Output options
WRITE_SEQUENCE = True
WRITE_REPORT = True
WRITE_JSON = True
# Calculations
BEAT_MS = round(60.0 / BPM * 1000)
ROUND_TO_NEAREST = round(BEAT_MS/DIVISIONS_PER_BEAT)
PAIR_MS = BEATS_PER_PAIR * BEAT_MS
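# With the defaults above: BEAT_MS = round(60.0/60 * 1000) = 1000 ms per beat,
# and PAIR_MS = 1 * 1000 = 1000 ms, i.e. one data pair is sonified per second.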
print('Building sequence at '+str(BPM)+' BPM ('+str(BEAT_MS)+'ms per beat)')
# Initialize Variables
instruments = []
pairs = []
sequence = []
hindex = 0
total_ms = 0
min_percent = None
max_percent = None
min_total = None
max_total = None
min_year = None
max_year = None
min_diff = None
max_diff = None
# For creating pseudo-random numbers
def halton(index, base):
result = 0.0
f = 1.0 / base
i = 1.0 * index
while(i > 0):
result += f * (i % base)
i = math.floor(i / base)
f = f / base
return result
# Find index of first item that matches value
def findInList(list, key, value):
found = -1
for index, item in enumerate(list):
if item[key] == value:
found = index
break
return found
# Mean of list
def mean(data, key):
n = len(data)
if n < 1:
return 0
else:
list = [i[key] for i in data]
return sum(list)/n
# round {n} to nearest {nearest}
def roundToNearest(n, nearest):
return 1.0 * round(1.0*n/nearest) * nearest
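# e.g. roundToNearest(130, 25) == 125.0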
# Read instruments from file
with open(INSTRUMENTS_INPUT_FILE, 'rb') as f:
r = csv.reader(f, delimiter=',')
next(r, None) # remove header
for file,f_percent_min,f_percent_max,m_percent_min,m_percent_max,t_percent_min,t_percent_max,f_avg_min,f_avg_max,m_avg_min,m_avg_max,t_avg_min,t_avg_max,rvb_max,gain,tempo,tempo_offset,interval_phase,interval,interval_offset,active in r:
if int(active):
index = len(instruments)
# build instrument object
_beat_ms = int(round(BEAT_MS/(float(tempo)*TEMPO)))
instrument = {
'file': INSTRUMENTS_DIR + file,
'index': index,
'f_percent_min': float(f_percent_min),
'f_percent_max': float(f_percent_max),
'm_percent_min': float(m_percent_min),
'm_percent_max': float(m_percent_max),
't_percent_min': float(t_percent_min),
't_percent_max': float(t_percent_max),
'f_avg_min': float(f_avg_min),
'f_avg_max': float(f_avg_max),
'm_avg_min': float(m_avg_min),
'm_avg_max': float(m_avg_max),
't_avg_min': float(t_avg_min),
't_avg_max': float(t_avg_max),
'rvb_max': float(rvb_max),
'gain': float(gain) * GAIN,
'to_tempo': float(tempo) * TEMPO,
'tempo_offset': float(tempo_offset),
'interval_ms': int(int(interval_phase)*_beat_ms),
'interval': int(interval),
'interval_offset': int(interval_offset),
'beat_ms': _beat_ms
}
# add instrument to instruments
instruments.append(instrument)
# Read pairs from file
elapsed = 0
with open(PAIRS_INPUT_FILE, 'rb') as f:
r = csv.reader(f, delimiter=',')
next(r, None) # remove header
for _order, _f_race, _m_race, _f_percent, _m_percent, _diff, _total, _year in r:
# Keep track of min/max
if min_percent is None:
min_percent = int(_f_percent)
if max_percent is None:
max_percent = int(_f_percent)
if min_total is None:
min_total = int(_total)
if max_total is None:
max_total = int(_total)
if min_year is None:
min_year = int(_year)
if max_year is None:
max_year = int(_year)
if min_diff is None:
min_diff = int(_diff)
if max_diff is None:
max_diff = int(_diff)
min_percent = min([min_percent, int(_f_percent), int(_m_percent)])
max_percent = max([max_percent, int(_f_percent), int(_m_percent)])
min_total = min([min_total, int(_total)])
max_total = max([max_total, int(_total)])
min_year = min([min_year, int(_year)])
max_year = max([max_year, int(_year)])
min_diff = min([min_diff, int(_diff)])
max_diff = max([max_diff, int(_diff)])
# Add pair to list
index = len(pairs)
pairs.append({
'index': index,
'f_race': _f_race,
'm_race': _m_race,
'f_percent': int(_f_percent),
'm_percent': int(_m_percent),
'diff_percent': int(_diff),
'total': int(_total),
'year': int(_year),
'start_ms': elapsed,
'stop_ms': elapsed + PAIR_MS
})
elapsed += PAIR_MS
# Report pair data
print('Retrieved pairs data with '+ str(len(pairs)) + ' data points')
print('Percent range: ['+str(min_percent)+','+str(max_percent)+']')
print('Total range: ['+str(min_total)+','+str(max_total)+']')
# Calculate total time
total_beats = 1.0 * len(pairs) * BEATS_PER_PAIR
total_ms = elapsed
total_seconds = int(1.0*total_ms/1000)
print('Main sequence time: '+time.strftime('%M:%S', time.gmtime(total_seconds)) + ' (' + str(total_seconds) + 's, '+str(total_beats)+' beats)')
print(str(PAIR_MS)+'ms per pair')
print(str(PAIR_MS*6)+'ms per pair total')
# Add normalized values
for i, pair in enumerate(pairs):
f_percent = (1.0 * pair['f_percent'] - min_percent) / (max_percent - min_percent)
m_percent = (1.0 * pair['m_percent'] - min_percent) / (max_percent - min_percent)
total = (1.0 * pair['total'] - min_total) / (max_total - min_total)
diff_percent = (1.0 * pair['diff_percent'] - min_diff) / (max_diff - min_diff)
pairs[i]['f_percent_n'] = f_percent
pairs[i]['m_percent_n'] = m_percent
pairs[i]['total_n'] = total
pairs[i]['diff_percent_n'] = diff_percent
# Add pair averages
avg_queue = []
for i, pair in enumerate(pairs):
avg_queue.append(pair.copy())
if pair['year'] >= max_year:
f_avg = mean(avg_queue, 'f_percent_n')
m_avg = mean(avg_queue, 'm_percent_n')
t_avg = mean(avg_queue, 'total_n')
for p in avg_queue:
pairs[p['index']]['f_percent_n_avg'] = f_avg
pairs[p['index']]['m_percent_n_avg'] = m_avg
pairs[p['index']]['total_n_avg'] = t_avg
avg_queue = []
# Return if the instrument should be played in the given interval
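# e.g. an instrument with interval_ms=1000, interval=4, interval_offset=1 is only
# active while floor(elapsed_ms/1000) % 4 == 1 (1000-1999ms, 5000-5999ms, ...)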
def isValidInterval(instrument, elapsed_ms):
interval_ms = instrument['interval_ms']
interval = instrument['interval']
interval_offset = instrument['interval_offset']
return int(math.floor(1.0*elapsed_ms/interval_ms)) % interval == interval_offset
# Add beats to sequence
def addBeatsToSequence(instrument, rvb, duration, ms, beat_ms, round_to):
global sequence
global hindex
offset_ms = int(instrument['tempo_offset'] * instrument['beat_ms'])
ms += offset_ms
previous_ms = int(ms)
remaining_duration = int(duration)
elapsed_duration = offset_ms
while remaining_duration > 0:
elapsed_ms = int(ms)
percent_complete = 1.0 * elapsed_duration / duration
this_beat_ms = instrument['beat_ms']
# add to sequence if in valid interval
if isValidInterval(instrument, elapsed_ms):
h = halton(hindex, 3)
variance = int(h * VARIANCE_MS * 2 - VARIANCE_MS)
sequence.append({
'instrument_index': instrument['index'],
'instrument': instrument,
'position': 0,
'rate': 1,
'gain': instrument['gain'],
'reverb': round(rvb,2),
'elapsed_ms': max([elapsed_ms + variance, 0])
})
hindex += 1
remaining_duration -= this_beat_ms
elapsed_duration += this_beat_ms
ms += this_beat_ms
# Build sequence
for instrument in instruments:
ms = 0
queue_duration = 0
fp0 = instrument['f_percent_min']
fp1 = instrument['f_percent_max']
mp0 = instrument['m_percent_min']
mp1 = instrument['m_percent_max']
tp0 = instrument['t_percent_min']
tp1 = instrument['t_percent_max']
fa0 = instrument['f_avg_min']
fa1 = instrument['f_avg_max']
ma0 = instrument['m_avg_min']
ma1 = instrument['m_avg_max']
ta0 = instrument['t_avg_min']
ta1 = instrument['t_avg_max']
# Each pair
for pair in pairs:
# Check if instrument is valid for this pair
fp = pair['f_percent_n']
mp = pair['m_percent_n']
tp = pair['total_n']
fa = pair['f_percent_n_avg']
ma = pair['m_percent_n_avg']
ta = pair['total_n_avg']
is_valid = (fp>=fp0 and fp<fp1 and mp>=mp0 and mp<mp1 and tp>=tp0 and tp<tp1 and fa>=fa0 and fa<fa1 and ma>=ma0 and ma<ma1 and ta>=ta0 and ta<ta1)
if instrument['rvb_max'] > 0:
if is_valid:
rvb = pair['diff_percent_n'] * instrument['rvb_max']
addBeatsToSequence(instrument.copy(), rvb, PAIR_MS, ms, BEAT_MS, ROUND_TO_NEAREST)
ms += PAIR_MS
else:
# Instrument not here, just add the pair duration and continue
if not is_valid and queue_duration > 0:
addBeatsToSequence(instrument.copy(), 0, queue_duration, ms, BEAT_MS, ROUND_TO_NEAREST)
ms += queue_duration + PAIR_MS
queue_duration = 0
elif not is_valid:
ms += PAIR_MS
else:
queue_duration += PAIR_MS
if queue_duration > 0:
addBeatsToSequence(instrument.copy(), 0, queue_duration, ms, BEAT_MS, ROUND_TO_NEAREST)
# Sort sequence
sequence = sorted(sequence, key=lambda k: k['elapsed_ms'])
# Add milliseconds to sequence
elapsed = 0
for index, step in enumerate(sequence):
sequence[index]['milliseconds'] = step['elapsed_ms'] - elapsed
elapsed = step['elapsed_ms']
# Write instruments to file
if WRITE_SEQUENCE and len(instruments) > 0:
with open(INSTRUMENTS_OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
for index, instrument in enumerate(instruments):
w.writerow([index])
w.writerow([instrument['rvb_max']])
w.writerow([instrument['file']])
f.seek(-2, os.SEEK_END) # remove newline
f.truncate()
print('Successfully wrote instruments to file: '+INSTRUMENTS_OUTPUT_FILE)
# Write sequence to file
if WRITE_SEQUENCE and len(sequence) > 0:
with open(SEQUENCE_OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
for step in sequence:
w.writerow([step['instrument_index']])
w.writerow([step['position']])
w.writerow([step['gain']])
w.writerow([step['rate']])
w.writerow([step['reverb']])
w.writerow([step['milliseconds']])
f.seek(-2, os.SEEK_END) # remove newline
f.truncate()
print('Successfully wrote sequence to file: '+SEQUENCE_OUTPUT_FILE)
# Write summary files
if WRITE_REPORT:
with open(REPORT_SUMMARY_OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
w.writerow(['Time', 'Year', 'F Race', 'M Race', 'F Percent', 'M Percent', 'T Percent', 'F Avg', 'M Avg', 'T Avg'])
for pair in pairs:
elapsed = pair['start_ms']
elapsed_f = time.strftime('%M:%S', time.gmtime(int(elapsed/1000)))
ms = int(elapsed % 1000)
elapsed_f += '.' + str(ms)
w.writerow([elapsed_f, pair['year'], pair['f_race'], pair['m_race'], pair['f_percent_n'], pair['m_percent_n'], pair['total_n'], pair['f_percent_n_avg'], pair['m_percent_n_avg'], pair['total_n_avg']])
print('Successfully wrote summary file: '+REPORT_SUMMARY_OUTPUT_FILE)
# Write sequence report to file
if WRITE_REPORT and len(sequence) > 0:
with open(REPORT_SEQUENCE_OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
w.writerow(['Time', 'Instrument', 'Gain', 'Reverb'])
for step in sequence:
instrument = instruments[step['instrument_index']]
elapsed = step['elapsed_ms']
elapsed_f = time.strftime('%M:%S', time.gmtime(int(elapsed/1000)))
ms = int(elapsed % 1000)
elapsed_f += '.' + str(ms)
w.writerow([elapsed_f, instrument['file'], step['gain'], step['reverb']])
f.seek(-2, os.SEEK_END) # remove newline
f.truncate()
print('Successfully wrote sequence report to file: '+REPORT_SEQUENCE_OUTPUT_FILE)
# Write JSON data for the visualization
if WRITE_JSON:
json_data = {
'min_percent': min_percent,
'max_percent': max_percent,
'min_year': min_year,
'max_year': max_year,
'pairs': pairs
}
with open(VISUALIZATION_OUTPUT_FILE, 'w') as outfile:
json.dump(json_data, outfile)
print('Successfully wrote to JSON file: '+VISUALIZATION_OUTPUT_FILE)
|
shawngraham/music-lab-scripts
|
04_dating/dating.py
|
Python
|
mit
| 12,150
|
[
"Brian"
] |
906a3dcd60dba4c21883b3b8fe1080baf0c166b7f00e993a8aa22134da79214f
|
import os
import PythonQt
from PythonQt import QtCore, QtGui
from ddapp import callbacks
import ddapp.applogic as app
import ddapp.objectmodel as om
import ddapp.visualization as vis
import ddapp.vtkAll as vtk
from ddapp import jointcontrol
from ddapp import getDRCBaseDir
from ddapp import lcmUtils
from ddapp import filterUtils
from ddapp import transformUtils
from ddapp import drcargs
import drc as lcmdrc
import math
import numpy as np
import json
with open(drcargs.args().directorConfigFile) as directorConfigFile:
directorConfig = json.load(directorConfigFile)
directorConfigDirectory = os.path.dirname(os.path.abspath(directorConfigFile.name))
fixedPointFile = os.path.join(directorConfigDirectory, directorConfig['fixedPointFile'])
urdfConfig = directorConfig['urdfConfig']
for key, urdf in list(urdfConfig.items()):
urdfConfig[key] = os.path.join(directorConfigDirectory, urdf)
handCombinations = directorConfig['handCombinations']
numberOfHands = len(handCombinations)
headLink = directorConfig['headLink']
def getRobotGrayColor():
return QtGui.QColor(177, 180, 190)
def getRobotOrangeColor():
return QtGui.QColor(255, 190, 0)
def getRobotBlueColor():
return QtGui.QColor(170, 255, 255)
class RobotModelItem(om.ObjectModelItem):
MODEL_CHANGED_SIGNAL = 'MODEL_CHANGED_SIGNAL'
def __init__(self, model):
modelName = os.path.basename(model.filename())
om.ObjectModelItem.__init__(self, modelName, om.Icons.Robot)
self.views = []
self.model = None
self.callbacks.addSignal(self.MODEL_CHANGED_SIGNAL)
self.useUrdfColors = False
self.addProperty('Filename', model.filename())
self.addProperty('Visible', model.visible())
self.addProperty('Alpha', model.alpha(),
attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1, hidden=False))
self.addProperty('Textures', True)
self.addProperty('Color', model.color())
self.setModel(model)
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Alpha':
self.model.setAlpha(self.getProperty(propertyName))
elif propertyName == 'Visible':
self.model.setVisible(self.getProperty(propertyName))
elif propertyName == 'Textures':
self.model.setTexturesEnabled(self.getProperty(propertyName))
self._updateModelColor()
elif propertyName == 'Color':
self._updateModelColor()
self._renderAllViews()
def hasDataSet(self, dataSet):
return len(self.model.getLinkNameForMesh(dataSet)) != 0
def connectModelChanged(self, func):
return self.callbacks.connect(self.MODEL_CHANGED_SIGNAL, func)
def disconnectModelChanged(self, callbackId):
self.callbacks.disconnect(callbackId)
def onModelChanged(self):
self.callbacks.process(self.MODEL_CHANGED_SIGNAL, self)
if self.getProperty('Visible'):
self._renderAllViews()
def onDisplayChanged(self):
if self.getProperty('Visible'):
self._renderAllViews()
def _renderAllViews(self):
for view in self.views:
view.render()
def getLinkFrame(self, linkName):
t = vtk.vtkTransform()
t.PostMultiply()
if self.model.getLinkToWorld(linkName, t):
return t
else:
return None
def getHeadLink(self):
return headLink
def getLinkContactPoints(self, linkName):
pts = self.model.getBodyContactPoints(linkName)
numberOfPoints = len(pts)/3
return np.array(pts).reshape(numberOfPoints, 3)
def setModel(self, model):
assert model is not None
if model == self.model:
return
model.disconnect('modelChanged()', self.onModelChanged)
model.disconnect('displayChanged()', self.onDisplayChanged)
views = list(self.views)
self.removeFromAllViews()
self.model = model
self.model.setAlpha(self.getProperty('Alpha'))
self.model.setVisible(self.getProperty('Visible'))
self.model.setTexturesEnabled(self.getProperty('Textures'))
self._updateModelColor()
self.setProperty('Filename', model.filename())
model.connect('modelChanged()', self.onModelChanged)
model.connect('displayChanged()', self.onDisplayChanged)
for view in views:
self.addToView(view)
self.onModelChanged()
def _updateModelColor(self):
if self.getProperty('Textures'):
self._setupTextureColors()
elif not self.useUrdfColors:
color = QtGui.QColor(*[c*255 for c in self.getProperty('Color')])
self.model.setColor(color)
def _setupTextureColors(self):
# custom colors for non-textured robotiq hand
for name in self.model.getLinkNames():
strs = name.split('_')
if len(strs) >= 2 and strs[0] in ['left', 'right'] and strs[1] in ('finger', 'palm') or name.endswith('hand_force_torque'):
self.model.setLinkColor(name, QtGui.QColor(90, 90, 90) if strs[1] == 'finger' else QtGui.QColor(20,20,20))
else:
self.model.setLinkColor(name, QtGui.QColor(255,255,255))
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
self.model.addToRenderer(view.renderer())
view.render()
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
assert len(self.views) == 0
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
self.model.removeFromRenderer(view.renderer())
view.render()
def loadRobotModel(name, view=None, parent='planning', urdfFile=None, color=None, visible=True):
if not urdfFile:
urdfFile = urdfConfig['default']
if isinstance(parent, str):
parent = om.getOrCreateContainer(parent)
model = loadRobotModelFromFile(urdfFile)
if not model:
raise Exception('Error loading robot model from file: %s' % urdfFile)
obj = RobotModelItem(model)
om.addToObjectModel(obj, parent)
obj.setProperty('Visible', visible)
obj.setProperty('Name', name)
obj.setProperty('Color', color or getRobotGrayColor())
if view is not None:
obj.addToView(view)
jointController = jointcontrol.JointController([obj], fixedPointFile)
jointController.setNominalPose()
return obj, jointController
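# Hypothetical usage sketch (assumes an existing ddapp view object; names are
# illustrative, not part of the original module):
#   robotModelObj, jointController = loadRobotModel('robot state model', view)
#   robotModelObj.setProperty('Alpha', 0.5)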
def loadRobotModelFromFile(filename):
model = PythonQt.dd.ddDrakeModel()
if not model.loadFromFile(filename):
return None
return model
def loadRobotModelFromString(xmlString):
model = PythonQt.dd.ddDrakeModel()
if not model.loadFromXML(xmlString):
return None
return model
def openUrdf(filename, view):
model = loadRobotModelFromFile(filename)
if model:
model = RobotModelItem(model)
om.addToObjectModel(model)
model.addToView(view)
return model
def getExistingRobotModels():
return [obj for obj in om.getObjects() if isinstance(obj, RobotModelItem)]
_modelPublisherString = None
def getModelPublisherString():
return _modelPublisherString
def updateModelPublisherString(msg):
global _modelPublisherString
_modelPublisherString = msg.urdf_xml_string
return _modelPublisherString
def onModelPublisherString(msg):
lastStr = getModelPublisherString()
if updateModelPublisherString(msg) == lastStr:
return
print 'reloading models with new model publisher string'
if lastStr is not None:
app.showInfoMessage('A model publisher string was received that differs from the previous string. '
'Models will be reloaded with the new string.',
title='Model publisher string changed')
objs = getExistingRobotModels()
for obj, jointController in _modelsToReload:
print 'reloading model:', obj.getProperty('Name')
newModel = loadRobotModelFromString(getModelPublisherString())
obj.setModel(newModel)
jointController.push()
def startModelPublisherListener(modelsToReload):
global _modelsToReload
_modelsToReload = modelsToReload
lcmUtils.addSubscriber('ROBOT_MODEL', lcmdrc.robot_urdf_t, onModelPublisherString)
def setupPackagePaths():
searchPaths = [
'software/models/atlas_v3',
'software/models/atlas_v4',
'software/models/atlas_v5',
'software/models/valkyrie',
'software/models/val_description',
'software/models/valkyrie_original',
'software/models/lwr_defs',
'software/models/mit_gazebo_models/mit_robot',
'software/models/mit_gazebo_models/V1',
'software/models/common_components/multisense_sl',
'software/models/common_components/irobot_hand',
'software/models/common_components/handle_description',
'software/models/common_components/robotiq_hand_description',
'software/models/common_components/schunk_description',
'software/models/otdf',
]
for path in searchPaths:
PythonQt.dd.ddDrakeModel.addPackageSearchPath(os.path.join(getDRCBaseDir(), path))
environmentVariables = ['ROS_PACKAGE_PATH']
for e in environmentVariables:
paths = os.environ.get(e, '').split(':')
for path in paths:
for root, dirnames, filenames in os.walk(path):
if os.path.isfile(os.path.join(root, 'package.xml')) or os.path.isfile(os.path.join(root, 'manifest.xml')):
PythonQt.dd.ddDrakeModel.addPackageSearchPath(root)
setupPackagePaths()
class HandFactory(object):
def __init__(self, robotModel, defaultLeftHandType=None, defaultRightHandType=None):
self.robotModel = robotModel
self.loaders = {}
if (numberOfHands==0):
self.defaultHandTypes = {}
elif (numberOfHands==1):
if not defaultLeftHandType:
defaultLeftHandType = handCombinations[0]['handType']
self.defaultHandTypes = { 'left' : defaultLeftHandType }
elif (numberOfHands==2):
if not defaultLeftHandType:
defaultLeftHandType = handCombinations[0]['handType']
if not defaultRightHandType:
defaultRightHandType = handCombinations[1]['handType']
self.defaultHandTypes = {
'left' : defaultLeftHandType,
'right' : defaultRightHandType
}
def getLoader(self, side):
assert side in self.defaultHandTypes.keys()
handType = self.defaultHandTypes[side]
loader = self.loaders.get(handType)
if loader is None:
loader = HandLoader(handType, self.robotModel)
self.loaders[handType] = loader
return loader
def newPolyData(self, side, view, name=None, parent=None):
loader = self.getLoader(side)
name = name or self.defaultHandTypes[side].replace('_', ' ')
return loader.newPolyData(name, view, parent=parent)
def placeHandModelWithTransform(self, transform, view, side, name=None, parent=None):
handObj = self.newPolyData(side, view, name=name, parent=parent)
handObj.setProperty('Visible', True)
handFrame = handObj.children()[0]
handFrame.copyFrame(transform)
return handObj, handFrame
class HandLoader(object):
def __init__(self, handType, robotModel):
'''
handType is of the form 'left_robotiq' or 'right_valkyrie'
'''
def toFrame(xyzrpy):
rpy = [math.degrees(rad) for rad in xyzrpy[3:]]
return transformUtils.frameFromPositionAndRPY(xyzrpy[:3], rpy)
self.side, self.handType = handType.split('_')
assert self.side in ('left', 'right')
thisCombination = None
for i in range(0, numberOfHands ):
if (handCombinations[i]['side'] == self.side):
thisCombination = handCombinations[i]
break
assert thisCombination is not None
self.handLinkName = thisCombination['handLinkName']
self.handUrdf = thisCombination['handUrdf']
handRootLink = thisCombination['handRootLink']
robotMountLink = thisCombination['robotMountLink']
palmLink = thisCombination['palmLink']
self.loadHandModel()
baseToHandRoot = self.getLinkToLinkTransform(self.handModel, 'plane::xy::base', handRootLink)
robotMountToHandRoot = self.getLinkToLinkTransform(robotModel, robotMountLink, handRootLink)
robotMountToHandLink = self.getLinkToLinkTransform(robotModel, robotMountLink, self.handLinkName)
robotMountToPalm = self.getLinkToLinkTransform(robotModel, robotMountLink, palmLink)
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(baseToHandRoot)
t.Concatenate(robotMountToHandRoot.GetLinearInverse())
t.Concatenate(robotMountToPalm)
self.modelToPalm = t
self.handLinkToPalm = self.getLinkToLinkTransform(robotModel, self.handLinkName, palmLink)
self.palmToHandLink = self.handLinkToPalm.GetLinearInverse()
def getHandUrdf(self):
urdfBase = os.path.join(getDRCBaseDir(), 'software/models/common_components')
return os.path.join(urdfBase, 'hand_factory', self.handUrdf)
@staticmethod
def getLinkToLinkTransform(model, linkA, linkB):
linkAToWorld = model.getLinkFrame(linkA)
linkBToWorld = model.getLinkFrame(linkB)
assert linkAToWorld
assert linkBToWorld
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(linkAToWorld)
t.Concatenate(linkBToWorld.GetLinearInverse())
return t
def loadHandModel(self):
filename = self.getHandUrdf()
handModel = loadRobotModelFromFile(filename)
handModel = RobotModelItem(handModel)
self.handModel = handModel
'''
color = [1.0, 1.0, 0.0]
if self.side == 'right':
color = [0.33, 1.0, 0.0]
handModel = RobotModelItem(handModel)
om.addToObjectModel(handModel, om.getOrCreateContainer('hands'))
handModel.setProperty('Name', os.path.basename(filename).replace('.urdf', '').replace('_', ' '))
handModel.setProperty('Visible', False)
color = np.array(color)*255
handModel.setProperty('Color', QtGui.QColor(color[0], color[1], color[2]))
handModel.setProperty('Alpha', 1.0)
#handModel.addToView(view)
'''
def newPolyData(self, name, view, parent=None):
self.handModel.model.setJointPositions(np.zeros(self.handModel.model.numberOfJoints()))
polyData = vtk.vtkPolyData()
self.handModel.model.getModelMesh(polyData)
polyData = filterUtils.transformPolyData(polyData, self.modelToPalm)
if isinstance(parent, str):
parent = om.getOrCreateContainer(parent)
color = [1.0, 1.0, 0.0]
if self.side == 'right':
color = [0.33, 1.0, 0.0]
obj = vis.showPolyData(polyData, name, view=view, color=color, visible=False, parent=parent)
obj.side = self.side
frame = vtk.vtkTransform()
frame.PostMultiply()
obj.actor.SetUserTransform(frame)
frameObj = vis.showFrame(frame, '%s frame' % name, view=view, scale=0.2, visible=False, parent=obj)
return obj
def getPalmToWorldTransform(self, robotModel):
handLinkToWorld = robotModel.getLinkFrame(self.handLinkName)
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(self.palmToHandLink)
t.Concatenate(handLinkToWorld)
return t
def moveToRobot(self, robotModel):
handLinkToWorld = robotModel.getLinkFrame(self.handLinkName)
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(self.modelToPalm)
t.Concatenate(self.palmToHandLink)
t.Concatenate(handLinkToWorld)
self.moveHandModelToFrame(self.handModel, t)
vis.updateFrame(self.getPalmToWorldTransform(robotModel), '%s palm' % self.side)
def moveToGraspFrame(self, frame):
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(self.modelToPalm)
t.Concatenate(frame)
self.moveHandModelToFrame(self.handModel, t)
@staticmethod
def moveHandModelToFrame(model, frame):
pos, quat = transformUtils.poseFromTransform(frame)
rpy = transformUtils.quaternionToRollPitchYaw(quat)
pose = np.hstack((pos, rpy))
model.model.setJointPositions(pose, ['base_x', 'base_y', 'base_z', 'base_roll', 'base_pitch', 'base_yaw'])
def setRobotiqJointsToOpenHand(robotModel):
for side in ['left', 'right']:
setRobotiqJoints(robotModel, side, [0.0, 0.0, 0.0], [0.0, 0.0, 0.0])
def setRobotiqJointsToClosedHand(robotModel):
for side in ['left', 'right']:
setRobotiqJoints(robotModel, side, [1.0, 1.0, 1.0], [0.0, 0.0, 0.0])
def setRobotiqJointsToPinchOpenHand(robotModel):
for side in ['left', 'right']:
setRobotiqJoints(robotModel, side, [0.25, 0.0, -0.55], [-0.15, 0.15, 0.0])
def setRobotiqJointsToPinchClosedHand(robotModel):
for side in ['left', 'right']:
setRobotiqJoints(robotModel, side, [0.8, 0.0, -0.55], [-0.15, 0.15, 0.0])
def setRobotiqJoints(robotModel, side, fingers=[0.0, 0.0, 0.0], palm=[0.0, 0.0, 0.0]):
robotModel.model.setJointPositions(np.tile(fingers, 3), ['%s_finger_%s_joint_%d' % (side, n, i+1) for n in ['1', '2', 'middle'] for i in range(3)])
robotModel.model.setJointPositions(palm, ['%s_palm_finger_%s_joint' % (side, n) for n in ['1', '2', 'middle']])
def getRobotiqJoints():
return ['%s_finger_%s_joint_%d' % (side, n, i+1) for n in ['1', '2', 'middle'] for i in range(3) for side in ['left', 'right']] + \
['%s_palm_finger_%s_joint' % (side, n) for n in ['1', '2', 'middle'] for side in ['left', 'right']]
|
gizatt/director
|
src/python/ddapp/roboturdf.py
|
Python
|
bsd-3-clause
| 18,377
|
[
"VTK"
] |
d0535c0bb2b801c1d210b3af939c3dca332bcca8a662f7182df5e36071ffcb70
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectral_ops."""
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import spectral_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SpectralOpsTest(test.TestCase, parameterized.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
frames = np.fft.irfft(stft, fft_length)
# Pad or truncate frames's inner dimension to window_length.
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
# worth of points for each additional window.
output_length = window_length + (num_frames - 1) * hop_length
output = np.zeros(output_length)
for i in range(num_frames):
output[i * hop_length:i * hop_length + window_length] += stft[i,]
return output
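# For example, 3 frames of length 4 overlap-added with hop_length 2 produce an
# output of length 4 + (3 - 1) * 2 = 8 samples.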
def _compare(self, signal, frame_length, frame_step, fft_length, tol):
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder_with_default(signal, shape=signal.shape)
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = self.evaluate(
[actual_stft, actual_stft_from_ph, actual_inverse_stft])
actual_stft_ph = array_ops.placeholder_with_default(
actual_stft, shape=actual_stft.shape)
actual_inverse_stft_from_ph = self.evaluate(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length))
# Confirm that there is no difference in output when shape/rank is fully
# unknown or known.
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, rtol=tol, atol=tol)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, rtol=tol, atol=tol)
def test_shapes(self):
signal = np.zeros((512,)).astype(np.float32)
# If fft_length is not provided, the smallest enclosing power of 2 of
# frame_length (8) is used.
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([256], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)
@parameterized.parameters(
(512, 64, 32, 64, np.float32, 1e-4),
(512, 64, 32, 64, np.float64, 1e-8),
(512, 64, 64, 64, np.float32, 1e-4),
(512, 64, 64, 64, np.float64, 1e-8),
(512, 72, 64, 64, np.float32, 1e-4),
(512, 72, 64, 64, np.float64, 1e-8),
(512, 64, 25, 64, np.float32, 1e-4),
(512, 64, 25, 64, np.float64, 1e-8),
(512, 25, 15, 36, np.float32, 1e-4),
(512, 25, 15, 36, np.float64, 1e-8),
(123, 23, 5, 42, np.float32, 1e-4),
(123, 23, 5, 42, np.float64, 1e-8))
def test_stft_and_inverse_stft(self, signal_length, frame_length,
frame_step, fft_length, np_rtype, tol):
"""Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
signal = np.random.random(signal_length).astype(np_rtype)
self._compare(signal, frame_length, frame_step, fft_length, tol)
@parameterized.parameters(
# 87.5% overlap.
(4096, 256, 32, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 32, 256, np.float64, 1e-8, 1e-8),
# 75% overlap.
(4096, 256, 64, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 64, 256, np.float64, 1e-8, 1e-8),
# Odd frame hop.
(4096, 128, 25, 128, np.float32, 1e-3, 1e-6),
(4096, 128, 25, 128, np.float64, 5e-4, 1e-8),
# Odd frame length.
(4096, 127, 32, 128, np.float32, 1e-3, 1e-6),
(4096, 127, 32, 128, np.float64, 1e-3, 1e-8),
# 50% overlap.
(4096, 128, 64, 128, np.float32, 0.4, 1e-6),
(4096, 128, 64, 128, np.float64, 0.4, 1e-8))
def test_stft_round_trip(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, threshold,
corrected_threshold):
# Generate a random white Gaussian signal.
signal = np.random.normal(size=signal_length).astype(np_rtype)
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
inverse_stft, inverse_stft_corrected = self.evaluate(
[inverse_stft, inverse_stft_corrected])
# Truncate signal to the size of inverse stft.
signal = signal[:inverse_stft.shape[0]]
# Ignore the frame_length samples at either edge.
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
# Check that the inverse and original signal are close up to a scale
# factor.
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)
# Check that the inverse with correction and original signal are close.
self.assertLess(np.std(inverse_stft_corrected - signal),
corrected_threshold)
@parameterized.parameters(
(256, 32),
(256, 64),
(128, 25),
(127, 32),
(128, 64))
def test_inverse_stft_window_fn(self, frame_length, frame_step):
"""Test that inverse_stft_window_fn has unit gain at each window phase."""
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
hann_window, inverse_window = self.evaluate([hann_window, inverse_window])
# Expect unit gain at each phase of the window.
product_window = hann_window * inverse_window
for i in range(frame_step):
self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
@parameterized.parameters((256, 64), (128, 32))
def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step):
"""Test inverse_stft_window_fn in special overlap = 3/4 case."""
# Cases in which frame_length is an integer multiple of 4 * frame_step are
# special because they allow exact reproduction of the waveform with a
# squared Hann window (Hann window in both forward and reverse transforms).
# In the case where frame_length = 4 * frame_step, that combination
# produces a constant gain of 1.5, and so the corrected window will be the
# Hann window / 1.5.
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
self.assertAllClose(hann_window, inverse_window * 1.5)
@staticmethod
def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
fft_length=32):
"""Computes the gradient of the STFT with respect to `signal`."""
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
magnitude_stft = math_ops.abs(stft)
loss = math_ops.reduce_sum(magnitude_stft)
return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self):
"""Test that spectral_ops.stft has a working gradient."""
# TODO(rjryan): Update gradient tests for Eager.
if context.executing_eagerly():
return
with self.session() as sess:
signal_length = 512
# An all-zero signal has all zero gradients with respect to the sum of the
# magnitude STFT.
empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
empty_signal_gradient = sess.run(
self._compute_stft_gradient(empty_signal))
self.assertTrue((empty_signal_gradient == 0.0).all())
# A sinusoid will have non-zero components of its gradient with respect to
# the sum of the magnitude STFT.
sinusoid = math_ops.sin(
2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
self.assertFalse((sinusoid_gradient == 0.0).all())
@parameterized.parameters(
(64, 16, 8, 16, np.float32, 2e-3, 5e-4),
(64, 16, 8, 16, np.float64, 1e-8, 1e-8),
(64, 16, 16, 16, np.float32, 2e-3, 5e-4),
(64, 16, 16, 16, np.float64, 1e-8, 1e-8),
(64, 16, 7, 16, np.float32, 2e-3, 5e-4),
(64, 16, 7, 16, np.float64, 1e-8, 1e-8),
(64, 7, 4, 9, np.float32, 2e-3, 5e-4),
(64, 7, 4, 9, np.float64, 1e-8, 1e-8),
(29, 5, 1, 10, np.float32, 2e-3, 5e-4),
(29, 5, 1, 10, np.float64, 1e-8, 1e-8))
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="On ROCm, this fails with mismatches at some locations "
"(possibly due to peculiarities of rocFFT - investigate)")
def test_gradients_numerical(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, forward_tol, backward_tol):
# TODO(rjryan): Investigate why STFT gradient error is so high.
signal = np.random.rand(signal_length).astype(np_rtype) * 2 - 1
def forward(signal):
return spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
((f_jacob_t,), (f_jacob_n,)) = gradient_checker_v2.compute_gradient(
forward, [signal])
self.assertAllClose(f_jacob_t, f_jacob_n,
rtol=forward_tol, atol=forward_tol)
def backward(stft):
return spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length)
stft = forward(signal)
((b_jacob_t,), (b_jacob_n,)) = gradient_checker_v2.compute_gradient(
backward, [stft])
self.assertAllClose(b_jacob_t, b_jacob_n,
rtol=backward_tol, atol=backward_tol)
@parameterized.parameters(
itertools.product(
(4000,),
(256,),
(np.float32, np.float64),
("ortho", None),
("vorbis", "kaiser_bessel_derived", None),
(False, True)))
def test_mdct_round_trip(self, signal_length, frame_length, np_rtype,
norm, window_type, pad_end):
if np_rtype == np.float32:
tol = 1e-5
else:
if window_type == "kaiser_bessel_derived":
tol = 1e-6
else:
tol = 1e-8
# Generate a random white Gaussian signal.
signal = np.random.normal(size=signal_length).astype(np_rtype)
if window_type == "vorbis":
window_fn = window_ops.vorbis_window
elif window_type == "kaiser_bessel_derived":
window_fn = window_ops.kaiser_bessel_derived_window
elif window_type is None:
window_fn = None
mdct = spectral_ops.mdct(signal, frame_length, norm=norm,
window_fn=window_fn, pad_end=pad_end)
inverse_mdct = spectral_ops.inverse_mdct(mdct, norm=norm,
window_fn=window_fn)
inverse_mdct = self.evaluate(inverse_mdct)
# Truncate signal and inverse_mdct to their minimum length.
min_length = np.minimum(signal.shape[0], inverse_mdct.shape[0])
# Ignore the half_len samples at either edge.
half_len = frame_length // 2
signal = signal[half_len:min_length-half_len]
inverse_mdct = inverse_mdct[half_len:min_length-half_len]
# Check that the inverse and original signal are close.
self.assertAllClose(inverse_mdct, signal, atol=tol, rtol=tol)
if __name__ == "__main__":
test.main()
|
tensorflow/tensorflow
|
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
|
Python
|
apache-2.0
| 15,849
|
[
"Gaussian"
] |
4b8680cdd21f6dde05a8dac0c24dc10530ececb21f34313b023fcdba3a47c9da
|
#!/home/user/anaconda2/bin/python
## Modified from Daniel Buscombe's starter script at https://github.com/dbuscombe-usgs/pyhum
## Supply .DAT file using -i and SON folder using -s
## OR cd into a directory with both .DAT/SON and process all using 'humall' (~/bin/humall)
import sys, getopt
from Tkinter import Tk
from tkFileDialog import askopenfilename, askdirectory
import PyHum
import os
if __name__ == '__main__':
argv = sys.argv[1:]
humfile = ''; sonpath = ''
cs = 26983 # default to Maine East State Plane meters if no argument given
# parse inputs to variables
try:
opts, args = getopt.getopt(argv,"hi:s:e:")
except getopt.GetoptError:
print 'error'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'help'
sys.exit()
elif opt in ("-i"):
humfile = arg
elif opt in ("-s"):
sonpath = arg
elif opt in ("-e"):
cs = arg
# prompt user to supply file if no input file given
if not humfile:
print 'An input file is required!!!!!!'
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
humfile = askopenfilename(filetypes=[("DAT files","*.DAT")])
# prompt user to supply directory if no input sonpath is given
if not sonpath:
print 'A *.SON directory is required!!!!!!'
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
sonpath = askdirectory()
# print given arguments to screen and convert data type where necessary
if humfile:
print 'Input file is %s' % (humfile)
if sonpath:
print 'Son files are in %s' % (sonpath)
doplot = 1 #yes
# reading specific settings
cs2cs_args = "epsg:"+str(cs)
# for mapping
res = 99 # grid resolution in metres
# if res==99, the program will automatically calc res from the spatial res of the scans
mode = 1 # gridding mode (simple nearest neighbour)
#mode = 2 # gridding mode (inverse distance weighted nearest neighbour)
#mode = 3 # gridding mode (gaussian weighted nearest neighbour)
#dowrite = 1 #writing of point cloud data to file
use_uncorrected = 0
nn = 64 # number of nearest neighbours for gridding (used if mode > 1)
#influence = 1 # Radius of influence used in gridding. Cut-off distance in meters.
numstdevs = 5 # Threshold number of standard deviations in sidescan intensity per grid cell up to which to accept
## grid and map the scans
PyHum.map(humfile, sonpath, cs2cs_args, res, mode, nn, numstdevs, use_uncorrected) #dowrite,
|
iannesbitt/sidescantools
|
hummap.py
|
Python
|
mit
| 2,643
|
[
"Gaussian"
] |
850b344d839744e449e82571cf5dc12e8ee6641984b8d89c4beec67667b6c22d
|
import numpy as np
import scipy.optimize as opt
from wavelets import Morlet
import pylab as plb
class Wavenet():
def __init__(self, wavelet, ncount, x, y):
"""Create wavenet.
Args:
cn: int: Number of neurons
Returns:
net: Wavenet
Attributes:
param: dict: Parameters of net
ncount: int: Count of neuron
"""
self.wt = wavelet
self.param = {}
self.param['p'] = np.ones(ncount)*5
self.deltax = np.max(x)-np.min(x)
countx = (np.max(x)-np.min(x))
deltay = np.max(y)-np.min(y)
self.param['b'] = np.ones(ncount)*(np.max(x)+np.min(x))*0.5
#self.param['b'] = np.random.random(ncount)
self.param['c'] = np.zeros(1) + np.mean(y)
self.param['d'] = np.zeros(1)
#self.param['c']= np.random.random(1)+y[0]
self.ncount = ncount
self.tau = np.vectorize(self._tau, cache=True)
self.h = np.vectorize(self.wt.wavelet, cache=True)
self.step = np.vectorize(self._step, cache=True)
self.xcount = x.shape[-1]
self.param['a'] = np.ones(ncount)*(np.max(x)-np.min(x))*0.2
self.param['w'] = np.zeros(ncount)
def __call__(self, x, *args):
self.param = self.unpack(x)
return self.energy(self.input, self.target)
def derivative(self, x, *args):
self.param = self.unpack(x)
gr = self.antigradient(self.input, self.target)
#import pdb;pdb.set_trace()
return self.pack(gr)
def sim(self, t):
"""Simulate network
Args:
t: array: Time series
Returns:
array: Net output
"""
#t = (t-self.d)*self.k
return self.step(t)+self.param['c']
def _step(self, t):
"""
Compute result for one time moment
Args:
double: t: Time moment
Return:
double: x Net resulst
"""
tau =(t-self.param['b'])/self.param['a']
return np.sum(self.wt.wavelet(self.param['p'], tau)*self.param['w'])+t*self.param['d']
def train(self, input, target, maxiter):
x0 = self.pack(self.param)
self.input=input
self.target=target
res1 = opt.fmin_bfgs(self, x0, fprime=self.derivative, maxiter=maxiter)
return res1
def pack(self, param):
x = np.array([])
for k in self.param.keys():
x = np.append(x, param[k])
return np.array(x)
def unpack(self, aparam):
inx = 0
p = {}
for k in self.param.keys():
l = self.param[k].shape[-1]
p[k] = aparam[inx:inx+l]
inx += l
return p
def _train(self, input, target, error, extend=False, epoch=None):
"""Train network
Args:
input: array: Input signal
target: array: Target output
error: double: Sumsqrt error
"""
errlist = {}
err = []
a = []
b = []
w = []
p = []
c = []
j = []
count = 0
while True:
e = self.energy(input, target)
#delta = np.abs(e-e0)
print (count+1,':', e)
err.append(e)
b.append(list(self.param['b']))
w.append(list(self.param['w']))
a.append(list(self.param['a']))
p.append(list(self.param['p']))
c.append(self.param['c'])
j.append(self.rinx)
if e <= error or epoch is not None and count >= epoch-1:
errlist['e'] = np.array(err)
errlist['a'] = np.array(a)
errlist['b'] = np.array(b)
errlist['w'] = np.array(w)
errlist['p'] = np.array(p)
errlist['c'] = np.array(c)
errlist['j'] = np.array(j)
#import pdb; pdb.set_trace()
return errlist
count += 1
#e0 = self.energy(input, target)
#da, db, dw, dp, dc = self.antigradient(input, target, extend=extend)
delta = self.antigradient(input, target, extend=extend)
self.try_dzeta(delta, input, target, extend=extend)
def error(self, input, target):
"""Error function
Args:
array: input: Input series
array: target: Target response
Return:
array: Error
"""
return target - self.sim(input)
def energy(self, input, target):
"""Energy function
Args:
array: input: Input series
array: target: Target response
Return:
array: Energy of error
"""
return np.sum(self.error(input, target)**2)/2
def _tau(self, t, k=None):
"""
Args:
t: array: time Point
k: int: index (optional, used to compute the result for a single neuron)
Return:
array: Scaled and shifted time point
"""
if k is None:
#import pdb; pdb.set_trace()
return (t-self.param['b'])/self.param['a']
else:
return (t-self.param['b'][k])/self.param['a'][k]
def antigradient(self, input, target):
"""
Return:
da:antigradient by scales
db: double: Antigradient by shiftes
dw: double: Antigradient by weightes
"""
e = self.error(input, target)
da = np.zeros(self.ncount)
db = np.zeros(self.ncount)
dw = np.zeros(self.ncount)
dp = np.zeros(self.ncount)
a = self.param['a']
p = self.param['p']
w = self.param['w']
for k in range(self.ncount):
tau = (input-self.param['b'][k])/self.param['a'][k]
h_tau = self.h(p[k], tau)
dw[k] = -np.sum(e*h_tau)
d = e*w[k]*self.wt._dh_db(p[k], tau, h_tau, a[k])
db[k] = -np.sum(d)
da[k] = -np.sum(d*tau)
dp[k] = -np.sum(e*w[k]*self.wt._dh_dp(p[k], tau, h_tau, a[k]))
dc = -np.sum(e)
dd = -np.sum(e*input)
return {'a': da, 'b': db, 'w': dw, 'p': dp, 'c': dc, 'd':dd}
def test_func(x):
if x < -2:
return -2.186*x-12.846-2
elif -2 <= x < 0:
return 4.246*x-2
elif 0 <= x:
return 10*np.exp(-.05*x - 0.5)*np.sin((0.3*x + 0.7)*x)-2
if __name__ == "__main__":
t = np.linspace(-10, 10, num=100)
target = np.vectorize(test_func)(t)
wn = Wavenet(Morlet, 30, t, target)
wn.train(t, target, maxiter=300)
plb.plot(t, target, label='Signal')
plb.plot(t, wn.sim(t), label='Approximation')
plb.show()
|
abalckin/wavenet
|
wavenet.py
|
Python
|
lgpl-3.0
| 6,671
|
[
"NEURON"
] |
52f06037266587c98d6ec562bc06dcf24ddfb0548c3eacdc3bbfd02dda665683
|
import unittest
import pylab as pl
import matplotlib as mpl
import itertools
import sys
import math
import timeit
import copy
import time
import struct
import scipy.stats.mstats as stats
import ConfigParser
import os.path
import getopt
import h5py
from gmm_specializer.gmm import *
MINVALUEFORMINUSLOG = -1000.0
class Diarizer(object):
def __init__(self, f_file_name, sp_file_name):
f = open(f_file_name, "rb")
print "...Reading in HTK feature file..."
#=== Read Feature File ==
try:
nSamples = struct.unpack('>i', f.read(4))[0]
sampPeriod = struct.unpack('>i', f.read(4))[0]
sampSize = struct.unpack('>h', f.read(2))[0]
sampKind = struct.unpack('>h', f.read(2))[0]
print "INFO: total number of frames read: ", nSamples
self.total_num_frames = nSamples
D = sampSize/4 #dimension of feature vector
l = []
count = 0
while count < (nSamples * D):
bFloat = f.read(4)
fl = struct.unpack('>f', bFloat)[0]
l.append(fl)
count = count + 1
finally:
f.close()
#=== Prune to Speech Only ==
print "...Reading in speech/nonspeech file..."
pruned_list = []
num_speech_frames = nSamples
if sp_file_name:
sp = open(sp_file_name, "r")
l_start = []
l_end = []
num_speech_frames = 0
for line in sp:
s = line.split(' ')
st = math.floor(100 * float(s[2]) + 0.5)
en = math.floor(100 * float(s[3].replace('\n','')) + 0.5)
st1 = int(st)
en1 = int(en)
l_start.append(st1*19)
l_end.append(en1*19)
num_speech_frames = num_speech_frames + (en1 - st1 + 1)
print "INFO: total number of speech frames: ", num_speech_frames
total = 0
for start in l_start:
end = l_end[l_start.index(start)]
total += (end/19 - start/19 + 1)
x = 0
index = start
while x < (end-start+19):
pruned_list.append(l[index])
index += 1
x += 1
else: #no speech file, take in all features
pruned_list = l
floatArray = np.array(pruned_list, dtype = np.float32)
self.X = floatArray.reshape(num_speech_frames, D)
self.N = self.X.shape[0]
self.D = self.X.shape[1]
def write_to_RTTM(self, rttm_file_name, sp_file_name,\
meeting_name, most_likely, num_gmms,\
seg_length):
print "...Writing out RTTM file..."
#do majority voting in chunks of 250
duration = seg_length
chunk = 0
end_chunk = duration
max_gmm_list = []
smoothed_most_likely = np.array([], dtype=np.float32)
while end_chunk < len(most_likely):
chunk_arr = most_likely[range(chunk, end_chunk)]
max_gmm = stats.mode(chunk_arr)[0][0]
max_gmm_list.append(max_gmm)
smoothed_most_likely = np.append(smoothed_most_likely, max_gmm*np.ones(250))
chunk += duration
end_chunk += duration
end_chunk -= duration
if end_chunk < len(most_likely):
chunk_arr = most_likely[range(end_chunk, len(most_likely))]
max_gmm = stats.mode(chunk_arr)[0][0]
max_gmm_list.append(max_gmm)
smoothed_most_likely = np.append(smoothed_most_likely,\
max_gmm*np.ones(len(most_likely)-end_chunk))
most_likely = smoothed_most_likely
out_file = open(rttm_file_name, 'w')
with_non_speech = -1*np.ones(self.total_num_frames)
if sp_file_name:
speech_seg = np.loadtxt(sp_file_name, delimiter=' ',usecols=(2,3))
speech_seg_i = np.round(speech_seg*100).astype('int32')
sizes = np.diff(speech_seg_i)
sizes = sizes.reshape(sizes.size)
offsets = np.cumsum(sizes)
offsets = np.hstack((0, offsets[0:-1]))
offsets += np.array(range(len(offsets)))
#populate the array with speech clusters
speech_index = 0
counter = 0
for pair in speech_seg_i:
st = pair[0]
en = pair[1]
speech_index = offsets[counter]
counter+=1
idx = 0
for x in range(st+1, en+1):
with_non_speech[x] = most_likely[speech_index+idx]
idx += 1
else:
with_non_speech = most_likely
cnum = with_non_speech[0]
cst = 0
cen = 0
for i in range(1,self.total_num_frames):
if with_non_speech[i] != cnum:
if (cnum >= 0):
start_secs = ((cst)*0.01)
dur_secs = (cen - cst + 2)*0.01
out_file.write("SPEAKER " + meeting_name + " 1 " +\
str(start_secs) + " "+ str(dur_secs) +\
" <NA> <NA> " + "speaker_" + str(cnum) + " <NA>\n")
cst = i
cen = i
cnum = with_non_speech[i]
else:
cen+=1
if cst < cen:
cnum = with_non_speech[self.total_num_frames-1]
if(cnum >= 0):
start_secs = ((cst+1)*0.01)
dur_secs = (cen - cst + 1)*0.01
out_file.write("SPEAKER " + meeting_name + " 1 " +\
str(start_secs) + " "+ str(dur_secs) +\
" <NA> <NA> " + "speaker_" + str(cnum) + " <NA>\n")
print "DONE writing RTTM file"
def write_to_GMM(self, gmmfile):
gmm_f = open(gmmfile, 'w')
gmm_f.write("Number of clusters: " + str(len(self.gmm_list)) + "\n")
#print parameters
cluster_count = 0
for gmm in self.gmm_list:
gmm_f.write("Cluster " + str(cluster_count) + "\n")
means = gmm.components.means
covars = gmm.components.covars
weights = gmm.components.weights
gmm_f.write("Number of Gaussians: "+ str(gmm.M) + "\n")
gmm_count = 0
for g in range(0, gmm.M):
g_means = means[gmm_count]
g_covar_full = covars[gmm_count]
g_covar = np.diag(g_covar_full)
g_weight = weights[gmm_count]
gmm_f.write("Gaussian: " + str(gmm_count) + "\n")
gmm_f.write("Weight: " + str(g_weight) + "\n")
for f in range(0, gmm.D):
gmm_f.write("Feature " + str(f) + " Mean " + str(g_means[f]) +\
" Var " + str(g_covar[f]) + "\n")
gmm_count+=1
cluster_count+=1
print "DONE writing GMM file"
def new_gmm(self, M, cvtype):
self.M = M
self.gmm = GMM(self.M, self.D, cvtype=cvtype)
def new_gmm_list(self, M, K, cvtype):
self.M = M
self.init_num_clusters = K
self.gmm_list = [GMM(self.M, self.D, cvtype=cvtype) for i in range(K)]
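# Hypothetical driver sketch (file names and parameter values are placeholders,
# not taken from the original script):
#   d = Diarizer('meeting.fea', 'meeting.spch')
#   d.new_gmm_list(M=5, K=16, cvtype='diag')
#   most_likely = d.cluster(em_iters=3, KL_ntop=0, NUM_SEG_LOOPS_INIT=2, NUM_SEG_LOOPS=3, seg_length=250)
#   d.write_to_RTTM('meeting.rttm', 'meeting.spch', 'meeting', most_likely, len(d.gmm_list), 250)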
def segment_majority_vote(self, interval_size, em_iters):
num_clusters = len(self.gmm_list)
# Resegment data based on likelihood scoring
likelihoods = self.gmm_list[0].score(self.X)
for g in self.gmm_list[1:]:
likelihoods = np.column_stack((likelihoods, g.score(self.X)))
if num_clusters == 1:
most_likely = np.zeros(len(self.X))
else:
most_likely = likelihoods.argmax(axis=1)
# Across 2.5 secs of observations, vote on which cluster they should be associated with
iter_training = {}
for i in range(interval_size, self.N, interval_size):
arr = np.array(most_likely[(range(i-interval_size, i))])
max_gmm = int(stats.mode(arr)[0][0])
iter_training.setdefault((self.gmm_list[max_gmm],max_gmm),[]).append(self.X[i-interval_size:i,:])
arr = np.array(most_likely[(range((self.N/interval_size)*interval_size, self.N))])
max_gmm = int(stats.mode(arr)[0][0])
iter_training.setdefault((self.gmm_list[max_gmm], max_gmm),[]).\
append(self.X[(self.N/interval_size) *interval_size:self.N,:])
iter_bic_dict = {}
iter_bic_list = []
# for each gmm, append all the segments and retrain
for gp, data_list in iter_training.iteritems():
g = gp[0]
p = gp[1]
cluster_data = data_list[0]
for d in data_list[1:]:
cluster_data = np.concatenate((cluster_data, d))
g.train(cluster_data, max_em_iters=em_iters)
iter_bic_list.append((g,cluster_data))
iter_bic_dict[p] = cluster_data
return iter_bic_dict, iter_bic_list, most_likely
def segment_majority_vote_indices(self, interval_size, em_iters):
num_clusters = len(self.gmm_list)
# Resegment data based on likelihood scoring
likelihoods = self.gmm_list[0].score(self.X)
for g in self.gmm_list[1:]:
likelihoods = np.column_stack((likelihoods, g.score(self.X)))
if num_clusters == 1:
most_likely = np.zeros(len(self.X))
else:
most_likely = likelihoods.argmax(axis=1)
# Across 2.5 secs of observations, vote on which cluster they should be associated with
iter_training = {}
for i in range(interval_size, self.N, interval_size):
arr = np.array(most_likely[(range(i-interval_size, i))])
max_gmm = int(stats.mode(arr)[0][0])
iter_training.setdefault((self.gmm_list[max_gmm],max_gmm),[]).append((i-interval_size,i))
arr = np.array(most_likely[(range((self.N/interval_size)*interval_size, self.N))])
max_gmm = int(stats.mode(arr)[0][0])
iter_training.setdefault((self.gmm_list[max_gmm], max_gmm),[]).\
append((self.N/interval_size*interval_size, self.N))
iter_bic_dict = {}
iter_bic_list = []
for gp, e_tuple_list in iter_training.iteritems():
g = gp[0]
p = gp[1]
cluster_indices = np.array(range(e_tuple_list[0][0], e_tuple_list[0][1],1), dtype=np.int32)
for d in e_tuple_list[1:]:
cluster_indices = np.concatenate((cluster_indices,\
np.array(range(d[0],d[1],1),\
dtype=np.int32)))
g.train_on_subset(self.X, cluster_indices, max_em_iters=em_iters)
iter_bic_list.append((g,cluster_indices))
iter_bic_dict[p] = cluster_indices
return iter_bic_dict, iter_bic_list, most_likely
def cluster(self, em_iters, KL_ntop, NUM_SEG_LOOPS_INIT, NUM_SEG_LOOPS, seg_length):
print " ====================== CLUSTERING ====================== "
main_start = time.time()
# ----------- Uniform Initialization -----------
# Get the events, divide them into an initial k clusters and train each GMM on a cluster
per_cluster = self.N/self.init_num_clusters
init_training = zip(self.gmm_list,np.vsplit(self.X, range(per_cluster, self.N, per_cluster)))
for g, x in init_training:
g.train(x, max_em_iters=em_iters)
# ----------- First majority vote segmentation loop ---------
for segment_iter in range(0,NUM_SEG_LOOPS_INIT):
iter_bic_dict, iter_bic_list, most_likely = self.segment_majority_vote(seg_length, em_iters)
# ----------- Main Clustering Loop using BIC ------------
# Perform hierarchical agglomeration based on BIC scores
best_BIC_score = 1.0
total_events = 0
total_loops = 0
while (best_BIC_score > 0 and len(self.gmm_list) > 1):
total_loops+=1
for segment_iter in range(0,NUM_SEG_LOOPS):
iter_bic_dict, iter_bic_list, most_likely = self.segment_majority_vote(seg_length, em_iters)
# Score all pairs of GMMs using BIC
best_merged_gmm = None
best_BIC_score = 0.0
merged_tuple = None
merged_tuple_indices = None
# ------- KL distance to compute best pairs to merge -------
if KL_ntop > 0:
top_K_gmm_pairs = self.gmm_list[0].find_top_KL_pairs(KL_ntop, self.gmm_list)
for pair in top_K_gmm_pairs:
score = 0.0
gmm1idx = pair[0]
gmm2idx = pair[1]
g1 = self.gmm_list[gmm1idx]
g2 = self.gmm_list[gmm2idx]
if gmm1idx in iter_bic_dict and gmm2idx in iter_bic_dict:
d1 = iter_bic_dict[gmm1idx]
d2 = iter_bic_dict[gmm2idx]
data = np.concatenate((d1,d2))
elif gmm1idx in iter_bic_dict:
data = iter_bic_dict[gmm1idx]
elif gmm2idx in iter_bic_dict:
data = iter_bic_dict[gmm2idx]
else:
continue
new_gmm, score = compute_distance_BIC(g1, g2, data, em_iters)
#print "Comparing BIC %d with %d: %f" % (gmm1idx, gmm2idx, score)
if score > best_BIC_score:
best_merged_gmm = new_gmm
merged_tuple = (g1, g2)
merged_tuple_indices = (gmm1idx, gmm2idx)
best_BIC_score = score
# ------- All-to-all comparison of gmms to merge -------
else:
l = len(iter_bic_list)
for gmm1idx in range(l):
for gmm2idx in range(gmm1idx+1, l):
score = 0.0
g1, d1 = iter_bic_list[gmm1idx]
g2, d2 = iter_bic_list[gmm2idx]
data = np.concatenate((d1,d2))
new_gmm, score = compute_distance_BIC(g1, g2, data, em_iters)
#print "Comparing BIC %d with %d: %f" % (gmm1idx, gmm2idx, score)
if score > best_BIC_score:
best_merged_gmm = new_gmm
merged_tuple = (g1, g2)
merged_tuple_indices = (gmm1idx, gmm2idx)
best_BIC_score = score
            # Merge the winning candidate pair if it is desirable to do so
if best_BIC_score > 0.0:
gmms_with_events = []
for gp in iter_bic_list:
gmms_with_events.append(gp[0])
#cleanup the gmm_list - remove empty gmms
for g in self.gmm_list:
if g not in gmms_with_events and g != merged_tuple[0] and g!= merged_tuple[1]:
#remove
self.gmm_list.remove(g)
self.gmm_list.remove(merged_tuple[0])
self.gmm_list.remove(merged_tuple[1])
self.gmm_list.append(best_merged_gmm)
print " size of each cluster:", [ g.M for g in self.gmm_list]
print "=== Total clustering time: ", time.time()-main_start
print "=== Final size of each cluster:", [ g.M for g in self.gmm_list]
return most_likely
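    # cluster_use_subset follows the same agglomeration loop as cluster(), but it operates on index
    # subsets (segment_majority_vote_indices / compute_distance_BIC_idx) instead of copied data blocks.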
def cluster_use_subset(self, em_iters, KL_ntop, NUM_SEG_LOOPS_INIT, NUM_SEG_LOOPS, seg_length):
print " ====================== CLUSTERING ON SUBSET ====================== "
main_start = time.time()
# ----------- Uniform Initialization -----------
# Get the events, divide them into an initial k clusters and train each GMM on a cluster
per_cluster = self.N/self.init_num_clusters
init_training = zip(self.gmm_list,np.vsplit(self.X, range(per_cluster, self.N, per_cluster)))
for g, x in init_training:
g.train(x, max_em_iters=em_iters)
# ----------- First majority vote segmentation loop ---------
for segment_iter in range(0,NUM_SEG_LOOPS_INIT):
iter_bic_dict, iter_bic_list, most_likely = self.segment_majority_vote_indices(seg_length, em_iters)
# ----------- Main Clustering Loop using BIC ------------
# Perform hierarchical agglomeration based on BIC scores
best_BIC_score = 1.0
total_events = 0
total_loops = 0
while (best_BIC_score > 0 and len(self.gmm_list) > 1):
total_loops+=1
for segment_iter in range(0,NUM_SEG_LOOPS):
iter_bic_dict, iter_bic_list, most_likely = self.segment_majority_vote_indices(seg_length, em_iters)
# Score all pairs of GMMs using BIC
best_merged_gmm = None
best_BIC_score = 0.0
merged_tuple = None
merged_tuple_indices = None
# ------- KL distance to compute best pairs to merge -------
if KL_ntop > 0:
top_K_gmm_pairs = self.gmm_list[0].find_top_KL_pairs(KL_ntop, self.gmm_list)
for pair in top_K_gmm_pairs:
score = 0.0
gmm1idx = pair[0]
gmm2idx = pair[1]
g1 = self.gmm_list[gmm1idx]
g2 = self.gmm_list[gmm2idx]
if gmm1idx in iter_bic_dict and gmm2idx in iter_bic_dict:
i1 = iter_bic_dict[gmm1idx]
i2 = iter_bic_dict[gmm2idx]
indices = np.concatenate((i1,i2))
elif gmm1idx in iter_bic_dict:
indices = iter_bic_dict[gmm1idx]
elif gmm2idx in iter_bic_dict:
indices = iter_bic_dict[gmm2idx]
else:
continue
new_gmm, score = compute_distance_BIC_idx(g1, g2, self.X, indices)
#print "Comparing BIC %d with %d: %f" % (gmm1idx, gmm2idx, score)
if score > best_BIC_score:
best_merged_gmm = new_gmm
merged_tuple = (g1, g2)
merged_tuple_indices = (gmm1idx, gmm2idx)
best_BIC_score = score
# ------- All-to-all comparison of gmms to merge -------
else:
l = len(iter_bic_list)
for gmm1idx in range(l):
for gmm2idx in range(gmm1idx+1, l):
score = 0.0
g1, i1 = iter_bic_list[gmm1idx]
g2, i2 = iter_bic_list[gmm2idx]
indices = np.concatenate((i1,i2))
new_gmm, score = compute_distance_BIC_idx(g1, g2, self.X, indices)
#print "Comparing BIC %d with %d: %f" % (gmm1idx, gmm2idx, score)
if score > best_BIC_score:
best_merged_gmm = new_gmm
merged_tuple = (g1, g2)
merged_tuple_indices = (gmm1idx, gmm2idx)
best_BIC_score = score
            # Merge the winning candidate pair if it is desirable to do so
if best_BIC_score > 0.0:
gmms_with_events = []
for gp in iter_bic_list:
gmms_with_events.append(gp[0])
#cleanup the gmm_list - remove empty gmms
for g in self.gmm_list:
if g not in gmms_with_events and g != merged_tuple[0] and g!= merged_tuple[1]:
#remove
self.gmm_list.remove(g)
self.gmm_list.remove(merged_tuple[0])
self.gmm_list.remove(merged_tuple[1])
self.gmm_list.append(best_merged_gmm)
print " size of each cluster:", [ g.M for g in self.gmm_list]
print "=== Total clustering time: ", time.time()-main_start
print "=== Final size of each cluster:", [ g.M for g in self.gmm_list]
return most_likely
def print_usage():
print """ ---------------------------------------------------------------------
Speaker Diarization in Python with Asp and the GMM Specializer usage:
---------------------------------------------------------------------
Arguments for the diarizer are parsed from a config file.
Default config file is diarizer.cfg, but you can pass your own file with the '-c' option.
Required is the config file header: [Diarizer] and the options are as follows:
--- Required: ---
basename: \t Basename of the file to process
mfcc_feats: \t MFCC input feature file
output_cluster: \t Output clustering file
gmm_output: \t Output GMMs parameters file
M_mfcc: \t Number of Gaussians per model for mfcc
initial_clusters: Number of initial clusters
--- Optional: ---
spnsp_file: \t spnsp file (all features used by default)
KL_ntop: \t Number of combinations to evaluate BIC on
\t 0 to deactivate KL-divergence (fast-match component)
em_iterations: \t Number of iterations for the standard
\t segmentation loop training (3 by default)
num_seg_iters_init: \t Number of majority vote iterations
\t in the initialization phase (2 by default)
num_seg_iters: \t Number of majority vote iterations
\t in the main loop (3 by default)
seg_length: \t Segment length for majority vote in frames
\t (250 frames by default)
For fastest performance, enable KL-divergence (KL_ntop = 3) and set
\t num_seg_iters_init and num_seg_iters to 1
"""
def print_no_config():
print "Please supply a config file with -c 'config_file_name.cfg' "
return
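# Illustrative diarizer.cfg matching the options documented in print_usage above and parsed by
# get_config_params below (file names and values are placeholders, not taken from the original project):
#
#   [Diarizer]
#   basename = meeting1
#   mfcc_feats = meeting1_features.htk
#   output_cluster = meeting1.rttm
#   gmm_output = meeting1_gmms.txt
#   M_mfcc = 5
#   initial_clusters = 16
#   KL_ntop = 3
#   em_iterations = 3
#   num_seg_iters_init = 1
#   num_seg_iters = 1
#   seg_length = 250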
def get_config_params(config):
#read in filenames
try:
meeting_name = config.get('Diarizer', 'basename')
except:
print "basename not specified in config file! exiting..."
sys.exit(2)
try:
f = config.get('Diarizer', 'mfcc_feats')
except:
print "Feature file mfcc_feats not specified in config file! exiting..."
sys.exit(2)
try:
sp = config.get('Diarizer', 'spnsp_file')
except:
print "Speech file spnsp_file not specified, continuing without it..."
sp = False
try:
outfile = config.get('Diarizer', 'output_cluster')
except:
print "output_cluster file not specified in config file! exiting..."
sys.exit(2)
try:
gmmfile = config.get('Diarizer', 'gmm_output')
except:
print "gmm_output file not specified in config file! exiting..."
sys.exit(2)
    #read GMM parameters
try:
num_gmms = int(config.get('Diarizer', 'initial_clusters'))
except:
print "initial_clusters not specified in config file! exiting..."
sys.exit(2)
try:
num_comps = int(config.get('Diarizer', 'M_mfcc'))
except:
print "M_mfcc not specified in config file! exiting..."
sys.exit(2)
#read algorithm configuration
try:
kl_ntop = int(config.get('Diarizer', 'KL_ntop'))
except:
kl_ntop = 0
try:
num_seg_iters_init = int(config.get('Diarizer', 'num_seg_iters_init'))
except:
num_seg_iters_init = 2
try:
num_seg_iters = int(config.get('Diarizer', 'num_seg_iters'))
except:
num_seg_iters = 3
try:
num_em_iters = int(config.get('Diarizer', 'em_iterations'))
except:
num_em_iters = 3
try:
seg_length = int(config.get('Diarizer', 'seg_length'))
except:
seg_length = 250
return meeting_name, f, sp, outfile, gmmfile, num_gmms,\
num_comps, num_em_iters, kl_ntop, num_seg_iters_init,\
num_seg_iters, seg_length
if __name__ == '__main__':
device_id = 0
# Process commandline arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "c:", ["help"])
except getopt.GetoptError, err:
print_no_config()
sys.exit(2)
config_file = 'diarizer.cfg'
config_specified = False
for o, a in opts:
if o == '-c':
config_file = a
config_specified = True
if o == '--help':
print_usage()
sys.exit(2)
if not config_specified:
        print "No config file specified, using default 'diarizer.cfg' file"
else:
print "Using the config file specified: '", config_file, "'"
try:
open(config_file)
except IOError, err:
print "Error! Config file: '", config_file, "' does not exist"
sys.exit(2)
# Parse diarizer config file
config = ConfigParser.ConfigParser()
config.read(config_file)
meeting_name, f, sp, outfile, gmmfile,\
num_gmms, num_comps, num_em_iters, kl_ntop,\
num_seg_iters_init, num_seg_iters, seg_length = get_config_params(config)
# Create tester object
diarizer = Diarizer(f, sp)
# Create the GMM list
diarizer.new_gmm_list(num_comps, num_gmms, 'diag')
# Cluster
#most_likely = diarizer.cluster(num_em_iters, kl_ntop, num_seg_iters_init, num_seg_iters, seg_length)
most_likely = diarizer.cluster_use_subset(num_em_iters, kl_ntop,\
num_seg_iters_init,\
num_seg_iters, seg_length)
# Write out RTTM and GMM parameter files
diarizer.write_to_RTTM(outfile, sp, meeting_name, most_likely, num_gmms, seg_length)
diarizer.write_to_GMM(gmmfile)
|
RedHenLab/Audio
|
GSoC2015/Speaker_Dia_RedHen/pycasp/cluster.py
|
Python
|
gpl-2.0
| 26,523
|
[
"Gaussian"
] |
e5207d0b8f68d3447e15bd528e11d89e8ad132fa758f7ce60d02b61dc92d3fc3
|
import os
import os.path
import sys
import xml.sax.handler
import xml.sax
#---------------------------------------------------------------------------
class ClassExtractor(xml.sax.handler.ContentHandler):
"""This class is essentially a callback. It is used to parse the xml files.
    It searches for the "class" attribute and collects its values in the 'classes' list."""
def __init__(self):
self.classes = []
def startElement(self, name, attributes):
if attributes.has_key("class"):
self.classes.append(attributes["class"])
#---------------------------------------------------------------------------
def parseAndGetUniqueClasses(filesList):
"""Given a list of XML files, parses the files to get a list of
    unique vtk class names."""
parser = xml.sax.make_parser()
handler= ClassExtractor()
parser.setContentHandler(handler)
classNames = []
for item in filesList:
parser.parse(item)
for className in handler.classes:
if len(className) and className != "not-used":
classNames.append(className)
return sortAndRemoveDuplicates(classNames)
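# Illustrative input (assumed format): an element such as <Proxy class="vtkPolyDataMapper"> in one
# of the XML resources would contribute "vtkPolyDataMapper" to the returned list; empty values and
# the literal "not-used" are filtered out above.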
#---------------------------------------------------------------------------
def sortAndRemoveDuplicates(stringList):
d = dict() # using dict instead of set
for s in stringList: d[s] = None
uniqueList = d.keys()
uniqueList.sort()
return uniqueList
#---------------------------------------------------------------------------
def writeStrings(stringList, outFile):
"""Writes a list of strings to a file, writing one string per line"""
fd = open(outFile, 'w')
for string in stringList:
fd.write(string)
fd.write("\n")
#---------------------------------------------------------------------------
def printStrings(stringList):
for string in stringList: print string
#---------------------------------------------------------------------------
def main(argv=None):
if not argv: argv = sys.argv
if len(argv) < 3:
print "Usage: python ParseXMLResources.py <Output-File-Name> <List-Of-XML-Files>"
sys.exit(1)
classList = parseAndGetUniqueClasses(argv[2:])
writeStrings(classList, argv[1])
#---------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Utilities/MinimalBuildTools/ParseXMLResources.py
|
Python
|
gpl-3.0
| 2,361
|
[
"VTK"
] |
8010826ef964edbd067fe7b32901dbfcd1b60f4b9ef86fc9d1a22936d3d1fddf
|
#!/usr/bin/env python
'''
MakeMagneticContours.py:
This script will create a magnetic field map using
equipartition estimates from a spectral index fits
file. Please edit the global constants as needed.
For more information about the magnetic field
equipartition estimates, please consult the standard
literature on the revised classical formula from:
Beck and Krause 2005.
'''
__author__ = "Michael Busch and Trevor Van Engelhoven"
__license__ = "MIT License"
__version__ = "1.0.0"
__maintainer__ = "Michael Busch"
__email__ = "mpbusch@jhu.edu"
__status__ = "Production"
from astropy.io import fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
#### Global Constants ###
# Please change as needed.
spix_infile = 'M63_spix_rmscut.fits' # Change to spectral index map of your galaxy.
intensity_infile = 'M63_LOFAR_convolve2_cut_reorder_regrid.fits' # Change to intensity map for your galaxy.
outfile = 'M63_magneticfield.fits'
get_shape = fits.open(spix_infile) # These commands are needed to correctly extract data from a .fits file
file_shape = get_shape[0].data[0,0,:,:] # Unpacking data from .fits file to determine shape.
rows,columns= file_shape.shape # Determining number of columns and rows.
Bv=np.zeros_like(file_shape) # Array of shape with same dimensions as data.
K0=100 # Proton-electron number density ratio, 100 assumed for star forming regions.
Ep=938.28 # Mev or 1.5033*10**-3 erg
inclination=55 # user input, change for each galaxy
l = None # pathlength, user input; has to be assumed. Set a physical value here before calling B_calc.
v=131 # v, frequency in MHz, user input
#alpha defined in loop
#gamma defined in loop
c1=6.26428e18 #erg**-2 s**-1 G**-1
#c2 defined in loop (we can make a function of this if wanted instead)
c3=1.86558e-23 #erg G**-1 sterad**-1
#c4 defined in loop (it depends on gamma, which is computed per pixel)
def B_calc(spix_infile, intensity_infile):
alpha_list = fits.open(spix_infile)
intensity_list = fits.open(intensity_infile)
for i in range(0,rows):
for j in range(0,columns):
alpha=alpha_list[0].data[0,0,i,j] #Spectral Index per pixel from Spectral Index Map
gamma= 2*alpha + 1 #I guess? Please check this.
#alpha= (gamma-1)/2 #This may be a special case only, synchrotron spectral index
            c2= .25*c3*(gamma+(7.0/3.0))/(gamma+1)*scipy.special.gamma((3*gamma-1)/12)*scipy.special.gamma((3*gamma+7)/12)
            c4= (np.cos(inclination))**((gamma+1)/2) # computed here because gamma is defined per pixel; note np.cos expects radians
Iv=intensity_list[0].data[0,0,i,j] #Intensity per pixel from Intensity Map
Bv[i,j]=(4*np.pi*(2*alpha+1)*(K0+1)*Iv*Ep**(1-2*alpha)*(v/(2*c1))**alpha/((2*alpha-1)*c2*l*c4))**(1/(alpha+3))
return Bv
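# Minimal usage sketch (illustrative, not from the original script; assumes the FITS files named in
# the globals above exist and that 'l' has been given a physical value):
#   Bv_map = B_calc(spix_infile, intensity_infile)
#   plot_data(Bv_map)
#   fits.writeto(outfile, Bv_map)   # assumption: this is how the otherwise unused 'outfile' is meant to be used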
def plot_data(data):
''' Plots Fits Data for simple inspection '''
plt.imshow(data)
plt.title('Image Data')
plt.colorbar()
plt.show()
def print_info(infile):
hdu_list = fits.open(infile)
hdu_list.info()
    return hdu_list
if __name__ == "__main__":
    hdu_list = print_info(spix_infile) # assumption: inspect the spectral index map; print_info returns the opened HDU list
    image_data = hdu_list[0].data[0,0,:,:] # Check axis from print_info()
    print(type(image_data))
    print(image_data.shape) # shape is an attribute, not a method. Can use np.swapaxes(array, indx, indx) if needed
|
mpbusch/LOFAR-ForegroundGalaxyCollaboration
|
MakeMagneticContours.py
|
Python
|
mit
| 3,346
|
[
"Galaxy"
] |
f4489795e4b569b47b358e84507012af0d202e6c21a568c671e67889db8dbd83
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
"""
Module for reading Lobster output files. For more information
on LOBSTER see www.cohp.de.
"""
import itertools
import os
import warnings
from typing import Any, Dict, List, Optional
import numpy as np
import spglib
from monty.io import zopen
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Kpoints, Potcar
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = "Janine George, Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.2"
__maintainer__ = "Janine George, Marco Esters "
__email__ = "janine.george@uclouvain.be, esters@uoregon.edu"
__date__ = "Dec 13, 2017"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class Lobsterin(dict, MSONable):
"""
This class can handle and generate lobsterin files
    Furthermore, it can also modify INCAR files for lobster, generate KPOINTS files for fatband calculations in Lobster,
and generate the standard primitive cells in a POSCAR file that are needed for the fatband calculations.
There are also several standard lobsterin files that can be easily generated.
"""
# reminder: lobster is not case sensitive
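    # Illustrative usage sketch (not part of the original class documentation; the keys come from the
    # keyword lists defined below):
    #   lobsterin = Lobsterin({"COHPstartEnergy": -15.0, "COHPendEnergy": 5.0, "basisSet": "pbeVaspFit2015"})
    #   lobsterin.write_lobsterin(path="lobsterin")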
# keyword + one float can be used in file
FLOATKEYWORDS = [
"COHPstartEnergy",
"COHPendEnergy",
"gaussianSmearingWidth",
"useDecimalPlaces",
"COHPSteps",
]
# one of these keywords +endstring can be used in file
STRINGKEYWORDS = [
"basisSet",
"cohpGenerator",
"realspaceHamiltonian",
"realspaceOverlap",
"printPAWRealSpaceWavefunction",
"printLCAORealSpaceWavefunction",
"kSpaceCOHP",
"EwaldSum",
]
# the keyword alone will turn on or off a function
BOOLEANKEYWORDS = [
"saveProjectionToFile",
"skipdos",
"skipcohp",
"skipcoop",
"skipcobi",
"skipMadelungEnergy",
"loadProjectionFromFile",
"forceEnergyRange",
"DensityOfEnergy",
"BWDF",
"BWDFCOHP",
"skipPopulationAnalysis",
"skipGrossPopulation",
"userecommendedbasisfunctions",
"skipProjection",
"writeBasisFunctions",
"writeMatricesToFile",
"noFFTforVisualization",
"RMSp",
"onlyReadVasprun.xml",
"noMemoryMappedFiles",
"skipPAWOrthonormalityTest",
"doNotIgnoreExcessiveBands",
"doNotUseAbsoluteSpilling",
"skipReOrthonormalization",
"forceV1HMatrix",
"useOriginalTetrahedronMethod",
"forceEnergyRange",
"bandwiseSpilling",
"kpointwiseSpilling",
]
# several of these keywords + ending can be used in a lobsterin file:
LISTKEYWORDS = ["basisfunctions", "cohpbetween", "createFatband"]
# all keywords known to this class so far
AVAILABLEKEYWORDS = FLOATKEYWORDS + STRINGKEYWORDS + BOOLEANKEYWORDS + LISTKEYWORDS
def __init__(self, settingsdict: dict):
"""
Args:
settingsdict: dict to initialize Lobsterin
"""
super().__init__()
# check for duplicates
listkey = [key.lower() for key in settingsdict.keys()]
if len(listkey) != len(list(set(listkey))):
raise OSError("There are duplicates for the keywords! The program will stop here.")
self.update(settingsdict)
def __setitem__(self, key, val):
"""
Add parameter-val pair to Lobsterin. Warns if parameter is not in list of
valid lobsterintags. Also cleans the parameter and val by stripping
leading and trailing white spaces. Similar to INCAR class.
"""
# due to the missing case sensitivity of lobster, the following code is necessary
found = False
for key_here in self.keys():
if key.strip().lower() == key_here.lower():
new_key = key_here
found = True
if not found:
new_key = key
if new_key.lower() not in [element.lower() for element in Lobsterin.AVAILABLEKEYWORDS]:
raise ValueError("Key is currently not available")
super().__setitem__(new_key, val.strip() if isinstance(val, str) else val)
def __getitem__(self, item):
"""
implements getitem from dict to avoid problems with cases
"""
found = False
for key_here in self.keys():
if item.strip().lower() == key_here.lower():
new_key = key_here
found = True
if not found:
new_key = item
val = dict.__getitem__(self, new_key)
return val
def diff(self, other):
"""
Diff function for lobsterin. Compares two lobsterin and indicates which parameters are the same.
Similar to the diff in INCAR.
Args:
other (Lobsterin): Lobsterin object to compare to
Returns:
dict with differences and similarities
"""
similar_param = {}
different_param = {}
key_list_others = [element.lower() for element in other.keys()]
for k1, v1 in self.items():
k1lower = k1.lower()
if k1lower not in key_list_others:
different_param[k1.upper()] = {"lobsterin1": v1, "lobsterin2": None}
else:
for key_here in other.keys():
if k1.lower() == key_here.lower():
new_key = key_here
if isinstance(v1, str):
if v1.strip().lower() != other[new_key].strip().lower():
different_param[k1.upper()] = {
"lobsterin1": v1,
"lobsterin2": other[new_key],
}
else:
similar_param[k1.upper()] = v1
elif isinstance(v1, list):
new_set1 = {element.strip().lower() for element in v1}
new_set2 = {element.strip().lower() for element in other[new_key]}
if new_set1 != new_set2:
different_param[k1.upper()] = {
"lobsterin1": v1,
"lobsterin2": other[new_key],
}
else:
if v1 != other[new_key]:
different_param[k1.upper()] = {
"lobsterin1": v1,
"lobsterin2": other[new_key],
}
else:
similar_param[k1.upper()] = v1
for k2, v2 in other.items():
if k2.upper() not in similar_param and k2.upper() not in different_param:
for key_here in self.keys():
if k2.lower() == key_here.lower():
new_key = key_here
else:
new_key = k2
if new_key not in self:
different_param[k2.upper()] = {"lobsterin1": None, "lobsterin2": v2}
return {"Same": similar_param, "Different": different_param}
def _get_nbands(self, structure: Structure):
"""
get number of nbands
"""
if self.get("basisfunctions") is None:
raise OSError("No basis functions are provided. The program cannot calculate nbands.")
basis_functions = [] # type: List[str]
for string_basis in self["basisfunctions"]:
# string_basis.lstrip()
string_basis_raw = string_basis.strip().split(" ")
while "" in string_basis_raw:
string_basis_raw.remove("")
for i in range(0, int(structure.composition.element_composition[string_basis_raw[0]])):
basis_functions.extend(string_basis_raw[1:])
no_basis_functions = 0
for basis in basis_functions:
if "s" in basis:
no_basis_functions = no_basis_functions + 1
elif "p" in basis:
no_basis_functions = no_basis_functions + 3
elif "d" in basis:
no_basis_functions = no_basis_functions + 5
elif "f" in basis:
no_basis_functions = no_basis_functions + 7
return int(no_basis_functions)
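    # Counting example (illustrative): with basisfunctions ["Fe 3d 4s"] and two Fe atoms in the
    # structure, each atom contributes 5 (d) + 1 (s) = 6 basis functions, so _get_nbands returns 12.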
def write_lobsterin(self, path="lobsterin", overwritedict=None):
"""
writes a lobsterin file
Args:
path (str): filename of the lobsterin file that will be written
overwritedict (dict): dict that can be used to overwrite lobsterin, e.g. {"skipdos": True}
"""
# will overwrite previous entries
# has to search first if entry is already in Lobsterindict (due to case insensitivity)
if overwritedict is not None:
for key, entry in overwritedict.items():
found = False
for key2 in self.keys():
if key.lower() == key2.lower():
self[key2] = entry
found = True
if not found:
self[key] = entry
filename = path
with open(filename, "w") as f:
for key in Lobsterin.AVAILABLEKEYWORDS:
if key.lower() in [element.lower() for element in self.keys()]:
if key.lower() in [element.lower() for element in Lobsterin.FLOATKEYWORDS]:
f.write(key + " " + str(self.get(key)) + "\n")
elif key.lower() in [element.lower() for element in Lobsterin.BOOLEANKEYWORDS]:
# checks if entry is True or False
for key_here in self.keys():
if key.lower() == key_here.lower():
new_key = key_here
if self.get(new_key):
f.write(key + "\n")
elif key.lower() in [element.lower() for element in Lobsterin.STRINGKEYWORDS]:
                        f.write(key + " " + str(self.get(key)) + "\n")
elif key.lower() in [element.lower() for element in Lobsterin.LISTKEYWORDS]:
for entry in self.get(key):
f.write(key + " " + str(entry) + "\n")
def as_dict(self):
"""
:return: MSONable dict
"""
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: Lobsterin
"""
return Lobsterin({k: v for k, v in d.items() if k not in ["@module", "@class"]})
def write_INCAR(
self,
incar_input: str = "INCAR",
incar_output: str = "INCAR.lobster",
poscar_input: str = "POSCAR",
isym: int = -1,
further_settings: dict = None,
):
"""
Will only make the run static, insert nbands, make ISYM=-1, set LWAVE=True and write a new INCAR.
You have to check for the rest.
Args:
incar_input (str): path to input INCAR
incar_output (str): path to output INCAR
poscar_input (str): path to input POSCAR
            isym (int): isym equal to -1 or 0 are possible. Current Lobster versions only allow -1.
further_settings (dict): A dict can be used to include further settings, e.g. {"ISMEAR":-5}
"""
# reads old incar from file, this one will be modified
incar = Incar.from_file(incar_input)
        warnings.warn("Please check your incar_input before using it. This method only changes a few settings!")
if isym == -1:
incar["ISYM"] = -1
elif isym == 0:
incar["ISYM"] = 0
else:
            raise ValueError("isym has to be -1 or 0.")
incar["NSW"] = 0
incar["LWAVE"] = True
# get nbands from _get_nbands (use basis set that is inserted)
incar["NBANDS"] = self._get_nbands(Structure.from_file(poscar_input))
if further_settings is not None:
for key, item in further_settings.items():
incar[key] = further_settings[key]
# print it to file
incar.write_file(incar_output)
@staticmethod
def get_basis(
structure: Structure,
potcar_symbols: list,
address_basis_file: str = os.path.join(MODULE_DIR, "lobster_basis/BASIS_PBE_54_standard.yaml"),
):
"""
        will get the basis from given potcar_symbols (e.g., ["Fe_pv","Si"])
#include this in lobsterin class
Args:
structure (Structure): Structure object
potcar_symbols: list of potcar symbols
Returns:
returns basis
"""
Potcar_names = list(potcar_symbols)
AtomTypes_Potcar = [name.split("_")[0] for name in Potcar_names]
AtomTypes = structure.symbol_set
if set(AtomTypes) != set(AtomTypes_Potcar):
raise OSError("Your POSCAR does not correspond to your POTCAR!")
BASIS = loadfn(address_basis_file)["BASIS"]
basis_functions = []
list_forin = []
for itype, type in enumerate(Potcar_names):
if type not in BASIS:
                raise ValueError(
                    "You have to provide the basis for "
                    + str(type)
                    + " manually. We don't have any information on this POTCAR."
                )
basis_functions.append(BASIS[type].split())
tojoin = str(AtomTypes_Potcar[itype]) + " "
tojoin2 = "".join(str(str(e) + " ") for e in BASIS[type].split())
list_forin.append(str(tojoin + tojoin2))
return list_forin
@staticmethod
def get_all_possible_basis_functions(
structure: Structure,
potcar_symbols: list,
address_basis_file_min: str = os.path.join(MODULE_DIR, "lobster_basis/BASIS_PBE_54_min.yaml"),
address_basis_file_max: str = os.path.join(MODULE_DIR, "lobster_basis/BASIS_PBE_54_max.yaml"),
):
"""
Args:
structure: Structure object
potcar_symbols: list of the potcar symbols
address_basis_file_min: path to file with the minimum required basis by the POTCAR
address_basis_file_max: path to file with the largest possible basis of the POTCAR
Returns: List of dictionaries that can be used to create new Lobsterin objects in
standard_calculations_from_vasp_files as dict_for_basis
"""
max_basis = Lobsterin.get_basis(
structure=structure,
potcar_symbols=potcar_symbols,
address_basis_file=address_basis_file_max,
)
min_basis = Lobsterin.get_basis(
structure=structure,
potcar_symbols=potcar_symbols,
address_basis_file=address_basis_file_min,
)
all_basis = get_all_possible_basis_combinations(min_basis=min_basis, max_basis=max_basis)
list_basis_dict = []
for ibasis, basis in enumerate(all_basis):
basis_dict = {}
for iel, elba in enumerate(basis):
basplit = elba.split()
basis_dict[basplit[0]] = " ".join(basplit[1:])
list_basis_dict.append(basis_dict)
return list_basis_dict
@staticmethod
def write_POSCAR_with_standard_primitive(POSCAR_input="POSCAR", POSCAR_output="POSCAR.lobster", symprec=0.01):
"""
writes a POSCAR with the standard primitive cell. This is needed to arrive at the correct kpath
Args:
POSCAR_input (str): filename of input POSCAR
POSCAR_output (str): filename of output POSCAR
symprec (float): precision to find symmetry
"""
structure = Structure.from_file(POSCAR_input)
kpath = HighSymmKpath(structure, symprec=symprec)
new_structure = kpath.prim
new_structure.to(fmt="POSCAR", filename=POSCAR_output)
@staticmethod
def write_KPOINTS(
POSCAR_input: str = "POSCAR",
KPOINTS_output="KPOINTS.lobster",
reciprocal_density: int = 100,
isym: int = -1,
from_grid: bool = False,
input_grid: list = [5, 5, 5],
line_mode: bool = True,
kpoints_line_density: int = 20,
symprec: float = 0.01,
):
"""
        writes a KPOINTS file for lobster (only ISYM=-1 and ISYM=0 are possible), grids are gamma centered
Args:
POSCAR_input (str): path to POSCAR
KPOINTS_output (str): path to output KPOINTS
reciprocal_density (int): Grid density
isym (int): either -1 or 0. Current Lobster versions only allow -1.
from_grid (bool): If True KPOINTS will be generated with the help of a grid given in input_grid. Otherwise,
they will be generated from the reciprocal_density
input_grid (list): grid to generate the KPOINTS file
line_mode (bool): If True, band structure will be generated
kpoints_line_density (int): density of the lines in the band structure
symprec (float): precision to determine symmetry
"""
structure = Structure.from_file(POSCAR_input)
if not from_grid:
kpointgrid = Kpoints.automatic_density_by_vol(structure, reciprocal_density).kpts
mesh = kpointgrid[0]
else:
mesh = input_grid
# The following code is taken from: SpacegroupAnalyzer
# we need to switch off symmetry here
latt = structure.lattice.matrix
positions = structure.frac_coords
unique_species = [] # type: List[Any]
zs = []
magmoms = []
for species, g in itertools.groupby(structure, key=lambda s: s.species):
if species in unique_species:
ind = unique_species.index(species)
zs.extend([ind + 1] * len(tuple(g)))
else:
unique_species.append(species)
zs.extend([len(unique_species)] * len(tuple(g)))
for site in structure:
if hasattr(site, "magmom"):
magmoms.append(site.magmom)
elif site.is_ordered and hasattr(site.specie, "spin"):
magmoms.append(site.specie.spin)
else:
magmoms.append(0)
# For now, we are setting magmom to zero. (Taken from INCAR class)
cell = latt, positions, zs, magmoms
# TODO: what about this shift?
mapping, grid = spglib.get_ir_reciprocal_mesh(mesh, cell, is_shift=[0, 0, 0])
# exit()
# get the kpoints for the grid
if isym == -1:
kpts = []
weights = []
all_labels = []
for gp in grid:
kpts.append(gp.astype(float) / mesh)
weights.append(float(1))
all_labels.append("")
elif isym == 0:
# time reversal symmetry: k and -k are equivalent
kpts = []
weights = []
all_labels = []
newlist = [list(gp) for gp in list(grid)]
mapping = []
for gp in newlist:
minusgp = [-k for k in gp]
if minusgp in newlist and minusgp not in [[0, 0, 0]]:
mapping.append(newlist.index(minusgp))
else:
mapping.append(newlist.index(gp))
for igp, gp in enumerate(newlist):
if mapping[igp] > igp:
kpts.append(np.array(gp).astype(float) / mesh)
weights.append(float(2))
all_labels.append("")
elif mapping[igp] == igp:
kpts.append(np.array(gp).astype(float) / mesh)
weights.append(float(1))
all_labels.append("")
else:
            raise ValueError("Only isym=-1 and isym=0 are allowed.")
# line mode
if line_mode:
kpath = HighSymmKpath(structure, symprec=symprec)
if not np.allclose(kpath.prim.lattice.matrix, structure.lattice.matrix):
raise ValueError(
"You are not using the standard primitive cell. The k-path is not correct. Please generate a "
"standard primitive cell first."
)
frac_k_points, labels = kpath.get_kpoints(line_density=kpoints_line_density, coords_are_cartesian=False)
for k, f in enumerate(frac_k_points):
kpts.append(f)
weights.append(0.0)
all_labels.append(labels[k])
if isym == -1:
comment = (
"ISYM=-1, grid: " + str(mesh) if not line_mode else "ISYM=-1, grid: " + str(mesh) + " plus kpoint path"
)
elif isym == 0:
comment = (
"ISYM=0, grid: " + str(mesh) if not line_mode else "ISYM=0, grid: " + str(mesh) + " plus kpoint path"
)
KpointObject = Kpoints(
comment=comment,
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(kpts),
kpts=kpts,
kpts_weights=weights,
labels=all_labels,
)
KpointObject.write_file(filename=KPOINTS_output)
@classmethod
def from_file(cls, lobsterin: str):
"""
Args:
lobsterin (str): path to lobsterin
Returns:
Lobsterin object
"""
with zopen(lobsterin, "rt") as f:
data = f.read().split("\n")
if len(data) == 0:
raise OSError("lobsterin file contains no data.")
Lobsterindict = {} # type: Dict
for datum in data:
# will remove all comments to avoid complications
raw_datum = datum.split("!")[0]
raw_datum = raw_datum.split("//")[0]
raw_datum = raw_datum.split("#")[0]
raw_datum = raw_datum.split(" ")
while "" in raw_datum:
raw_datum.remove("")
if len(raw_datum) > 1:
# check which type of keyword this is, handle accordingly
if raw_datum[0].lower() not in [datum2.lower() for datum2 in Lobsterin.LISTKEYWORDS]:
if raw_datum[0].lower() not in [datum2.lower() for datum2 in Lobsterin.FLOATKEYWORDS]:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = " ".join(raw_datum[1:])
else:
                            raise ValueError("Same keyword " + str(raw_datum[0].lower()) + " twice!")
else:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = float(raw_datum[1])
else:
                            raise ValueError("Same keyword " + str(raw_datum[0].lower()) + " twice!")
else:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = [" ".join(raw_datum[1:])]
else:
Lobsterindict[raw_datum[0].lower()].append(" ".join(raw_datum[1:]))
elif len(raw_datum) > 0:
Lobsterindict[raw_datum[0].lower()] = True
return cls(Lobsterindict)
@staticmethod
def _get_potcar_symbols(POTCAR_input: str) -> list:
"""
will return the name of the species in the POTCAR
Args:
POTCAR_input(str): string to potcar file
Returns:
list of the names of the species in string format
"""
potcar = Potcar.from_file(POTCAR_input)
for pot in potcar:
if pot.potential_type != "PAW":
raise OSError("Lobster only works with PAW! Use different POTCARs")
# Warning about a bug in lobster-4.1.0
with zopen(POTCAR_input, "r") as f:
data = f.read()
if isinstance(data, bytes):
data = data.decode("utf-8")
if "SHA256" in data or "COPYR" in data:
warnings.warn(
"These POTCARs are not compatible with "
"Lobster up to version 4.1.0."
"\n The keywords SHA256 and COPYR "
"cannot be handled by Lobster"
" \n and will lead to wrong results."
)
if potcar.functional != "PBE":
raise OSError("We only have BASIS options for PBE so far")
Potcar_names = [name["symbol"] for name in potcar.spec]
return Potcar_names
@classmethod
def standard_calculations_from_vasp_files(
cls,
POSCAR_input: str = "POSCAR",
INCAR_input: str = "INCAR",
POTCAR_input: Optional[str] = None,
dict_for_basis: Optional[dict] = None,
option: str = "standard",
):
"""
will generate Lobsterin with standard settings
Args:
POSCAR_input(str): path to POSCAR
INCAR_input(str): path to INCAR
POTCAR_input (str): path to POTCAR
dict_for_basis (dict): can be provided: it should look the following:
dict_for_basis={"Fe":'3p 3d 4s 4f', "C": '2s 2p'} and will overwrite all settings from POTCAR_input
option (str): 'standard' will start a normal lobster run where COHPs, COOPs, DOS, CHARGE etc. will be
calculated
'standard_from_projection' will start a normal lobster run from a projection
'standard_with_fatband' will do a fatband calculation, run over all orbitals
'onlyprojection' will only do a projection
'onlydos' will only calculate a projected dos
'onlycohp' will only calculate cohp
'onlycoop' will only calculate coop
'onlycohpcoop' will only calculate cohp and coop
Returns:
Lobsterin Object with standard settings
"""
warnings.warn(
"Always check and test the provided basis functions. The spilling of your Lobster calculation might help"
)
# warn that fatband calc cannot be done with tetrahedron method at the moment
if option not in [
"standard",
"standard_from_projection",
"standard_with_fatband",
"onlyprojection",
"onlydos",
"onlycohp",
"onlycoop",
"onlycobi",
"onlycohpcoop",
"onlycohpcoopcobi",
"onlymadelung",
]:
raise ValueError("The option is not valid!")
Lobsterindict = {} # type: Dict[Any,Any]
# this basis set covers most elements
Lobsterindict["basisSet"] = "pbeVaspFit2015"
# energies around e-fermi
Lobsterindict["COHPstartEnergy"] = -35.0
Lobsterindict["COHPendEnergy"] = 5.0
if option in [
"standard",
"onlycohp",
"onlycoop",
"onlycobi",
"onlycohpcoop",
"onlycohpcoopcobi",
"standard_with_fatband",
]:
# every interaction with a distance of 6.0 is checked
Lobsterindict["cohpGenerator"] = "from 0.1 to 6.0 orbitalwise"
# the projection is saved
Lobsterindict["saveProjectionToFile"] = True
if option == "standard_from_projection":
Lobsterindict["cohpGenerator"] = "from 0.1 to 6.0 orbitalwise"
Lobsterindict["loadProjectionFromFile"] = True
# TODO: add cobi here! might be relevant lobster version
if option == "onlycohp":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcoop"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlycoop":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcohp"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlycohpcoop":
Lobsterindict["skipdos"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlycohpcoopcobi":
Lobsterindict["skipdos"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlydos":
Lobsterindict["skipcohp"] = True
Lobsterindict["skipcoop"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlyprojection":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcohp"] = True
Lobsterindict["skipcoop"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
Lobsterindict["saveProjectionToFile"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlycobi":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcohp"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlymadelung":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcohp"] = True
Lobsterindict["skipcoop"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
Lobsterindict["saveProjectionToFile"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
incar = Incar.from_file(INCAR_input)
if incar["ISMEAR"] == 0:
Lobsterindict["gaussianSmearingWidth"] = incar["SIGMA"]
if incar["ISMEAR"] != 0 and option == "standard_with_fatband":
raise ValueError("ISMEAR has to be 0 for a fatband calculation with Lobster")
if dict_for_basis is not None:
# dict_for_basis={"Fe":'3p 3d 4s 4f', "C": '2s 2p'}
# will just insert this basis and not check with poscar
basis = [key + " " + value for key, value in dict_for_basis.items()]
elif POTCAR_input is not None:
# get basis from POTCAR
potcar_names = Lobsterin._get_potcar_symbols(POTCAR_input=POTCAR_input)
basis = Lobsterin.get_basis(structure=Structure.from_file(POSCAR_input), potcar_symbols=potcar_names)
else:
raise ValueError("basis cannot be generated")
Lobsterindict["basisfunctions"] = basis
if option == "standard_with_fatband":
Lobsterindict["createFatband"] = basis
return cls(Lobsterindict)
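# Illustrative usage sketch (assumes the VASP input files exist in the working directory):
#   lobsterin = Lobsterin.standard_calculations_from_vasp_files(
#       POSCAR_input="POSCAR", INCAR_input="INCAR", POTCAR_input="POTCAR", option="standard")
#   lobsterin.write_lobsterin(path="lobsterin")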
def get_all_possible_basis_combinations(min_basis: list, max_basis: list) -> list:
"""
Args:
min_basis: list of basis entries: e.g., ['Si 3p 3s ']
max_basis: list of basis entries: e.g., ['Si 3p 3s ']
Returns: all possible combinations of basis functions, e.g. [['Si 3p 3s']]
"""
max_basis_lists = [x.split() for x in max_basis]
min_basis_lists = [x.split() for x in min_basis]
# get all possible basis functions
basis_dict: Dict[str, dict] = {}
for iel, el in enumerate(max_basis_lists):
basis_dict[el[0]] = {"fixed": [], "variable": [], "combinations": []}
for basis in el[1:]:
if basis in min_basis_lists[iel]:
basis_dict[el[0]]["fixed"].append(basis)
if basis not in min_basis_lists[iel]:
basis_dict[el[0]]["variable"].append(basis)
for L in range(0, len(basis_dict[el[0]]["variable"]) + 1):
for subset in itertools.combinations(basis_dict[el[0]]["variable"], L):
basis_dict[el[0]]["combinations"].append(" ".join([el[0]] + basis_dict[el[0]]["fixed"] + list(subset)))
list_basis = []
for el, item in basis_dict.items():
list_basis.append(item["combinations"])
# get all combinations
start_basis = list_basis[0]
if len(list_basis) > 1:
for iel, el in enumerate(list_basis[1:], 1):
new_start_basis = []
for ielbasis, elbasis in enumerate(start_basis):
for ielbasis2, elbasis2 in enumerate(list_basis[iel]):
if not isinstance(elbasis, list):
new_start_basis.append([elbasis, elbasis2])
else:
new_start_basis.append(elbasis.copy() + [elbasis2])
start_basis = new_start_basis
return start_basis
return [[basis] for basis in start_basis]
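# Example (illustrative): get_all_possible_basis_combinations(min_basis=["Si 3p 3s"], max_basis=["Si 3p 3s 3d"])
# returns [["Si 3p 3s"], ["Si 3p 3s 3d"]], i.e. the required functions plus each subset of the optional ones.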
|
materialsproject/pymatgen
|
pymatgen/io/lobster/inputs.py
|
Python
|
mit
| 33,885
|
[
"VASP",
"pymatgen"
] |
6e65a23f8eba710773e7e54e3d49b6fa2d762089616db2ceaa4be30fc1c7b610
|
import os, pdb, platform, time, warnings
import ctypes as ct
import numpy as np
MAX_ONES = 1024*256
if platform.system() == 'Windows' or platform.system().startswith('CYGWIN_NT'):
mydir = os.path.dirname(__file__)
os.environ['PATH'] += ";" + mydir
_cudamat = ct.cdll.LoadLibrary('libcudamat.dll')
else:
_cudamat = ct.cdll.LoadLibrary('libcudamat.so')
_cudamat.get_last_cuda_error.restype = ct.c_char_p
_cudamat.cublas_init.restype = ct.c_int
_cudamat.cublas_shutdown.restype = ct.c_int
_cudamat.cuda_set_device.restype = ct.c_int
_cudamat.init_random.restype = ct.c_int
_cudamat.init_empty.restype = ct.c_int
_cudamat.reshape.restype = ct.c_int
_cudamat.copy_to_host.restype = ct.c_int
_cudamat.allocate_device_memory.restype = ct.c_int
_cudamat.copy_to_device.restype = ct.c_int
_cudamat.copy_on_device.restype = ct.c_int
_cudamat.free_device_memory.restype = ct.c_int
_cudamat.get_slice.restype = ct.c_int
_cudamat.get_row_slice.restype = ct.c_int
_cudamat.set_row_slice.restype = ct.c_int
_cudamat.copy_transpose.restype = ct.c_int
_cudamat.get_vector_slice.restype = ct.c_int
_cudamat.fill_with_rand.restype = ct.c_int
_cudamat.fill_with_randn.restype = ct.c_int
_cudamat.add_col_vec.restype = ct.c_int
_cudamat.add_col_mult.restype = ct.c_int
_cudamat.add_row_mult.restype = ct.c_int
_cudamat.add_row_vec.restype = ct.c_int
_cudamat.mult_by_col_vec.restype = ct.c_int
_cudamat.mult_by_row_vec.restype = ct.c_int
_cudamat.div_by_col_vec.restype = ct.c_int
_cudamat.div_by_row_vec.restype = ct.c_int
_cudamat.less_than.restype = ct.c_int
_cudamat.less_than_scalar.restype = ct.c_int
_cudamat.greater_than.restype = ct.c_int
_cudamat.greater_than_scalar.restype = ct.c_int
_cudamat.max_by_axis.restype = ct.c_int
_cudamat.argmax_by_axis.restype = ct.c_int
_cudamat.sqsum_by_axis.restype = ct.c_int
_cudamat.normlimit_by_axis.restype = ct.c_int
_cudamat.sign.restype = ct.c_int
_cudamat.apply_sigmoid.restype = ct.c_int
_cudamat.apply_tanh.restype = ct.c_int
_cudamat.apply_abs.restype = ct.c_int
_cudamat.apply_log_1_plus_exp.restype = ct.c_int
_cudamat.apply_log.restype = ct.c_int
_cudamat.apply_floor.restype = ct.c_int
_cudamat.apply_ceil.restype = ct.c_int
_cudamat.apply_exp.restype = ct.c_int
_cudamat.apply_sqrt.restype = ct.c_int
_cudamat.apply_pow.restype = ct.c_int
_cudamat.apply_pow_matrix.restype = ct.c_int
_cudamat.reciprocal.restype = ct.c_int
_cudamat.add_elementwise.restype = ct.c_int
_cudamat.subtract_elementwise.restype = ct.c_int
_cudamat.divide_elementwise.restype = ct.c_int
_cudamat.mult_elementwise.restype = ct.c_int
_cudamat.apply_logistic_deriv.restype = ct.c_int
_cudamat.assign_scalar.restype = ct.c_int
_cudamat.mult_by_scalar.restype = ct.c_int
_cudamat.divide_by_scalar.restype = ct.c_int
_cudamat.add_scalar.restype = ct.c_int
_cudamat.euclid_norm.restype = ct.c_float
_cudamat.selectRows.restype = ct.c_int
_cudamat.setSelectedRows.restype = ct.c_int
_cudamat.vdot.restype = ct.c_float
_cudamat.dot.restype = ct.c_int
def deprecated(func):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
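# Example (illustrative) of applying the decorator above:
#   @deprecated
#   def old_helper():
#       return 42
# Calling old_helper() emits a DeprecationWarning and then runs the original function.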
class CUDAMatException(Exception):
pass
def get_last_cuda_error():
return str(_cudamat.get_last_cuda_error())
def generate_exception(err_code):
"""
Return a CUDAMatException object based on the error code err_code.
"""
if err_code == -1:
return CUDAMatException("Incompatible matrix dimensions.")
elif err_code == -2:
return CUDAMatException("CUBLAS error.")
elif err_code == -3:
return CUDAMatException("CUDA error: " + get_last_cuda_error())
elif err_code == -4:
return CUDAMatException("Operation not supported on views.")
elif err_code == -5:
return CUDAMatException("Operation not supported on transposed matrices.")
elif err_code == -6:
return CUDAMatException("")
elif err_code == -7:
return CUDAMatException("Incompatible transposedness.")
elif err_code == -8:
return CUDAMatException("Matrix is not in device memory.")
elif err_code == -9:
return CUDAMatException("Operation not supported.")
class cudamat(ct.Structure):
_fields_ = [('data_host', ct.POINTER(ct.c_float)),
('data_device', ct.POINTER(ct.c_float)),
('on_device', ct.c_int),
('on_host', ct.c_int),
('size', ct.c_int * 2),
('is_trans', ct.c_int),
('owns_data', ct.c_int)]
class rnd_struct(ct.Structure):
_fields_ = [('dev_rnd_mults', ct.POINTER(ct.c_uint)),
('dev_rnd_words', ct.POINTER(ct.c_longlong))]
class TransposedCUDAMatrix(object):
def __init__(self, mat):
self.mat = cudamat()
ct.memmove(ct.pointer(self.mat), ct.pointer(mat), ct.sizeof(self.mat))
self.mat.is_trans = 1
self.p_mat = ct.pointer(self.mat)
class CUDAMatrix(object):
"""
A CUDAMatrix object represents a matrix of single precision floating point
numbers on a GPU.
"""
def overwrite(self, array, copy_to_device=True):
"""Overwrites self with array.
'array' should have a size smaller than that of the array used to
initialize the CUDAMatrix. The method will not throw an Exception just
yet if this is not true. It will throw exceptions or behave in strange
ways later on.
"""
assert type(array) == np.ndarray, 'array must be a np.ndarray.'
array = reformat(array)
self.numpy_array = array
_cudamat.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1]))
_cudamat.set_on_device(self.p_mat)
if copy_to_device:
err_code = _cudamat.copy_to_device(self.p_mat)
if err_code:
raise generate_exception(err_code)
def __init__(self, array, copy_to_device = True):
"""
Initializes a new matrix object in one of two ways. If array is a numpy
ndarray, memory for a matrix with the same dimensions is allocated on
the GPU. If the copy_to_device flag is set to True, the GPU matrix is
initialized with the given ndarray. If array is not an ndarray, it must
be a cudamat structure (typically the user will never use this way of
calling __init__).
"""
if type(array) == np.ndarray:
# Convert array to float32 in FORTRAN order
array = reformat(array)
# Initialize as a ndarray-tied matrix.
self.mat = cudamat()
self.size = self.mat.size
self.p_mat = ct.pointer(self.mat)
self.numpy_array = array
_cudamat.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1]))
if copy_to_device:
err_code = _cudamat.copy_to_device(self.p_mat)
if err_code:
raise generate_exception(err_code)
else:
# Initialize based on existing cudamat structure.
mat = array
self.mat = mat
self.p_mat = ct.pointer(self.mat)
self.T = TransposedCUDAMatrix(self.mat)
# Keep a reference to free device memory in case of a crash.
self.__free_device_memory = _cudamat.free_device_memory
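    # Illustrative usage sketch (assumes libcudamat is loadable and the GPU/CUBLAS context is set up):
    #   a = CUDAMatrix(np.random.randn(128, 64))  # allocates GPU memory and copies the ndarray over
    #   b = a.transpose()                         # new GPU matrix holding a transposed copy
    #   host = b.asarray()                        # copies the result back into a NumPy array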
@staticmethod
def init_random(seed = 0):
"""
Initialize and seed the random number generator.
"""
NUM_RND_STREAMS = 96*128
CUDAMatrix.rndInitialized = 1
CUDAMatrix.rnd_state = rnd_struct()
CUDAMatrix.rnd_state_p = ct.pointer(CUDAMatrix.rnd_state)
cudamat_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rnd_multipliers_32bit.txt')
err_code = _cudamat.init_random(CUDAMatrix.rnd_state_p, ct.c_int(seed), cudamat_path)
if err_code:
raise generate_exception(err_code)
@property
def shape(self):
return (self.mat.size[0], self.mat.size[1])
def set_shape(self, shape):
"""
Sets the shape of the array to the given array.
Highly unsafe method. Does no checking.
Do not use this unless you know what you are doing.
"""
m = ct.c_uint(shape[0])
n = ct.c_uint(shape[1])
err_code = _cudamat.set_shape(self.p_mat, m, n)
if err_code:
raise generate_exception(err_code)
return self
def reshape(self, shape):
"""
Reshapes self to have the given shape. The number of elements cannot
change as this only changes how the contents are interpreted.
"""
m, n = shape
mlen = self.shape[0] * self.shape[1]
if m == -1:
assert n > 0 and mlen % n == 0
m = mlen / n
elif n == -1:
assert m > 0 and mlen % m == 0
n = mlen / m
err_code = _cudamat.reshape(self.p_mat, ct.c_uint(m), ct.c_uint(n))
if err_code:
raise generate_exception(err_code)
return self
def blockify(source, blocksize, target = None):
if target == None:
target = source
err_code = _cudamat.blockify(source.p_mat, target.p_mat, ct.c_uint(blocksize))
if err_code:
raise generate_exception(err_code)
return target
def generate_translations(source, source_w, target_w, off_x, off_y, target = None):
num_channels = source.shape[0] / (source_w**2)
if target == None:
batch_s = source.shape[1]
target = empty((target_w**2, batch_s))
err_code = _cudamat.generate_translations_big_var_off(source.p_mat, target.p_mat, off_x.p_mat, off_y.p_mat, ct.c_uint(source_w), ct.c_uint(target_w), ct.c_uint(num_channels))
if err_code:
raise generate_exception(err_code)
return target
def asarray(self):
"""
Copies the matrix to an ndarray on the CPU and returns it.
"""
self.copy_to_host()
return self.numpy_array
def copy_to_device(self):
"""
Copy the matrix to the GPU.
"""
err_code = _cudamat.copy_to_device(self.p_mat)
if err_code:
raise generate_exception(err_code)
def copy_to_host(self):
"""
Copy the matrix to the CPU.
"""
if not self.mat.on_host:
# allocate host storage if necessary
m = self.mat.size[0]
n = self.mat.size[1]
self.numpy_array = np.empty((m, n), dtype=np.float32, order = 'F')
self.mat.data_host = self.numpy_array.ctypes.data_as(ct.POINTER(ct.c_float))
self.mat.on_host = 1
err_code = _cudamat.copy_to_host(self.p_mat)
if err_code:
raise generate_exception(err_code)
def assign(self, val):
"""Assign val to self, where val can be a scalar or a CUDAMatrix
with the same dimensions as self. """
if isinstance(val, CUDAMatrix):
err_code = _cudamat.copy_on_device(val.p_mat, self.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.assign_scalar(self.p_mat, ct.c_float(val))
else:
raise ValueError, "Assigned value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return self
def free_device_memory(self):
"""
Free memory used up by the matrix on the GPU.
"""
err_code = _cudamat.free_device_memory(self.p_mat)
if err_code:
raise generate_exception(err_code)
def set_trans(self, is_trans):
"""
Set the transposedness flag to is_trans.
"""
_cudamat.set_transpose(self.p_mat, ct.c_int(1 * is_trans))
def slice(self, first_col, last_col):
mat = cudamat()
if self.mat.size[0] == 1 or self.mat.size[1] == 1:
err_code = _cudamat.get_vector_slice(self.p_mat, ct.pointer(mat), ct.c_int(first_col), ct.c_int(last_col))
else:
err_code = _cudamat.get_slice(self.p_mat, ct.pointer(mat), ct.c_int(first_col), ct.c_int(last_col))
if err_code:
raise generate_exception(err_code)
new_mat = CUDAMatrix(mat)
try:
new_mat.sliceof = self.sliceof
except:
new_mat.sliceof = self
return new_mat
def get_col_slice(self, first_col, last_col, target = None):
col_slice = self.slice(first_col, last_col)
if target:
target.assign(col_slice)
return target
else:
return col_slice
def set_col_slice(self, first_col, last_col, mat):
self.slice(first_col, last_col).assign(mat)
return self
def get_row_slice(self, start, end, target = None):
"""
Get the rows with indices start through end. If target is not provided
memory for a new matrix will be allocated.
"""
width = self.shape[1]
if not target:
target = empty((end-start, width))
err_code = _cudamat.get_row_slice(self.p_mat, target.p_mat, ct.c_int(start), ct.c_int(end))
if err_code:
raise generate_exception(err_code)
return target
def set_row_slice(self, start, end, mat):
"""
Assign the contents of mat to the rows with indices start through end.
"""
err_code = _cudamat.set_row_slice(mat.p_mat, self.p_mat, ct.c_int(start), ct.c_int(end))
if err_code:
raise generate_exception(err_code)
return self
def transpose(self, target = None):
"""
Return a transposed copy of the matrix.
"""
if not target:
target = empty((self.shape[1], self.shape[0]))
err_code = _cudamat.copy_transpose(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def fill_with_rand(self):
"""
Fill matrix on the GPU with random numbers drawn from the uniform
distribution over the (0,1) interval.
"""
err_code = _cudamat.fill_with_rand(CUDAMatrix.rnd_state_p, self.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def fill_with_randn(self):
"""
Fill matrix on the GPU with random numbers drawn from the standard normal
distribution.
"""
err_code = _cudamat.fill_with_randn(CUDAMatrix.rnd_state_p, self.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def dropout(self, dropprob, val=0.0):
"""
Drop entries in this matrix uniformly randomly with given probability
and set the dropped out unit to state val.
"""
err_code = _cudamat.dropout(CUDAMatrix.rnd_state_p, self.p_mat,
ct.c_float(dropprob), ct.c_float(val))
if err_code:
raise generate_exception(err_code)
return self
def sample_bernoulli(self, target=None):
"""
Sample a bernoulli distribution. Choose 1 with probability given by entries of self, 0 otherwise.
"""
if not target:
target = self
err_code = _cudamat.sample_bernoulli(CUDAMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def sample_bernoulli_tanh(self, target=None):
"""
Sample a bernoulli distribution. Choose 1 with probability given by entries of (1+self)/2, -1 otherwise.
"""
if not target:
target = self
err_code = _cudamat.sample_bernoulli_tanh(CUDAMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def sample_poisson(self, target=None):
"""
Sample a poisson distribution. Choose 1 with probability given by entries of self.
Not implemented yet.
"""
if not target:
target = self
err_code = _cudamat.sample_poisson(CUDAMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def sample_gaussian(self, mult=1.0, target=None):
"""
Add zero mean gaussian noise to the matrix. mult is the stddev.
"""
if not target:
target = self
err_code = _cudamat.sample_gaussian(CUDAMatrix.rnd_state_p, self.p_mat, target.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def perturb_energy_for_softmax_sampling(self, target=None):
"""
Add by -log(-log(rand)).
"""
if not target:
target = self
err_code = _cudamat.perturb_energy(CUDAMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def perturb_prob_for_softmax_sampling(self, target=None):
"""
Divide by -log(rand).
"""
if not target:
target = self
err_code = _cudamat.perturb_prob(CUDAMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def add_col_vec(self, vec, target = None):
"""
Add vector vec to every column of the matrix. If a target is provided,
it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.add_col_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def add_col_mult(self, vec, mult, target = None):
"""
Add a multiple of vector vec to every column of the matrix. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.add_col_mult(self.p_mat, vec.p_mat, target.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return target
def mult_diagonal(self, val, target = None):
"""
        Multiply the diagonal of self by val. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
assert self.shape[0] == self.shape[1], 'self must be a square matrix'
if isinstance(val, CUDAMatrix):
err_code = _cudamat.mult_diagonal(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.mult_diagonal_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def add_diagonal(self, val, target = None):
"""
Add val to the diagonal of self. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
assert self.shape[0] == self.shape[1], 'self must be a square matrix'
if isinstance(val, CUDAMatrix):
err_code = _cudamat.add_diagonal(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.add_diagonal_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def add_row_mult(self, vec, mult, target = None):
"""
Add a multiple of vector vec to every row of the matrix. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.add_row_mult(self.p_mat, vec.p_mat, target.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return target
def add_row_vec(self, vec, target = None):
"""
Add vector vec to every row of the matrix. If a target is provided,
it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.add_row_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def mult_by_col(self, vec, target = None):
"""
Multiply vector vec into every column of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.mult_by_col_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def mult_by_row(self, vec, target = None):
"""
Multiply vector vec into every row of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.mult_by_row_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def div_by_col(self, vec, target = None):
"""
Divide every column of the matrix by vector vec. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.div_by_col_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def div_by_row(self, vec, target = None):
"""
Divide vector vec into every row of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _cudamat.div_by_row_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def sum(self, axis=None, target = None):
"""
Sum the matrix along the given dimension, where 0 represents the leading
dimension and 1 represents the non-leading dimension. If None, the sum
of all elements is returned. If a target is not provided, a new vector is
created for storing the result.
"""
if axis is None:
return vdot(self, CUDAMatrix.ones.slice(0, self.shape[0]*self.shape[1]))
else:
return sum(self, axis, target)
def add_sums(self, mat, axis, mult = 1.):
"""
Add a multiple of the sums of the matrix mat along the given dimension
to self.
"""
m = _cudamat.get_leading_dimension(mat.p_mat)
n = _cudamat.get_nonleading_dimension(mat.p_mat)
if axis == 0:
# sum along leading dimension
left = CUDAMatrix.ones.slice(0, m)
left.set_trans(True)
right = mat
elif axis == 1:
# sum along non-leading dimension
left = mat
right = CUDAMatrix.ones.slice(0, n)
err_code = _cudamat.dot(left.p_mat, right.p_mat, self.p_mat, ct.c_float(1.), ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def less_than_eq(self, val, target = None):
"""
Perform the operation target = 1. * (self <= val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.less_than_eq_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.less_than_eq(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def less_than(self, val, target = None):
"""
Perform the operation target = 1. * (self < val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.less_than_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.less_than(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def greater_than_eq(self, val, target = None):
"""
Perform the operation target = 1. * (self >= val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.greater_than_eq_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.greater_than_eq(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def greater_than(self, val, target = None):
"""
Perform the operation target = 1. * (self > val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.greater_than_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.greater_than(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def upper_bound(self, val, target = None):
"""
Perform the operation target = (self > val) ? val:self, where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.upper_bound_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.upper_bound(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def lower_bound(self, val, target = None):
"""
Perform the operation target = (self < val) ? val:self, where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _cudamat.lower_bound_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _cudamat.lower_bound(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def cumsum(self, axis, temp=None, target = None):
"""
Cumulative sum along axis.
"""
m, n = self.shape
assert axis == 0, 'axis = 1 not implemented.'
if not target:
target = empty((m, n))
if not temp:
temp = empty((m, n))
"""
elif axis == 1:
if not target:
target = empty((m, 1))
"""
err_code = _cudamat.cumsum_by_axis(self.p_mat, target.p_mat, temp.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def choose_max_and_accumulate(self, acc):
"""
Find the maximum value along the leading dimension and accumulate the result
into acc.
"""
m, n = self.shape
err_code = _cudamat.choose_max_and_accumulate(self.p_mat, acc.p_mat)
if err_code:
raise generate_exception(err_code)
return acc
def choose_max(self, axis, target = None):
"""
Set the entry at the argmax along the given axis to 1 and all other entries to zero.
"""
m, n = self.shape
assert axis == 0, 'Axis = 1 not implemented.'
if not target:
target = self
err_code = _cudamat.choose_max_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def max(self, axis, target = None):
"""
Find the maximum value along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not provided, a new vector is created for storing the result.
"""
m, n = self.shape
if axis == 0:
if not target:
target = empty((1, n))
elif axis == 1:
if not target:
target = empty((m, 1))
err_code = _cudamat.max_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def argmax(self, axis, target = None):
"""
Find the index with the maximum value along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not provided, a new vector is created for storing the result.
"""
m, n = self.shape
if axis == 0:
if not target:
target = empty((1, n))
elif axis == 1:
if not target:
target = empty((m, 1))
err_code = _cudamat.argmax_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def add_sqsums(self, mat, axis, mult = 1.):
"""
Add the sum of squares of mat along the given dimension to self. 0 represents the
leading dimension and 1 represents the non-leading dimension.
"""
m, n = mat.shape
if axis == 0:
assert self.shape == (1, n), 'Self has shape %s but mat has shape %s' % (self.shape, mat.shape)
elif axis == 1:
assert self.shape == (m, 1)
err_code = _cudamat.sqsum_by_axis(mat.p_mat, self.p_mat,
ct.c_int(axis), ct.c_float(mult),
ct.c_float(1.0))
if err_code:
raise generate_exception(err_code)
def sqsum(self, axis, target = None):
"""
Find the sum of squares along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not provided, a new vector is created for storing the result.
"""
m, n = self.shape
if axis == 0:
if not target:
target = empty((1, n))
elif axis == 1:
if not target:
target = empty((m, 1))
err_code = _cudamat.sqsum_by_axis(self.p_mat, target.p_mat, ct.c_int(axis), 1.0, 0.0)
if err_code:
raise generate_exception(err_code)
return target
def norm_limit(self, norm, axis, target = None):
"""
Limit the norm along the given dimension to be 'norm', where 0
represents the leading dimension and 1 represents the non-leading
dimension. If a target is not provided, self is used as target.
"""
m, n = self.shape
if not target:
target = self
err_code = _cudamat.normlimit_by_axis(self.p_mat, target.p_mat,
ct.c_int(axis), ct.c_float(norm))
if err_code:
raise generate_exception(err_code)
return target
def apply_softmax(self, target = None):
"""
Apply the softmax activation function.
"""
return softmax(self, target)
def sign(self, target = None):
"""
Find the sign of each element of the matrix.
"""
if not target:
target = empty((self.mat.size[0], self.mat.size[1]))
err_code = _cudamat.sign(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def apply_cos(self, target = None):
"""
Apply cos to each element of the matrix.
"""
return cos(self, target)
def apply_sin(self, target = None):
"""
Apply sin to each element of the matrix.
"""
return sin(self, target)
def apply_sigmoid(self, target = None):
"""
Apply the logistic sigmoid to each element of the matrix.
"""
return sigmoid(self, target)
def reciprocal(self, target = None):
"""
Find the reciprocal of each element of the matrix.
"""
if not target:
target = self
err_code = _cudamat.reciprocal(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def dot(self, mat2, mult=1.0, target = None):
"""
Multiply the matrix by mat2 from the right and multiply by scalar mult.
"""
return dot(self, mat2, mult, target)
def add_dot(self, m1, m2, mult=1.0):
"""
Add the dot product of m1 and m2 to the matrix.
"""
err_code = _cudamat.dot(m1.p_mat, m2.p_mat, self.p_mat, ct.c_float(1.), ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def subtract_dot(self, m1, m2):
"""
Subtract the dot product of m1 and m2 from the matrix.
"""
err_code = _cudamat.dot(m1.p_mat, m2.p_mat, self.p_mat, ct.c_float(1.), ct.c_float(-1.))
if err_code:
raise generate_exception(err_code)
return self
def add_mult_sign(self, mat2, mult = 1.):
"""
Add multiple of sign of mat2 to the matrix.
"""
err_code = _cudamat.add_mult_sign(self.p_mat, mat2.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def add_mult(self, mat2, mult = 1.):
"""
Add multiple of mat2 to the matrix.
"""
err_code = _cudamat.add_mult(self.p_mat, mat2.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def subtract_mult(self, mat2, mult = 1.):
"""
Subtract a multiple of mat2 from the matrix.
"""
err_code = _cudamat.add_mult(self.p_mat, mat2.p_mat, ct.c_float(-1. * mult))
if err_code:
raise generate_exception(err_code)
return self
def add(self, val, target = None):
"""Add val to self, where val can be a scalar or a CUDAMatrix with the
same dimensions as self. """
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.add_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.add_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def accumulate_columns(self, indices, target, mult=1.0, avg=False):
if not target:
target = self
if avg:
avgg = 1
else:
avgg = 0
err_code = _cudamat.accumulate_columns(self.p_mat, indices.p_mat, target.p_mat, ct.c_float(mult), ct.c_int(avgg))
if err_code:
raise generate_exception(err_code)
return target
def expand(self, expansion_indices, target):
err_code = _cudamat.expand(self.p_mat, expansion_indices.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def expand_and_add(self, val, expansion_indices, target = None, mult=1.0):
if not target:
target = self
if isinstance(val, CUDAMatrix) and isinstance(expansion_indices, CUDAMatrix):
err_code = _cudamat.expand_and_add(self.p_mat, val.p_mat, expansion_indices.p_mat, target.p_mat, ct.c_float(mult))
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def subtract(self, val, target = None):
"""Subtract val from self, where val can be a scalar or a CUDAMatrix with
the same dimensions as self. """
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.subtract_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.add_scalar(self.p_mat, ct.c_float(-1*val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def divide(self, val, target = None):
"""Divide self by val, where val can be a scalar or a CUDAMatrix with the
same dimensions as self. """
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.divide_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.divide_by_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def mult(self, val, target = None):
"""Multiply self by val, where val can be a scalar or a CUDAMatrix with
the same dimensions as self. """
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.mult_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _cudamat.mult_by_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def apply_cos_deriv(self, val, target = None):
"""
Apply cos derivative, where val is the activation of cos units.
"""
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.apply_cos_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def apply_sin_deriv(self, val, target = None):
"""
Apply sin derivative, where val is the activation of sin units.
"""
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.apply_sin_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def get_softmax_correct(self, labels, target):
"""
target[i] = 1, iff labels[i] is correctly predicted; 0 otherwise.
"""
assert labels.shape == (1, self.shape[1])
assert target.shape == labels.shape
if isinstance(labels, CUDAMatrix):
err_code = _cudamat.get_softmax_correct(self.p_mat, labels.p_mat, target.p_mat)
else:
raise ValueError, "labels must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def get_softmax_cross_entropy(self, labels, target, tiny=1e-10):
"""
target[i] = -log(self[label[i]] + tiny).
"""
assert labels.shape == (1, self.shape[1])
assert target.shape == labels.shape
if isinstance(labels, CUDAMatrix):
err_code = _cudamat.get_softmax_cross_entropy(self.p_mat, labels.p_mat, target.p_mat, ct.c_float(tiny))
else:
raise ValueError, "labels must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def apply_softmax_grad(self, labels, target = None):
"""
Apply softmax derivative, where labels are the correct labels.
"""
if not target:
target = self
assert labels.shape == (1, self.shape[1])
assert target.shape == self.shape
if isinstance(labels, CUDAMatrix):
err_code = _cudamat.apply_softmax_grad(self.p_mat, labels.p_mat, target.p_mat)
else:
raise ValueError, "labels must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def apply_logistic_deriv(self, val, target = None):
"""
Apply logistic derivative, where val is the activation of logistic units.
"""
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.apply_logistic_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def apply_tanh_deriv(self, val, target = None):
"""
Apply tanh derivative, where val is the activation of the units.
"""
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.apply_tanh_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def apply_rectified_linear_deriv(self, val, target = None):
"""
Apply rectified linear derivative, where val is the activation of the units.
"""
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.apply_rectified_linear_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def apply_rectified_linear_smooth_deriv(self, val, target = None):
"""
Apply rectified linear smooth derivative, where val is the activation of the units.
"""
if not target:
target = self
if isinstance(val, CUDAMatrix):
err_code = _cudamat.apply_rectified_linear_smooth_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def assign_scalar(self, alpha):
"""
Assign scalar alpha to every element of the matrix.
"""
err_code = _cudamat.assign_scalar(self.p_mat, ct.c_float(alpha))
if err_code:
raise generate_exception(err_code)
return self
@deprecated
def mult_by_scalar(self, alpha, target = None):
"""
Multiply the matrix by a scalar.
"""
if not target:
target = self
err_code = _cudamat.mult_by_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def div_by_scalar(self, alpha, target = None):
"""
Divide the matrix by a scalar.
"""
if not target:
target = self
err_code = _cudamat.divide_by_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def add_scalar(self, alpha, target = None):
"""
Increment the matrix by a scalar.
"""
if not target:
target = self
err_code = _cudamat.add_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def euclid_norm(self):
err_code = ct.c_int(0)
res = _cudamat.euclid_norm(self.p_mat, ct.byref(err_code))
if err_code:
raise generate_exception(err_code.value)
return res
def select_columns(self, indices, target):
"""
copies some columns of self into target.
<indices> must be a row vector. Its elements are float32's representing integers, e.g. "34.0" means the integer "34".
after this call, for all r,c, target[r,c]=self[r,indices[c]].
This returns target.
Negative indices are interpreted in the usual Python way: all elements of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an exception (because the programmer was lazy). Instead, they result in NaN values in <target>.
"""
err_code = _cudamat.selectRows(self.p_mat, target.p_mat, indices.p_mat)
if err_code:
raise generate_exception(err_code)
return target
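# Aside (not part of the original source): a minimal usage sketch for
# select_columns. The index vector is an ordinary float32 CUDAMatrix whose
# entries encode integers; the shapes below are illustrative assumptions.
#
#   src = CUDAMatrix(reformat(np.random.rand(5, 8)))
#   idx = CUDAMatrix(reformat([[7, 0, 3]]))   # pick columns 7, 0 and 3
#   out = empty((5, 3))
#   src.select_columns(idx, out)              # out[:, c] = src[:, idx[c]]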
def swap_columns(self, indices1, indices2, target):
"""
swap columns at indices1 of self with columns at indices2 of target.
<indices1> and <indices2> must be row vectors of equal length. Their elements are float32's representing integers, e.g. "34.0" means the integer "34".
after this call, for all r,c, target[r,indices2[c]] = self[r,indices1[c]].
self can be the same as target, but then the result will be non-deterministic if there is overlap between indices1 and indices2. Can be used for in-place shuffling by making sure indices1 and indices2 do not overlap.
This returns target.
Negative indices are interpreted in the usual Python way: all elements of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an exception (because the programmer was lazy). Instead, they result in NaN values in <target>.
"""
assert indices1.shape == indices2.shape
err_code = _cudamat.swapColumns(self.p_mat, target.p_mat, indices1.p_mat, indices2.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def set_selected_columns(self, indices, source):
"""
copies all columns of source into some columns of self.
<indices> must be a row vector. Its elements are float32's representing
integers, e.g. "34.0" means the integer "34". after this call, for all
r,c, self[r,indices[c]]=source[r,c]. This returns self.
Negative indices are interpreted in the usual Python way: all elements
of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an
exception (because the programmer was lazy). Instead, they result in NaN
values in <self>.
"""
err_code = _cudamat.setSelectedRows(self.p_mat, source.p_mat, indices.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def empty(shape):
"""
Creates and returns a new CUDAMatrix with the given shape.
"""
mat = cudamat()
err_code = _cudamat.init_empty(ct.pointer(mat), ct.c_int(shape[0]), ct.c_int(shape[1]))
if err_code:
raise generate_exception(err_code)
return CUDAMatrix(mat)
def sum(mat, axis, target = None):
"""
Sum the matrix along the given dimension, where 0 represents the leading
dimension and 1 represents the non-leading dimension. If a target is
not provided, a new vector is created for storing the result.
"""
m = _cudamat.get_leading_dimension(mat.p_mat)
n = _cudamat.get_nonleading_dimension(mat.p_mat)
if axis == 0:
# sum along leading dimension
left = CUDAMatrix.ones.slice(0, m)
left.set_trans(True)
right = mat
if not target:
target = empty((1, n))
elif axis == 1:
# sum along non-leading dimension
left = mat
right = CUDAMatrix.ones.slice(0, n)
if not target:
target = empty((m, 1))
err_code = _cudamat.dot(left.p_mat, right.p_mat, target.p_mat, ct.c_float(0.), ct.c_float(1.))
if err_code:
raise generate_exception(err_code)
return target
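# Aside (not part of the original source): a reminder of the axis convention
# used by sum and the other reductions in this module -- 0 is the leading
# dimension and 1 the non-leading dimension. Assuming cublas_init() has been
# called (it allocates CUDAMatrix.ones), for a 4 x 6 matrix:
#
#   a = CUDAMatrix(reformat(np.random.rand(4, 6)))
#   col_sums = sum(a, axis=0)   # shape (1, 6): one sum per column
#   row_sums = sum(a, axis=1)   # shape (4, 1): one sum per row
#   total = a.sum()             # Python float: sum over all elements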
def dot(m1, m2, mult=1.0, target = None):
"""
Find the dot product between m1 and m2.
"""
if not target:
m = _cudamat.get_leading_dimension(m1.p_mat)
n = _cudamat.get_nonleading_dimension(m2.p_mat)
target = empty((m, n))
err_code = _cudamat.dot(m1.p_mat, m2.p_mat, target.p_mat, ct.c_float(0.), ct.c_float(1.))
if err_code:
raise generate_exception(err_code)
return target
def vdot(m1, m2):
"""
Compute the vector dot product of matrices m1 and m2.
"""
err_code = ct.c_int(0)
res = _cudamat.vdot(m1.p_mat, m2.p_mat, ct.byref(err_code))
if err_code:
raise generate_exception(err_code.value)
return res
def softmax(mat, target = None):
"""
Apply the softmax function to the matrix mat.
"""
if target:
err_code = _cudamat.softmax(mat.p_mat, target.p_mat)
else:
err_code = _cudamat.softmax_overwrite(mat.p_mat)
target = mat
if err_code:
raise generate_exception(err_code)
return target
def cos(mat, target = None):
"""
Apply cos to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_cos(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def sin(mat, target = None):
"""
Apply sin to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_sin(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def sigmoid(mat, target = None):
"""
Apply the logistic sigmoid to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_sigmoid(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def tanh(mat, target = None):
"""
Apply tanh to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_tanh(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def abs(mat, target = None):
"""
Apply abs to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_abs(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def log_1_plus_exp(mat, target = None, exact=False):
"""
Apply log(1+exp(x)) to each element of the matrix mat. If exact is True, use
slow and accurate log and exp.
"""
if not target:
target = mat
if exact:
err_code = _cudamat.apply_log_1_plus_exp_exact(mat.p_mat, target.p_mat)
else:
err_code = _cudamat.apply_log_1_plus_exp(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def log(mat, target = None, tiny=0.0):
"""
Find the natural logarithm of each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_log(mat.p_mat, target.p_mat, ct.c_float(tiny))
if err_code:
raise generate_exception(err_code)
return target
def exp(mat, target = None):
"""
Apply the exponential function to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_exp(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def ceil(mat, target = None):
"""
Apply the ceil function to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_ceil(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def floor(mat, target = None):
"""
Apply the floor function to each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_floor(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def sqrt(mat, target = None):
"""
Compute the square root of each element of the matrix mat.
"""
if not target:
target = mat
err_code = _cudamat.apply_sqrt(mat.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def cross_entropy_bernoulli(mat, p, target = None, tiny=1e-10):
"""
Compute -mat*log(p) - (1-mat).*log(1-p)
"""
if not target:
target = mat
if isinstance(p, CUDAMatrix):
err_code = _cudamat.compute_cross_entropy_bernoulli(mat.p_mat, p.p_mat, target.p_mat, ct.c_float(tiny))
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def cross_entropy(mat, p, target = None, tiny=1e-10):
"""
Compute -mat*log(p)
"""
if not target:
target = mat
if isinstance(p, CUDAMatrix):
err_code = _cudamat.compute_cross_entropy(mat.p_mat, p.p_mat, target.p_mat, ct.c_float(tiny))
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def correct_preds(mat, p, target = None, cutoff=0.5):
"""
Compute mat*(p >= cutoff) + (1-mat).*(p < cutoff)
"""
if not target:
target = mat
if isinstance(p, CUDAMatrix):
err_code = _cudamat.correct_preds(mat.p_mat, p.p_mat, target.p_mat, ct.c_float(cutoff))
else:
raise ValueError, "Value must be of type CUDAMatrix."
if err_code:
raise generate_exception(err_code)
return target
def pow(mat, p, target = None):
"""
If p is a scalar, compute the 'p'th power of each element of the matrix mat,
otherwise raise each element of the matrix mat to the power given by the
corresponding element of the matrix p.
"""
if not target:
target = mat
if isinstance(p, CUDAMatrix):
err_code = _cudamat.apply_pow_matrix(mat.p_mat, p.p_mat, target.p_mat)
elif isinstance(p, (int, float)):
err_code = _cudamat.apply_pow(mat.p_mat, ct.c_float(p), target.p_mat)
else:
raise ValueError, "Value must be of type CUDAMatrix, int, or float."
if err_code:
raise generate_exception(err_code)
return target
def cuda_sync_threads():
_cudamat.cuda_sync_threads()
def reformat(array):
"""
Returns array as a float32 array in FORTRAN order.
"""
return np.array(array, dtype=np.float32, order='F')
def cuda_set_device(dev_id):
"""
Selects the CUDA device with the given ID.
"""
err_code = _cudamat.cuda_set_device(ct.c_int(dev_id))
if err_code:
raise generate_exception(err_code)
def cublas_init():
"""
Initialize Cublas.
"""
_cudamat.cublas_init()
CUDAMatrix.ones = CUDAMatrix(np.ones((MAX_ONES, 1), dtype=np.float32, order = 'F'))
init = cublas_init
def cublas_shutdown():
"""
Shut down Cublas.
"""
CUDAMatrix.ones = 0
_cudamat.cublas_shutdown()
shutdown = cublas_shutdown
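# Aside (not part of the original file): a minimal end-to-end sketch of how
# this module is typically driven; the shapes, data and import name are
# illustrative assumptions.
#
#   import numpy as np
#   import cudamat as cm
#
#   cm.cublas_init()                       # also allocates CUDAMatrix.ones
#   a = cm.CUDAMatrix(cm.reformat(np.random.rand(128, 64)))
#   b = cm.CUDAMatrix(cm.reformat(np.random.rand(64, 32)))
#   c = cm.dot(a, b)                       # (128, 32) product on the GPU
#   cm.sigmoid(c)                          # in place, since no target is given
#   cm.cublas_shutdown()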
|
gomyway/deeplink
|
cudamat/cudamat.py
|
Python
|
gpl-3.0
| 57,243
|
[
"Gaussian"
] |
8aa88de50fae60bf44ca8076da30a29a71896c496ce027f5806099a218d05bd0
|
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""OAuth2 related utilities and implementation of browser based login flow."""
# pylint: disable=W0613
import base64
import BaseHTTPServer
import collections
import datetime
import json
import logging
import optparse
import os
import socket
import sys
import threading
import time
import urllib
import urlparse
import webbrowser
# All libraries here expect to find themselves in sys.path.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party'))
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party', 'pyasn1'))
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party', 'rsa'))
import httplib2
import rsa
from pyasn1.codec.der import decoder
from pyasn1.type import univ
from oauth2client import client
from oauth2client import multistore_file
from third_party import requests
from utils import tools
# Path to a file with cached OAuth2 credentials used by default. Can be
# overridden by command line option or env variable.
DEFAULT_OAUTH_TOKENS_CACHE = os.path.join(
os.path.expanduser('~'), '.isolated_oauth')
# List of space separated OAuth scopes for generated tokens. GAE apps usually
# use userinfo.email scope for authentication.
OAUTH_SCOPES = 'https://www.googleapis.com/auth/userinfo.email'
# Endpoint to generate access tokens.
OAUTH_TOKEN_ENDPOINT = 'https://www.googleapis.com/oauth2/v3/token'
# OAuth authentication method configuration, used by utils/net.py.
# See doc string for 'make_oauth_config' for meaning of fields.
OAuthConfig = collections.namedtuple('OAuthConfig', [
'disabled',
'tokens_cache',
'no_local_webserver',
'webserver_port',
'service_account_json',
])
# Access token with its expiration time (UTC datetime, or None if not known).
AccessToken = collections.namedtuple('AccessToken', [
'token',
'expires_at',
])
# Service account credentials as loaded from JSON file.
ServiceAccountCredentials = collections.namedtuple('ServiceAccountCredentials',
[
'client_email',
'client_id',
'private_key', # PEM encoded.
'private_key_id',
])
# Configuration fetched from a service, returned by _fetch_service_config.
_ServiceConfig = collections.namedtuple('_ServiceConfig', [
'client_id',
'client_secret',
'primary_url',
])
# Process cache of _fetch_service_config results.
_service_config_cache = {}
_service_config_cache_lock = threading.Lock()
class BadServiceAccountCredentials(Exception):
"""Service account JSON is missing or not valid."""
def make_oauth_config(
disabled=None,
tokens_cache=None,
no_local_webserver=None,
webserver_port=None,
service_account_json=None):
"""Returns new instance of OAuthConfig.
If some config option is not provided or None, it will be set to a reasonable
default value. This function also acts as an authoritative place for default
values of corresponding command line options.
Args:
disabled: True to completely turn off OAuth authentication.
tokens_cache: path to a file with cached OAuth2 credentials.
no_local_webserver: if True, do not try to run local web server that
handles redirects. Use copy-pasted verification code instead.
webserver_port: port to run local webserver on.
service_account_json: path to JSON file with service account credentials.
"""
if tokens_cache is None:
tokens_cache = os.environ.get(
'SWARMING_AUTH_TOKENS_CACHE', DEFAULT_OAUTH_TOKENS_CACHE)
if no_local_webserver is None:
no_local_webserver = tools.get_bool_env_var(
'SWARMING_AUTH_NO_LOCAL_WEBSERVER')
if webserver_port is None:
webserver_port = 8090
if service_account_json is None:
service_account_json = os.environ.get('SWARMING_AUTH_SERVICE_ACCOUNT_JSON')
if disabled is None:
disabled = tools.is_headless() and not service_account_json
return OAuthConfig(
disabled,
tokens_cache,
no_local_webserver,
webserver_port,
service_account_json)
def add_oauth_options(parser):
"""Appends OAuth related options to OptionParser."""
default_config = make_oauth_config()
parser.oauth_group = optparse.OptionGroup(parser, 'OAuth options')
parser.oauth_group.add_option(
'--auth-disabled',
type=int,
default=int(default_config.disabled),
help='Set to 1 to disable OAuth and rely only on IP whitelist for '
'authentication. Currently used from bots. [default: %default]')
parser.oauth_group.add_option(
'--auth-tokens-cache',
default=default_config.tokens_cache,
help='Path to a file with oauth2client tokens cache. It should be a safe '
'location accessible only to the current user: knowing the content of this '
'file is roughly equivalent to knowing the account password. Can also be '
'set with SWARMING_AUTH_TOKENS_CACHE environment variable. '
'[default: %default]')
parser.oauth_group.add_option(
'--auth-no-local-webserver',
action='store_true',
default=default_config.no_local_webserver,
help='Do not run a local web server when performing OAuth2 login flow. '
'Can also be set with SWARMING_AUTH_NO_LOCAL_WEBSERVER=1 '
'environment variable. [default: %default]')
parser.oauth_group.add_option(
'--auth-host-port',
type=int,
default=default_config.webserver_port,
help='Port a local web server should listen on. Used only if '
'--auth-no-local-webserver is not set. [default: %default]')
parser.oauth_group.add_option(
'--auth-service-account-json',
default=default_config.service_account_json,
help='Path to a JSON file with service account credentials to use. '
'Can be generated by "Generate new JSON key" button in "Credentials" '
'section of any Cloud Console project. The value can also be set '
'with SWARMING_AUTH_SERVICE_ACCOUNT_JSON environment variable. '
'[default: %default]')
parser.add_option_group(parser.oauth_group)
def extract_oauth_config_from_options(options):
"""Given OptionParser with oauth options, extracts OAuthConfig from it.
OptionParser should be populated with oauth options by 'add_oauth_options'.
"""
# Validate service account JSON is correct by trying to load it.
try:
if options.auth_service_account_json:
acc = _load_service_account_json(options.auth_service_account_json)
_parse_private_key(acc.private_key)
except BadServiceAccountCredentials as exc:
raise ValueError('Bad service account credentials: %s' % exc)
return make_oauth_config(
disabled=(
bool(options.auth_disabled) and
not options.auth_service_account_json),
tokens_cache=options.auth_tokens_cache,
no_local_webserver=options.auth_no_local_webserver,
webserver_port=options.auth_host_port,
service_account_json=options.auth_service_account_json)
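# Aside (not part of the original module): a minimal sketch of how the option
# helpers above are usually wired together; the parser and URL are illustrative
# assumptions.
#
#   parser = optparse.OptionParser()
#   add_oauth_options(parser)
#   options, _ = parser.parse_args()
#   oauth_config = extract_oauth_config_from_options(options)
#   token = load_access_token('https://example.appspot.com', oauth_config)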
def load_access_token(urlhost, config):
"""Returns cached AccessToken if it is not expired yet."""
assert isinstance(config, OAuthConfig)
if config.disabled:
return None
auth_service_url = _fetch_auth_service_url(urlhost)
if not auth_service_url:
return None
storage = _get_storage(auth_service_url, config)
credentials = storage.get()
# Missing?
if not credentials or credentials.invalid:
return None
# Expired?
if not credentials.access_token or credentials.access_token_expired:
return None
return AccessToken(credentials.access_token, credentials.token_expiry)
def create_access_token(urlhost, config, allow_user_interaction):
"""Mints and caches new access_token, launching OAuth2 dance if necessary.
Args:
urlhost: base URL of a host to make OAuth2 token for.
config: OAuthConfig instance.
allow_user_interaction: if False, do not use interactive browser based
flow (return None instead if it is required).
Returns:
AccessToken on success.
None on error or if OAuth2 flow was interrupted.
"""
assert isinstance(config, OAuthConfig)
if config.disabled:
return None
auth_service_url = _fetch_auth_service_url(urlhost)
if not auth_service_url:
return None
storage = _get_storage(auth_service_url, config)
credentials = None
if config.service_account_json:
# 2-legged flow that uses service account credentials.
try:
service_account = _load_service_account_json(config.service_account_json)
except BadServiceAccountCredentials as e:
logging.error('Bad service account credentials: %s', e)
return None
# Body of token refresh request (with JWT assertion signed with secret key).
body = urllib.urlencode({
'assertion': _make_assertion_jwt(service_account),
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
})
# Exchange it for access_token.
http = httplib2.Http(ca_certs=tools.get_cacerts_bundle())
resp, content = http.request(
uri=OAUTH_TOKEN_ENDPOINT,
method='POST',
body=body,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
if resp.status != 200:
logging.error(
'Failed to grab access token for service account: %r', content)
return None
try:
token = json.loads(content)
access_token = token['access_token']
expires_at = None
if 'expires_in' in token:
expires_at = datetime.datetime.utcnow()
expires_at += datetime.timedelta(seconds=int(token['expires_in']))
except (KeyError, ValueError) as e:
logging.error('Unexpected access token response format: %s', e)
return None
credentials = client.OAuth2Credentials(
access_token=access_token,
client_id=service_account.client_id,
client_secret=None,
refresh_token=None,
token_expiry=expires_at,
token_uri=None,
user_agent=None)
else:
# 3-legged flow with (perhaps cached) refresh token.
credentials = storage.get()
refreshed = False
if credentials and not credentials.invalid:
try:
credentials.refresh(httplib2.Http(ca_certs=tools.get_cacerts_bundle()))
refreshed = True
except client.Error as err:
logging.error('OAuth error: %s', err)
# Refresh token is missing or invalid, go through full flow.
if not refreshed:
if not allow_user_interaction:
return None
credentials = _run_oauth_dance(auth_service_url, config)
if not credentials:
return None
# Success.
logging.info('OAuth access_token refreshed. Expires in %s.',
credentials.token_expiry - datetime.datetime.utcnow())
credentials.set_store(storage)
storage.put(credentials)
return AccessToken(credentials.access_token, credentials.token_expiry)
def purge_access_token(urlhost, config):
"""Deletes OAuth tokens that can be used to access |urlhost|."""
assert isinstance(config, OAuthConfig)
auth_service_url = _fetch_auth_service_url(urlhost)
if auth_service_url:
_get_storage(auth_service_url, config).delete()
def _get_storage(urlhost, config):
"""Returns oauth2client.Storage with tokens to access |urlhost|."""
# Do not mix access_token caches for different service accounts.
if config.service_account_json:
creds = _load_service_account_json(config.service_account_json)
key = 'sa:%s:%s' % (creds.client_id, urlhost.rstrip('/'))
else:
key = urlhost.rstrip('/')
return multistore_file.get_credential_storage_custom_string_key(
config.tokens_cache, key)
def _fetch_auth_service_url(urlhost):
"""Fetches URL of a main authentication service used by |urlhost|.
Returns:
* If |urlhost| is using an authentication service, returns its URL.
* If |urlhost| is not using an authentication service, returns |urlhost|.
* If there was an error communicating with |urlhost|, returns None.
"""
service_config = _fetch_service_config(urlhost)
if not service_config:
return None
url = (service_config.primary_url or urlhost).rstrip('/')
assert url.startswith(
('https://', 'http://127.0.0.1:', 'http://localhost:')), url
return url
def _fetch_service_config(urlhost):
"""Fetches OAuth related configuration from a service.
The configuration includes OAuth client_id and client_secret, as well as
URL of a primary authentication service (or None if not used).
Returns:
Instance of _ServiceConfig on success, None on failure.
"""
def do_fetch():
# client_secret is not really a secret in this case, so an attacker can
# impersonate the service's identity in the OAuth2 flow. That's generally
# fine as long as the list of allowed redirect_uri's associated with client_id
# is limited to 'localhost' or 'urn:ietf:wg:oauth:2.0:oob'. In that case an
# attacker needs some process running on the user's machine to successfully
# complete the flow and grab the access_token. When you have malicious code
# running on your machine you're screwed anyway.
response = requests.get(
'%s/auth/api/v1/server/oauth_config' % urlhost.rstrip('/'),
verify=tools.get_cacerts_bundle())
if response.status_code == 200:
try:
config = response.json()
if not isinstance(config, dict):
raise ValueError()
return _ServiceConfig(
config['client_id'],
config['client_not_so_secret'],
config.get('primary_url'))
except (KeyError, ValueError) as err:
logging.error('Invalid response from the service: %s', err)
else:
logging.warning(
'Error when fetching oauth_config, HTTP status code %d',
response.status_code)
return None
# Use local cache to avoid unnecessary network calls.
with _service_config_cache_lock:
if urlhost not in _service_config_cache:
config = do_fetch()
if config:
_service_config_cache[urlhost] = config
return _service_config_cache.get(urlhost)
# Service account related code.
def _load_service_account_json(path):
"""Loads ServiceAccountCredentials from a JSON file.
Raises BadServiceAccountCredentials if file is missing or not valid.
"""
try:
with open(path, 'r') as f:
data = json.load(f)
return ServiceAccountCredentials(
client_email=str(data['client_email']),
client_id=str(data['client_id']),
private_key=str(data['private_key']),
private_key_id=str(data['private_key_id']))
except IOError as e:
raise BadServiceAccountCredentials('Can\'t open %s: %s' % (path, e))
except ValueError as e:
raise BadServiceAccountCredentials('Not a JSON file %s: %s' % (path, e))
except KeyError as e:
raise BadServiceAccountCredentials('Missing key in %s: %s' % (path, e))
def _parse_private_key(pem):
"""PEM encoded OpenSSL private RSA key -> rsa.PrivateKey."""
# Cloud console generates OpenSSL compatible private RSA keys. 'rsa' library
# doesn't support them natively. Do some ASN unwrapping to extract naked
# RSA key (in der-encoded form). See https://www.ietf.org/rfc/rfc2313.txt.
try:
der = rsa.pem.load_pem(pem, 'PRIVATE KEY')
keyinfo, _ = decoder.decode(der)
if keyinfo[1][0] != univ.ObjectIdentifier('1.2.840.113549.1.1.1'):
raise BadServiceAccountCredentials(
'Not a DER-encoded OpenSSL private RSA key')
private_key_der = keyinfo[2].asOctets()
except IndexError:
raise BadServiceAccountCredentials(
'Not a DER-encoded OpenSSL private RSA key')
return rsa.PrivateKey.load_pkcs1(private_key_der, format='DER')
def _make_assertion_jwt(service_account):
"""Generates signed assertion JWT for 2-legged OAuth flow."""
# For more info see:
# https://developers.google.com/accounts/docs/OAuth2ServiceAccount.
now = long(time.time())
payload = {
'aud': OAUTH_TOKEN_ENDPOINT,
'scope': OAUTH_SCOPES,
'iat': now,
'exp': now + 3600,
'iss': service_account.client_email,
}
# oauth2client knows how to use PyCrypto or PyOpenSSL for signing. Both are
# heavy libraries that require compiled extensions. Use the pure-python 'rsa'
# lib instead. It is slower, but we do not care, since this code path is
# exercised only when the access token expires (once an hour).
pkey = _parse_private_key(service_account.private_key)
return _make_signed_jwt(payload, pkey)
def _make_signed_jwt(payload, pkey):
"""Wraps |payload| dict into signed JSON Web Token."""
# See http://self-issued.info/docs/draft-jones-json-web-token.html.
as_json = lambda d: json.dumps(d, sort_keys=True, separators=(',', ':'))
b64encode = lambda d: base64.urlsafe_b64encode(d).rstrip('=')
to_sign = '%s.%s' % (
b64encode(as_json({'typ': 'JWT', 'alg': 'RS256'})),
b64encode(as_json(payload)))
signature = rsa.sign(to_sign, pkey, 'SHA-256')
return '%s.%s' % (to_sign, b64encode(signature))
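# Aside (not part of the original module): the value returned above is a
# standard three-part JWT of the form header.payload.signature. With the same
# 'rsa' library, verification against the matching public key (an assumption
# here) would look roughly like:
#
#   signed_part, sig_b64 = jwt.rsplit('.', 1)
#   signature = base64.urlsafe_b64decode(sig_b64 + '=' * (-len(sig_b64) % 4))
#   rsa.verify(signed_part, signature, public_key)   # raises on mismatch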
# The chunk of code below is based on oauth2client.tools module, but adapted for
# usage of _fetch_service_config, our command line arguments, and so on.
def _run_oauth_dance(urlhost, config):
"""Perform full OAuth2 dance with the browser."""
def out(s):
print s
def err(s):
print >> sys.stderr, s
# Fetch client_id and client_secret from the service itself.
service_config = _fetch_service_config(urlhost)
if not service_config:
err('Couldn\'t fetch OAuth configuration')
return None
if not service_config.client_id or not service_config.client_secret:
err('OAuth is not configured on the service')
return None
flow = client.OAuth2WebServerFlow(
service_config.client_id,
service_config.client_secret,
OAUTH_SCOPES,
approval_prompt='force')
use_local_webserver = not config.no_local_webserver
port = config.webserver_port
if use_local_webserver:
success = False
try:
httpd = ClientRedirectServer(('localhost', port), ClientRedirectHandler)
except socket.error:
pass
else:
success = True
use_local_webserver = success
if not success:
out(
'Failed to start a local webserver listening on port %d.\n'
'Please check your firewall settings and locally running programs that '
'may be blocking or using those ports.\n\n'
'Falling back to --auth-no-local-webserver and continuing with '
'authentication.\n' % port)
if use_local_webserver:
oauth_callback = 'http://localhost:%s/' % port
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if use_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
out(
'Your browser has been opened to visit:\n\n'
' %s\n\n'
'If your browser is on a different machine then exit and re-run this '
'application with the command-line parameter\n\n'
' --auth-no-local-webserver\n' % authorize_url)
else:
out(
'Go to the following link in your browser:\n\n'
' %s\n' % authorize_url)
try:
code = None
if use_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
err('Authentication request was rejected.')
return None
if 'code' not in httpd.query_params:
err(
'Failed to find "code" in the query parameters of the redirect.\n'
'Try running with --auth-no-local-webserver.')
return None
code = httpd.query_params['code']
else:
code = raw_input('Enter verification code: ').strip()
except KeyboardInterrupt:
err('Canceled.')
return None
try:
return flow.step2_exchange(code)
except client.FlowExchangeError as e:
err('Authentication has failed: %s' % e)
return None
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server to handle OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into query_params and then stops serving.
"""
query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into the servers query_params and then stops serving.
"""
def do_GET(self):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
query = self.path.split('?', 1)[-1]
query = dict(urlparse.parse_qsl(query))
self.server.query_params = query
self.wfile.write('<html><head><title>Authentication Status</title></head>')
self.wfile.write('<body><p>The authentication flow has completed.</p>')
self.wfile.write('</body></html>')
def log_message(self, _format, *args):
"""Do not log messages to stdout while running as command line program."""
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/tools/swarming_client/utils/oauth.py
|
Python
|
mit
| 21,103
|
[
"VisIt"
] |
f54b022b6cd7be2aba1e2d266438da956990d86409b82def2da938edf0408604
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dtf(AutotoolsPackage):
"""DTF (Data Transfer Framework) is a general I/O arbitration
middleware designed for multi-component applications that use
file-based component coupling.
DTF works for applications that use the Parallel netCDF (PnetCDF)
library for file I/O. It allows the user to transparently replace
file I/O with sending the data directly between the components.
"""
homepage = "https://github.com/maneka07/DTF"
git = "https://github.com/maneka07/DTF.git"
version('master', branch='master')
variant('cxx', default=True, description='Build the PnetCDF C++ interface')
variant('fortran', default=True, description='Build the PnetCDF Fortran interface')
depends_on('mpi')
depends_on('m4', type='build')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('perl', type='build')
configure_directory = 'pnetcdf'
def setup_build_environment(self, env):
dtf_srcdir = join_path(self.stage.source_path, 'libdtf')
env.append_path('LD_LIBRARY_PATH', self.prefix.lib)
env.append_path('LD_LIBRARY_PATH', dtf_srcdir)
@run_before('autoreconf')
def build_dtf(self):
with working_dir('libdtf'):
make('all', 'MPICC={0}'.format(self.spec['mpi'].mpicc))
def configure_args(self):
dtf_srcdir = join_path(self.stage.source_path, 'libdtf')
args = [
'CFLAGS=-I{0}'.format(dtf_srcdir),
'LDFLAGS=-L{0} -ldtf'.format(dtf_srcdir)
]
args += self.enable_or_disable('cxx')
args += self.enable_or_disable('fortran')
return args
def install(self, spec, prefix):
with working_dir('pnetcdf'):
make('install')
with working_dir('libdtf'):
install('libdtf.*', prefix.lib)
install('dtf.h', prefix.include)
install_tree('doc', prefix.doc)
install_tree('example', prefix.example)
install('COPYRIGHT', prefix)
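# Aside (not part of the original package): with this recipe on the Spack repo
# path, the variants above map directly onto spec syntax, e.g.
#
#   spack install dtf              # default: +cxx +fortran
#   spack install dtf ~fortran     # skip the PnetCDF Fortran interface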
|
LLNL/spack
|
var/spack/repos/builtin/packages/dtf/package.py
|
Python
|
lgpl-2.1
| 2,275
|
[
"NetCDF"
] |
2b4a21bda88679ce5f646d7207a13296a3bbfaacf667c211f9fb26d1d7d2c7ff
|
#!/usr/bin/env python
# coding: utf-8
# Features
# ============================
#
# Features within PHOEBE are anything that can be "attached" to a component or a dataset to inform how to compute the forward-model. These currently include spots and Gaussian processes - but the framework is flexible enough to handle future development to support pulsations, rings, disks, etc.
#
# Although features are entirely optional and may not be used for most systems, let's get familiar with the basics before moving on to computing the forward model.
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# In[2]:
import phoebe
from phoebe import u # units
logger = phoebe.logger()
b = phoebe.default_binary()
# ## Available Features
#
# As you may expect by now, adding a feature will be done through a call to [b.add_feature](../api/phoebe.frontend.bundle.Bundle.add_feature.md) where the first argument is the "kind" of the feature - a list of available options which can be accessed via [phoebe.list_available_features](../api/phoebe.list_available_features.md).
# In[3]:
phoebe.list_available_features()
# The API docs for each of these can be found in [phoebe.parameters.feature](../api/phoebe.parameters.feature.md). Each entry will list the allowable component and/or dataset-types that that kind of feature can be attached to. For example:
# In[4]:
help(phoebe.parameters.feature.spot)
# ## Adding a Feature
# If we look at the API docs for a [spot](../api/phoebe.parameters.feature.spot.md), we can see that it can be attached to any star component, but not to a dataset. So when calling [b.add_feature](../api/phoebe.frontend.bundle.Bundle.add_feature.md), we need to send a valid tag for a component that points to a star (i.e. 'primary' or 'secondary').
# In[5]:
b.add_feature('spot', component='primary', feature='spot01')
# In[6]:
b.get_feature('spot01')
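# (Aside, not part of the original tutorial: once attached, the spot's
# parameters can be adjusted like any other parameter via set_value. The
# parameter names below -- relteff, radius, colat, long -- are assumptions
# based on the spot feature's parameter set.)
# b.set_value('relteff', feature='spot01', value=0.90)
# b.set_value('radius', feature='spot01', value=20)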
# Next
# ----------
#
# That's it for the forward model! Next we'll get started discussing the inverse problem by introducing [distributions](./distributions.ipynb).
#
# Or see some of these advanced topics:
#
# * [Advanced: Spots](spots.ipynb)
# * [Example: Gaussian Processes](../examples/minimal_GPs.ipynb)
# In[ ]:
|
phoebe-project/phoebe2-docs
|
development/tutorials/features.py
|
Python
|
gpl-3.0
| 2,424
|
[
"Gaussian"
] |
46bb1ff7c67d4ff73a06a65fed09ef13fce72ab5fc259273129eed125fed6411
|
"""
ftpwalk -- Walk a hierarchy of files using FTP (Adapted from os.walk()).
"""
def ftpwalk(ftp, top, topdown=True, onerror=None):
"""
Generator that yields tuples of (root, dirs, nondirs).
"""
# Make the FTP object's current directory to the top dir.
ftp.cwd(top)
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
dirs, nondirs = _ftp_listdir(ftp)
except os.error, err:
if onerror is not None:
onerror(err)
return
if topdown:
yield top, dirs, nondirs
for entry in dirs:
dname = entry[0]
path = posixjoin(top, dname)
if entry[-1] is None: # not a link
for x in ftpwalk(ftp, path, topdown, onerror):
yield x
if not topdown:
yield top, dirs, nondirs
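# Aside (not part of the original recipe): a minimal usage sketch, assuming an
# anonymous-login FTP server reachable at host.example.com.
#
#   from ftplib import FTP
#   ftp = FTP('host.example.com')
#   ftp.login()
#   for root, dirs, nondirs in ftpwalk(ftp, '/'):
#       for name, size, mtime, mode, link in nondirs:
#           print root, name, size
#   ftp.quit()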
_calmonths = dict( (x, i+1) for i, x in
enumerate(('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')) )
def _ftp_listdir(ftp):
"""
List the contents of the FTP opbject's cwd and return two tuples of
(filename, size, mtime, mode, link)
one for subdirectories, and one for non-directories (normal files and other
stuff). If the path is a symbolic link, 'link' is set to the target of the
link (note that both files and directories can be symbolic links).
Note: we only parse Linux/UNIX style listings; this could easily be
extended.
"""
dirs, nondirs = [], []
listing = []
ftp.retrlines('LIST', listing.append)
for line in listing:
# Parse, assuming a UNIX listing
words = line.split(None, 8)
if len(words) < 6:
print >> sys.stderr, 'Warning: Error reading short line', line
continue
# Get the filename.
filename = words[-1].lstrip()
if filename in ('.', '..'):
continue
# Get the link target, if the file is a symlink.
extra = None
i = filename.find(" -> ")
if i >= 0:
# words[0] had better start with 'l'...
extra = filename[i+4:]
filename = filename[:i]
# Get the file size.
size = int(words[4])
# Get the date.
year = datetime.today().year
month = _calmonths[words[5]]
day = int(words[6])
mo = re.match('(\d+):(\d+)', words[7])
if mo:
hour, min = map(int, mo.groups())
else:
mo = re.match('(\d\d\d\d)', words[7])
if mo:
year = int(mo.group(1))
hour, min = 0, 0
else:
raise ValueError("Could not parse time/year in line: '%s'" % line)
dt = datetime(year, month, day, hour, min)
mtime = time.mktime(dt.timetuple())
# Get the type and mode.
mode = words[0]
entry = (filename, size, mtime, mode, extra)
if mode[0] == 'd':
dirs.append(entry)
else:
nondirs.append(entry)
return dirs, nondirs
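# A minimal usage sketch (not part of the original recipe). It assumes an
# anonymous-login FTP server at the hypothetical host 'ftp.example.org' and a
# readable '/pub' directory; adjust both for a real server.
if __name__ == '__main__':
    import ftplib
    ftp = ftplib.FTP('ftp.example.org')
    ftp.login()
    for root, dirs, nondirs in ftpwalk(ftp, '/pub'):
        print root, '-', len(dirs), 'dirs,', len(nondirs), 'files'
    ftp.quit()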
|
ActiveState/code
|
recipes/Python/499334_Remove_FTP_directory_walk/recipe-499334.py
|
Python
|
mit
| 3,322
|
[
"VisIt"
] |
2f6425ee138894f6f492e728f6561620a78794859aeaac9a45772c9393d6a77d
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Brian Wray (brian@wrocket.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import subprocess
import json
import unittest
def call_tulip(args):
cmd = ['../../src/tulip']
cmd.extend(args)
out = subprocess.check_output(cmd)
return out.decode('utf-8')
class TestBasicMoveApplication(unittest.TestCase):
    def setUp(self):
        pass
def zero_depth_eval(self, fen):
result = call_tulip(['-evalposition', fen])
parsed_output = json.loads(result)
return int(parsed_output['score'])
def classify_endgame(self, fen):
result = call_tulip(['-classifyendgame', fen])
parsed_output = json.loads(result)
return parsed_output['endgameType']
def size_king_rect(self, square):
result = call_tulip(['-kingrect', square])
parsed_output = json.loads(result)
return int(parsed_output['rectangleSize'])
def find_passed_pawns(self, fen):
result = call_tulip(['-passedpawns', fen])
parsed_output = json.loads(result)
result = dict()
result['w'] = parsed_output['whitePassedPawns']
result['b'] = parsed_output['blackPassedPawns']
return result
def assert_score_approx(self, expected, actual, tolerance=5):
self.assertTrue(abs(expected - actual) < abs(tolerance), "Expected score %i, got %i" % (expected, actual))
def assert_score_better_than_w(self, better, worse, by_at_least=1, by_no_more_than=100):
diff = better - worse
self.assertTrue(diff >= by_at_least, "Expected score to be better than %i by at least %i, got score %i (difference of %i)" % (worse, by_at_least, better, diff))
self.assertTrue(diff <= by_no_more_than, "Expected score to be better than %i by no more than %i, got score %i" % (worse, by_no_more_than, better))
def assert_score_better_than_b(self, better, worse, by_at_least=1, by_no_more_than=100):
diff = abs(better - worse)
self.assertTrue(diff >= by_at_least, "Expected score to be better than %i by at least %i, got score %i (difference of %i)" % (worse, by_at_least, better, diff))
self.assertTrue(diff <= by_no_more_than, "Expected score to be better than %i by no more than %i, got score %i" % (worse, by_no_more_than, better))
def test_zerodepth_initial_position(self):
result = self.zero_depth_eval('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
self.assert_score_approx(0, result)
def test_zerodepth_initial_position_no_bqueen(self):
result = self.zero_depth_eval('rnb1kbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
self.assert_score_approx(900, result)
def test_zerodepth_rr_white_same_rank_strong(self):
on_same_rank = self.zero_depth_eval('2R1k3/8/8/8/8/8/8/2R1K3 b - - 0 1')
not_on_same_rank = self.zero_depth_eval('1R2k3/8/8/8/8/8/8/2R1K3 b - - 0 1')
self.assert_score_better_than_w(on_same_rank, not_on_same_rank)
def test_zerodepth_rq_white_same_rank_strong(self):
on_same_rank = self.zero_depth_eval('2Q1k3/8/8/8/8/8/8/2R1K3 b - - 0 1')
not_on_same_rank = self.zero_depth_eval('1Q2k3/8/8/8/8/8/8/2R1K3 b - - 0 1')
self.assert_score_better_than_w(on_same_rank, not_on_same_rank)
def test_zerodepth_rr_black_same_rank_strong(self):
on_same_rank = self.zero_depth_eval('2r1k3/8/8/8/8/8/8/2r1K3 w - - 0 1')
not_on_same_rank = self.zero_depth_eval('2r1k3/8/8/8/8/8/8/1r2K3 w - - 0 1')
self.assert_score_better_than_b(on_same_rank, not_on_same_rank)
def test_zerodepth_rq_black_same_rank_strong(self):
on_same_rank = self.zero_depth_eval('2q1k3/8/8/8/8/8/8/2r1K3 w - - 0 1')
not_on_same_rank = self.zero_depth_eval('2q1k3/8/8/8/8/8/8/1r2K3 w - - 0 1')
self.assert_score_better_than_b(on_same_rank, not_on_same_rank)
def test_wpawn_better_central_pawn(self):
better = self.zero_depth_eval('rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq - 0 1')
worse = self.zero_depth_eval('rnbqkbnr/pppppppp/8/8/8/4P3/PPPP1PPP/RNBQKBNR b KQkq - 0 1')
self.assert_score_better_than_w(better, worse, by_no_more_than=50)
def test_bpawn_better_central_pawn(self):
better = self.zero_depth_eval('rnbqkbnr/pppp1ppp/8/4p3/8/4P3/PPPP1PPP/RNBQKBNR w KQkq - 0 1')
worse = self.zero_depth_eval('rnbqkbnr/pppp1ppp/4p3/8/8/4P3/PPPP1PPP/RNBQKBNR w KQkq - 0 1')
self.assert_score_better_than_b(better, worse, by_no_more_than=50)
def test_wknight_on_rim_is_dim(self):
better = self.zero_depth_eval('rnbqkbn1/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBN1 b KQkq - 0 1')
worse = self.zero_depth_eval('rnbqkbn1/pppppppp/8/8/8/8/PPPPPPPP/RNBQKB1N w KQkq - 0 1')
self.assert_score_better_than_w(better, worse, by_at_least = 15, by_no_more_than=50)
def test_wknight_better_developed(self):
better = self.zero_depth_eval('rnbqkbnr/pppppppp/8/8/8/2N5/PPPPPPPP/R1BQKBNR b KQkq - 1 1')
worse = self.zero_depth_eval('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
self.assert_score_better_than_w(better, worse, by_at_least = 10, by_no_more_than=70)
def test_bknight_on_rim_is_dim(self):
better = self.zero_depth_eval('1nbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBN1 b KQkq - 0 1')
worse = self.zero_depth_eval('n1bqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBN1 b KQkq - 0 1')
self.assert_score_better_than_b(better, worse, by_at_least = 15, by_no_more_than=50)
def test_bknight_better_developed(self):
better = self.zero_depth_eval('r1bqkbnr/pppppppp/2n5/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 1 1')
worse = self.zero_depth_eval('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR b KQkq - 0 1')
self.assert_score_better_than_b(better, worse, by_at_least = 10, by_no_more_than=70)
def test_opening_doubled_wpawns(self):
better = self.zero_depth_eval('rnbqkbnr/pppppppp/8/8/5P2/6P1/PPPPP2P/RNBQKBNR w KQkq - 0 1')
worse = self.zero_depth_eval('rnbqkbnr/pppppppp/8/8/5P2/5P2/PPPPP2P/RNBQKBNR w KQkq - 0 1')
self.assert_score_better_than_w(better, worse, by_at_least = 10, by_no_more_than=50)
def test_opening_doubled_bpawns(self):
better = self.zero_depth_eval('rnbqkbnr/p2ppppp/1p6/2p5/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
worse = self.zero_depth_eval('rnbqkbnr/p2ppppp/2p5/2p5/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
self.assert_score_better_than_b(better, worse, by_at_least = 10, by_no_more_than=50)
def test_knight_mobility_white(self):
better = self.zero_depth_eval('r4k1r/pppppppp/2P1P3/1P3P2/4N3/1P3P2/2P1P3/5K2 w - - 0 1')
worse = self.zero_depth_eval('r4k1r/pppppppp/2P1P3/1P3P2/3N4/1P3P2/2P1P3/5K2 w - - 0 1')
self.assert_score_better_than_b(better, worse, by_at_least = 50, by_no_more_than=100)
def test_classify_eg_brook(self):
result = self.classify_endgame('8/8/8/8/8/4k2r/8/3K4 b - - 0 1')
self.assertEqual('krvk_black', result)
    def test_classify_eg_wrook(self):
        result = self.classify_endgame('3k4/8/8/7R/8/8/3K4/8 b - - 0 1')
        self.assertEqual('krvk_white', result)
def test_king_rect_size(self):
        desired = {}
desired['a1'] = 1
desired['h8'] = 1
desired['a8'] = 1
desired['h1'] = 1
desired['a2'] = 2
desired['e4'] = 16
desired['f4'] = 12
for (square, size) in desired.items():
result = self.size_king_rect(square)
self.assertEqual(size, result)
def test_passed_pawns_01(self):
result = self.find_passed_pawns('4k3/8/6pp/8/3P1P2/1P6/8/4K3 b - - 0 1')
whitePawns = result['w']
blackPawns = result['b']
self.assertEqual(2, len(whitePawns))
self.assertTrue('b3' in whitePawns)
self.assertTrue('d4' in whitePawns)
self.assertEqual(1, len(blackPawns))
self.assertTrue('h6' in blackPawns)
if __name__ == '__main__':
unittest.main()
|
wrocket/Tulip-Chess
|
tests/basic-tests/test_evaluate.py
|
Python
|
mit
| 9,199
|
[
"Brian"
] |
6180e02825ed0937e5b98701b1e7b8ef902d56b42b705fa5b4886108f4af108d
|
#!/usr/bin/env python
#|
#| Downweight bases that are cytosines
#|
#|
#| Works to perform a C->T transition
#| and a G->A transition
#|
#| @author James Boocock
#| @date 12/09/2014
#|
from Bio import SeqIO
import argparse
def check_c_2_t(base):
    """
    Return True if the observed base is 'T', i.e. a possible C->T
    deamination site.
    """
    return base == 'T'
def check_g_2_a(base):
    """
    Return True if the observed base is 'A', i.e. a possible G->A
    deamination site.
    """
    return base == 'A'
def downweight_quality(quality, change_bases_c=None, change_bases_t=None):
    """
    Return the PHRED qualities with every flagged position (possible C->T or
    G->A damage) downweighted to quality 0.
    """
if(change_bases_t and change_bases_c):
qual_filter=[ a or b for a, b in zip(change_bases_c,change_bases_t)]
elif(change_bases_t):
qual_filter=change_bases_t
else:
qual_filter=change_bases_c
quality=[chr(33 + 0) if filt else q for q, filt in zip(quality,qual_filter)]
return(quality)
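# Illustrative example (not part of the original script): with a 5-base read
# whose first two positions were flagged as C->T candidates,
#   downweight_quality(list('IIIII'), change_bases_c=[True, True, False, False, False])
# returns ['!', '!', 'I', 'I', 'I'], i.e. the flagged bases drop to PHRED 0.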
def filter_fastq(input_file,output_file,downweight_number,ctot,gtoa):
"""
Takes a Fastq file as input and weights the quality of the bases down
at the start and the end of the reads.
"""
in_iterator = SeqIO.parse(input_file,'fastq')
input_records=list(in_iterator)
for i, record in enumerate(input_records):
change_bases_c = None
change_bases_t = None
temp_qual = record.letter_annotations['phred_quality']
if(ctot):
change_bases_c = [check_c_2_t(nuc) and i < downweight_number for i, nuc in enumerate(record.seq)]
if(gtoa):
change_bases_t = [check_g_2_a(nuc) and (len(record.seq)-i) <= downweight_number for i, nuc in enumerate(record.seq)]
new_qual =downweight_quality(temp_qual, change_bases_c ,change_bases_t)
input_records[i].letter_annotations['phred_quality']=new_qual
    handle = open(output_file, 'wt')
SeqIO.write(input_records, handle, "fastq")
import pysam
def filter_bam(input_file, output_file, downweight_number, ctot, gtoa):
"""
Takes a bam file as input and weights the quality of the reads down.
Need to ensure we write the header out :)
Investigate pysam and look for a header,
this should really help us understand how to get this bam filter working
and writing the bam files directly back out to the terminal.
"""
bam = pysam.Samfile(input_file,'rb')
bam_out = pysam.Samfile(output_file, 'wb',template=bam)
for line in bam:
change_bases_c = None
change_bases_t = None
seq = line.seq
qual = line.qual
if(ctot):
change_bases_c = [check_c_2_t(nuc) and i < downweight_number for i, nuc in enumerate(seq)]
if(gtoa):
change_bases_t = [check_g_2_a(nuc) and (len(seq)-i) <= downweight_number for i, nuc in enumerate(seq)]
new_qual = downweight_quality(qual,change_bases_c, change_bases_t)
line.qual = new_qual
bam_out.write(line)
def main():
parser = argparse.ArgumentParser(description='Downweight cytosine bases.')
    parser.add_argument('-d','--downweight-number',dest='downweight',help='Number of bases at each read end to downweight', default=int(2))
parser.add_argument('-c','--c2t',dest='ctot',action='store_true',help='Filter C to T transitions at the start of reads', default=False)
parser.add_argument('-g','--g2a',dest='gtoa',action='store_true',help='Filter G to A transitions at the end of reads', default=False)
parser.add_argument('-i','--input-file',dest='input_file',help='Input File - input_fastq' )
parser.add_argument('-o','--output-file',dest='output_file', help='Output File - out_fastq')
parser.add_argument('-f','--format',dest='format', help="File format fastq or bam",default="fastq")
args = parser.parse_args()
args.downweight = int(args.downweight)
    assert (args.ctot or args.gtoa), "One of --c2t or --g2a needs to be set"
if (args.format == 'fastq'):
filter_fastq(args.input_file, args.output_file, args.downweight, args.ctot, args.gtoa)
elif (args.format == 'bam'):
filter_bam(args.input_file, args.output_file, args.downweight, args.ctot, args.gtoa)
if __name__ == "__main__":
main()
|
smilefreak/NGaDNAP
|
scripts/ancient_filter.py
|
Python
|
mit
| 4,221
|
[
"pysam"
] |
a3eb7bbf3b9fda5432ed9f2a1e5a23b5ad93dd2414aed65ac15e571b26207b41
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Mar 15, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 15, 2012"
import unittest
import numpy as np
from pymatgen import Lattice, Structure
from pymatgen.transformations.site_transformations import \
InsertSitesTransformation, TranslateSitesTransformation, \
ReplaceSiteSpeciesTransformation, RemoveSitesTransformation, \
PartialRemoveSitesTransformation
from monty.os.path import which
enumlib_present = which('multienum.x') and which('makestr.x')
class TranslateSitesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([.5, .5, .5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-",
"O2-", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = TranslateSitesTransformation([0], [0.1, 0.2, 0.3])
s = t.apply_transformation(self.struct)
self.assertTrue(np.allclose(s[0].frac_coords, [0.1, 0.2, 0.3]))
inv_t = t.inverse
s = inv_t.apply_transformation(s)
self.assertTrue(np.allclose(s[0].frac_coords, [0, 0, 0]))
str(t)
def test_to_from_dict(self):
d = TranslateSitesTransformation([0], [0.1, 0.2, 0.3]).as_dict()
t = TranslateSitesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertTrue(np.allclose(s[0].frac_coords, [0.1, 0.2, 0.3]))
str(t)
class ReplaceSiteSpeciesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([.5, .5, .5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-",
"O2-", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = ReplaceSiteSpeciesTransformation({0: "Na"})
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Na1 Li3 O4")
str(t)
def test_to_from_dict(self):
d = ReplaceSiteSpeciesTransformation({0: "Na"}).as_dict()
t = ReplaceSiteSpeciesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Na1 Li3 O4")
class RemoveSitesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([.5, .5, .5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-",
"O2-", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = RemoveSitesTransformation(range(2))
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O4")
str(t)
def test_to_from_dict(self):
d = RemoveSitesTransformation(range(2)).as_dict()
t = RemoveSitesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O4")
class InsertSitesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([.5, .5, .5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-",
"O2-", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = InsertSitesTransformation(["Fe", "Mn"], [[0.1, 0, 0],
[0.1, 0.2, 0.2]])
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li4 Mn1 Fe1 O4")
t = InsertSitesTransformation(["Fe", "Mn"], [[0.001, 0, 0],
[0.1, 0.2, 0.2]])
#Test validate proximity
self.assertRaises(ValueError, t.apply_transformation, self.struct)
def test_to_from_dict(self):
d = InsertSitesTransformation(["Fe", "Mn"],
[[0.1, 0, 0], [0.1, 0.2, 0.2]]).as_dict()
t = InsertSitesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li4 Mn1 Fe1 O4")
class PartialRemoveSitesTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([.5, .5, .5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-",
"O2-", "O2-", "O2-"], coords)
def test_apply_transformation_complete(self):
t = PartialRemoveSitesTransformation(
[tuple(range(4)), tuple(range(4, 8))],
[0.5, 0.5],
PartialRemoveSitesTransformation.ALGO_COMPLETE
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
s = t.apply_transformation(self.struct, 12)
self.assertEqual(len(s), 12)
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
def test_apply_transformation_enumerate(self):
t = PartialRemoveSitesTransformation(
[tuple(range(4)), tuple(range(4, 8))],
[0.5, 0.5],
PartialRemoveSitesTransformation.ALGO_ENUMERATE
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
s = t.apply_transformation(self.struct, 12)
self.assertEqual(len(s), 12)
def test_apply_transformation_best_first(self):
t = PartialRemoveSitesTransformation(
[tuple(range(4)), tuple(range(4, 8))],
[0.5, 0.5],
PartialRemoveSitesTransformation.ALGO_BEST_FIRST
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
def test_apply_transformation_fast(self):
t = PartialRemoveSitesTransformation(
[tuple(range(4)), tuple(range(4, 8))],
[0.5, 0.5],
PartialRemoveSitesTransformation.ALGO_FAST
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
t = PartialRemoveSitesTransformation(
[tuple(range(8))], [0.5],
PartialRemoveSitesTransformation.ALGO_FAST
)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O2")
def test_to_from_dict(self):
d = PartialRemoveSitesTransformation([tuple(range(4))], [0.5]).as_dict()
t = PartialRemoveSitesTransformation.from_dict(d)
s = t.apply_transformation(self.struct)
self.assertEqual(s.formula, "Li2 O4")
def test_str(self):
d = PartialRemoveSitesTransformation([tuple(range(4))], [0.5]).as_dict()
self.assertIsNotNone(str(d))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
migueldiascosta/pymatgen
|
pymatgen/transformations/tests/test_site_transformations.py
|
Python
|
mit
| 9,414
|
[
"pymatgen"
] |
e5572bf227250eaeac6abc5878e9a2065a9cc7449f3d78c796c98d095b6de0e2
|
# Brain connectivity "drops": ROI spheres and connecting tubes rendered on a brain contour.
import os
import numpy as np
import nibabel as nib
from collections import OrderedDict
from mayavi import mlab
from boyle.nifti.roi import get_rois_centers_of_mass
def test_quiver3d():
x, y, z = np.mgrid[-2:3, -2:3, -2:3]
r = np.sqrt(x ** 2 + y ** 2 + z ** 4)
u = y * np.sin(r) / (r + 0.001)
v = -x * np.sin(r) / (r + 0.001)
w = np.zeros_like(z)
obj = mlab.quiver3d(x, y, z, u, v, w, line_width=3, scale_factor=1)
return obj
def get_random_color():
return tuple(np.random.rand(3))
@mlab.show
def show_brain_connectivity_on_atlas(vol, atlas_vol, colors={}, colormap=None,
weights=None, sizes={}, connections=None,
contour_intensity_threshold=0.4):
rois_centers = get_rois_centers_of_mass(atlas_vol)
show_brain_connectivity(vol, rois_centers, colors, colormap, weights, sizes,
connections, contour_intensity_threshold)
@mlab.show
def show_brain_connectivity(vol, rois_centers, colors={}, colormap=None,
weights=None, sizes={}, connections=None,
contour_intensity_threshold=0.4):
"""
:param vol: ndarray
3D brain image
:param rois_centers: ndarray
of shape Nx3 where N is the number of ROIs, and
for each ROI there is a 3D position vector.
:param colors: list of tuples
one tuple for each ROI indicating their colors.
(0. ,0., 0.) for black and (1., 1., 1.) for white.
If None, will see if colormap has been given,
or will plot yellow spheres.
:params colormap: matplotlib colormap LUT
If color param is None, this can be used to select ROI colors.
For a sensible result, weights vector must be given.
:params weights: vector
Vector of floats with size N.
:params sizes: vector of float
Vector with size N
Indicating the size of each drop
:param connections: ndarray
This array can have two different shapes:
- Connectivity matrix: shape NxN, where each component
is a connection weight.
- Connection pairs: 2xN, where each pair is the
indication what two ROIs is connected.
:param contour_intensity_threshold: float
This will indicate the contour process of the
vol surface the voxel intensity threshold
to use to select where to perform the contour.
"""
n_rois = len(rois_centers)
if colors is not None:
assert(n_rois == len(colors))
if connections is not None:
if connections.shape[0] != 2:
assert(n_rois == connections.shape[0] == connections.shape[1])
#plot brain contour
src = mlab.pipeline.scalar_field(vol)
mlab.pipeline.iso_surface(src, contours=[vol.min()+contour_intensity_threshold*vol.ptp(), ],
opacity=0.1)
#mlab.pipeline.volume(src)
#plot drops
    all_rois = np.array(list(rois_centers.keys()))
#mlab.pipeline.iso_surface(src, contours=[vol.max()-0.1*vol.ptp(), ],)
ridx = 0
for rval in rois_centers:
c = rois_centers[rval]
params = {}
params['color'] = colors.get(rval, (0.5, 0.5, 0))
#[rval] if colors is not None else (0.5, 0.5, 0)
params['scale_factor'] = sizes.get(rval, 1)
points = mlab.points3d(c[0], c[1], c[2],
resolution=20,
scale_mode='none',
**params)
ridx += 1
if connections is not None:
#if connectivity matrix, will transform it in array of pairs of indices
if isinstance(connections, np.ndarray):
if connections.shape == (n_rois, n_rois):
conns = np.array(np.where(connections > 0))
weights = connections[connections > 0]
#else, an array of pairs of ROI values has been given
else:
n_links = connections.shape[1]
#transform it in pairs of indices
conns = np.zeros((2, n_links))
for rpidx in list(range(n_links)):
conns[0, rpidx] = np.where(all_rois == connections[0, rpidx])[0][0]
conns[1, rpidx] = np.where(all_rois == connections[1, rpidx])[0][0]
for pidx in list(range(conns.shape[1])):
pair = conns[:, pidx]
if pair[0] != pair[1]:
#which rois?
r1val = all_rois[pair[0]]
r2val = all_rois[pair[1]]
#tube color
#this average color works if they are of different shade,
#otherwise, you should use Lab color space.
r1color = colors.get(r1val, (1, 1, 1))
r2color = colors.get(r2val, (1, 1, 1))
tube_color = tuple(np.mean([r1color, r2color], axis=0))
#extreme points coordinates
roi1coord = rois_centers[r1val]
roi2coord = rois_centers[r2val]
coords = np.concatenate([roi1coord, roi2coord])
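                    # coords is [x1, y1, z1, x2, y2, z2]; the strided slices below
                    # pick out the x, y and z values of both tube endpoints.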
x = coords[0:4:3]
y = coords[1:5:3]
z = coords[2:6:3]
w = weights[pidx]
#mlab.flow
mlab.plot3d(x, y, z, tube_radius=0.5, tube_sides=6,
color=tube_color)
mlab.pipeline.image_plane_widget(src,
plane_orientation='z_axes',
slice_index=10,
colormap='gray')
return 0
if __name__ == '__main__':
wd = '/home/alexandre/Dropbox/Documents/work/cobre/'
atlas = os.path.join(wd, 'aal_2mm.nii.gz')
anat = os.path.join(wd, 'MNI152_T1_2mm_brain.nii.gz')
atlas_vol = nib.load(atlas).get_data()
anat_vol = nib.load(anat).get_data()
roisvals = np.unique(atlas_vol)
roisvals = roisvals[roisvals != 0]
n_rois = len(roisvals)
rois_linspace = np.linspace(0.2, 1, n_rois)
#rois_centers = get_rois_centers_of_mass(atlas_vol)
idx = 0
sizes = OrderedDict()
colors = OrderedDict()
for r in roisvals:
colors[r] = (0, rois_linspace[idx], 0)
sizes[r] = 10*rois_linspace[idx]
idx += 1
#connections = np.random.randint(0, 2, (n_rois, n_rois))
connections = np.random.choice([0, 1], size=(n_rois, n_rois),
p=[99.7/100, 0.3/100])
show_brain_connectivity_on_atlas(anat_vol, atlas_vol, colors,
connections=connections)
|
Neurita/cajal
|
cajal/connectivity.py
|
Python
|
bsd-3-clause
| 6,547
|
[
"Mayavi"
] |
597f8ab6d751bc0b00748b8ac85fb69b8555d7dafec673ebe594033423e6584d
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkHyperOctreeSampleFunction(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkHyperOctreeSampleFunction(), 'Processing.',
(), ('vtkHyperOctree',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkHyperOctreeSampleFunction.py
|
Python
|
bsd-3-clause
| 500
|
[
"VTK"
] |
abfd95093a9d9ecbd30558fc73eec1193ff9ed374e05d9c63b682b63eb55a0d9
|
#!/usr/bin/env python
import math
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
reader = vtk.vtkPNGReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/fullhead15.png")
# Take the gradient in X, and smooth in Y
# Create a simple gradient filter
kernel = vtk.vtkFloatArray()
kernel.SetNumberOfTuples(3)
kernel.InsertValue(0, -1)
kernel.InsertValue(1, 0)
kernel.InsertValue(2, 1)
# Create a gaussian for Y
sigma = 1.5
sigma2 = sigma * sigma
gaussian = vtk.vtkFloatArray()
gaussian.SetNumberOfTuples(31)
i = 0
while i < 31:
x = i - 15
g = math.exp(-(x * x) / (2.0 * sigma2)) / (math.sqrt (2.0 * 3.1415) * sigma)
gaussian.InsertValue(i, g)
i += 1
convolve = vtk.vtkImageSeparableConvolution()
convolve.SetInputConnection(reader.GetOutputPort())
convolve.SetDimensionality(2)
convolve.SetXKernel(kernel)
convolve.SetYKernel(gaussian)
viewer = vtk.vtkImageViewer()
# viewer DebugOn
viewer.SetInputConnection(convolve.GetOutputPort())
viewer.SetColorWindow(500)
viewer.SetColorLevel(100)
viewer.Render()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Imaging/Core/Testing/Python/TestSeparableFilter.py
|
Python
|
bsd-3-clause
| 1,144
|
[
"Gaussian",
"VTK"
] |
a4c26840585ba05f5f7a3cd5abb30d7664fea9e09b2eadaf5b13c38590b7df43
|
"""Compatibility fixes for older versions of libraries
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
from distutils.version import LooseVersion
import functools
import inspect
from math import log
import os
from pathlib import Path
import warnings
import numpy as np
###############################################################################
# Misc
def _median_complex(data, axis):
"""Compute marginal median on complex data safely.
XXX: Can be removed when numpy introduces a fix.
See: https://github.com/scipy/scipy/pull/12676/.
"""
# np.median must be passed real arrays for the desired result
if np.iscomplexobj(data):
data = (np.median(np.real(data), axis=axis)
+ 1j * np.median(np.imag(data), axis=axis))
else:
data = np.median(data, axis=axis)
return data
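# Example (illustrative): _median_complex(np.array([1 + 2j, 3 + 4j, 5 + 6j]), axis=0)
# gives (3+4j): the real and imaginary parts are median-ed separately.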
# helpers to get function arguments
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _safe_svd(A, **kwargs):
"""Wrapper to get around the SVD did not converge error of death"""
# Intel has a bug with their GESVD driver:
# https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501
# For SciPy 0.18 and up, we can work around it by using
# lapack_driver='gesvd' instead.
from scipy import linalg
if kwargs.get('overwrite_a', False):
raise ValueError('Cannot set overwrite_a=True with this function')
try:
return linalg.svd(A, **kwargs)
except np.linalg.LinAlgError as exp:
from .utils import warn
if 'lapack_driver' in _get_args(linalg.svd):
warn('SVD error (%s), attempting to use GESVD instead of GESDD'
% (exp,))
return linalg.svd(A, lapack_driver='gesvd', **kwargs)
else:
raise
def _csc_matrix_cast(x):
from scipy.sparse import csc_matrix
return csc_matrix(x)
###############################################################################
# Backporting nibabel's read_geometry
def _get_read_geometry():
"""Get the geometry reading function."""
try:
import nibabel as nib
has_nibabel = True
except ImportError:
has_nibabel = False
if has_nibabel:
from nibabel.freesurfer import read_geometry
else:
read_geometry = _read_geometry
return read_geometry
def _read_geometry(filepath, read_metadata=False, read_stamp=False):
"""Backport from nibabel."""
from .surface import _fread3, _fread3_many
volume_info = dict()
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file
nvert = _fread3(fobj)
nquad = _fread3(fobj)
(fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div
coords = coords.reshape(-1, 3)
quads = _fread3_many(fobj, nquad * 4)
quads = quads.reshape(nquad, 4)
#
# Face splitting follows
#
faces = np.zeros((2 * nquad, 3), dtype=np.int64)
nface = 0
for quad in quads:
if (quad[0] % 2) == 0:
faces[nface] = quad[0], quad[1], quad[3]
nface += 1
faces[nface] = quad[2], quad[3], quad[1]
nface += 1
else:
faces[nface] = quad[0], quad[1], quad[2]
nface += 1
faces[nface] = quad[0], quad[2], quad[3]
nface += 1
elif magic == TRIANGLE_MAGIC: # Triangle file
create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8')
fobj.readline()
vnum = np.fromfile(fobj, ">i4", 1)[0]
fnum = np.fromfile(fobj, ">i4", 1)[0]
coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
if read_metadata:
volume_info = _read_volume_info(fobj)
else:
raise ValueError("File does not appear to be a Freesurfer surface")
coords = coords.astype(np.float64) # XXX: due to mayavi bug on mac 32bits
ret = (coords, faces)
if read_metadata:
if len(volume_info) == 0:
warnings.warn('No volume information contained in the file')
ret += (volume_info,)
if read_stamp:
ret += (create_stamp,)
return ret
###############################################################################
# Triaging FFT functions to get fast pocketfft (SciPy 1.4)
@functools.lru_cache(None)
def _import_fft(name):
single = False
if not isinstance(name, tuple):
name = (name,)
single = True
try:
from scipy.fft import rfft # noqa analysis:ignore
except ImportError:
from numpy import fft # noqa
else:
from scipy import fft # noqa
out = [getattr(fft, n) for n in name]
if single:
out = out[0]
return out
###############################################################################
# NumPy Generator (NumPy 1.17)
def rng_uniform(rng):
"""Get the unform/randint from the rng."""
# prefer Generator.integers, fall back to RandomState.randint
return getattr(rng, 'integers', getattr(rng, 'randint', None))
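# Example (illustrative): rng_uniform(np.random.RandomState(0))(0, 10, 3) draws
# three integers in [0, 10) via RandomState.randint; with np.random.default_rng()
# the same call dispatches to Generator.integers instead.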
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
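# Example (illustrative): a single second-order section has six coefficients
# [b0, b1, b2, 1.0, a1, a2], so
#   _validate_sos(np.array([[1., 0., 0., 1., 0., 0.]]))
# returns the 2D array unchanged together with n_sections == 1.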
###############################################################################
# Misc utilities
# get_fdata() requires knowing the dtype ahead of time, so let's triage on our
# own instead
def _get_img_fdata(img):
data = np.asanyarray(img.dataobj)
dtype = np.complex128 if np.iscomplexobj(data) else np.float64
return data.astype(dtype)
def _read_volume_info(fobj):
"""An implementation of nibabel.freesurfer.io._read_volume_info, since old
versions of nibabel (<=2.1.0) don't have it.
"""
volume_info = dict()
head = np.fromfile(fobj, '>i4', 1)
if not np.array_equal(head, [20]): # Read two bytes more
head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
if not np.array_equal(head, [2, 0, 20]):
warnings.warn("Unknown extension code.")
return volume_info
volume_info['head'] = head
for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']:
pair = fobj.readline().decode('utf-8').split('=')
if pair[0].strip() != key or len(pair) != 2:
raise IOError('Error parsing volume info.')
if key in ('valid', 'filename'):
volume_info[key] = pair[1].strip()
elif key == 'volume':
volume_info[key] = np.array(pair[1].split()).astype(int)
else:
volume_info[key] = np.array(pair[1].split()).astype(float)
# Ignore the rest
return volume_info
def _serialize_volume_info(volume_info):
"""An implementation of nibabel.freesurfer.io._serialize_volume_info, since
old versions of nibabel (<=2.1.0) don't have it."""
keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']
diff = set(volume_info.keys()).difference(keys)
if len(diff) > 0:
raise ValueError('Invalid volume info: %s.' % diff.pop())
strings = list()
for key in keys:
if key == 'head':
if not (np.array_equal(volume_info[key], [20]) or np.array_equal(
volume_info[key], [2, 0, 20])):
warnings.warn("Unknown extension code.")
strings.append(np.array(volume_info[key], dtype='>i4').tobytes())
elif key in ('valid', 'filename'):
val = volume_info[key]
strings.append('{} = {}\n'.format(key, val).encode('utf-8'))
elif key == 'volume':
val = volume_info[key]
strings.append('{} = {} {} {}\n'.format(
key, val[0], val[1], val[2]).encode('utf-8'))
else:
val = volume_info[key]
strings.append('{} = {:0.10g} {:0.10g} {:0.10g}\n'.format(
key.ljust(6), val[0], val[1], val[2]).encode('utf-8'))
return b''.join(strings)
##############################################################################
# adapted from scikit-learn
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
_DEFAULT_TAGS = {
'non_deterministic': False,
'requires_positive_X': False,
'requires_positive_y': False,
'X_types': ['2darray'],
'poor_score': False,
'no_validation': False,
'multioutput': False,
"allow_nan": False,
'stateless': False,
'multilabel': False,
'_skip_test': False,
'_xfail_checks': False,
'multioutput_only': False,
'binary_only': False,
'requires_fit': True,
'preserves_dtype': [np.float64],
'requires_y': False,
'pairwise': False,
}
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : bool, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Parameters
----------
**params : dict
Parameters.
Returns
-------
inst : instance
The object.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
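    # Example (illustrative): est.set_params(pca__n_components=3) would update
    # the ``n_components`` parameter of a nested sub-estimator exposed as the
    # ``pca`` parameter, mirroring scikit-learn's nested-parameter semantics.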
def __repr__(self):
from sklearn.base import _pprint
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
# __getstate__ and __setstate__ are omitted because they only contain
# conditionals that are not satisfied by our objects (e.g.,
    # ``if type(self).__module__.startswith('sklearn.')``).
def _more_tags(self):
return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, '_more_tags'):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
# newer sklearn deprecates importing from sklearn.metrics.scoring,
# but older sklearn does not expose check_scoring in sklearn.metrics.
def _get_check_scoring():
try:
from sklearn.metrics import check_scoring # noqa
except ImportError:
from sklearn.metrics.scorer import check_scoring # noqa
return check_scoring
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as
`X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support
indexing.
"""
try:
from sklearn.utils.validation import \
_check_fit_params as _sklearn_check_fit_params
return _sklearn_check_fit_params(X, fit_params, indices)
except ImportError:
from sklearn.model_selection import _validation
fit_params_validated = \
{k: _validation._index_param_value(X, v, indices)
for k, v in fit_params.items()}
return fit_params_validated
###############################################################################
# Copied from sklearn to simplify code paths
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
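# Quick check (illustrative, not part of the original module): for non-centered
# data the estimate matches the biased sample covariance,
#   X = np.array([[1., 2.], [3., 4.], [5., 6.]])
#   np.allclose(empirical_covariance(X), np.cov(X.T, bias=1))  # -> True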
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
from scipy import linalg
# covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like,
The precision matrix associated to the current covariance object.
"""
from scipy import linalg
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fit the Maximum Likelihood Estimator covariance model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : ndarray | None
Not used, present for API consistency.
Returns
-------
self : object
Returns self.
""" # noqa: E501
# X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of a Gaussian dataset.
Uses ``self.covariance_`` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution as
            the data used in fit (including centering).
y : ndarray | None
Not used, present for API consistency.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
from scipy import linalg
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, observations):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
            The observations, for which the Mahalanobis distances are
            computed. Observations are assumed to be drawn from the same
            distribution as the data used in fit.
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + _logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
# sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues
def _logdet(A):
"""Compute the log det of a positive semidefinite matrix."""
from scipy import linalg
vals = linalg.eigvalsh(A)
# avoid negative (numerical errors) or zero (semi-definite matrix) values
tol = vals.max() * vals.size * np.finfo(np.float64).eps
vals = np.where(vals > tol, vals, tol)
return np.sum(np.log(vals))
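# Example (illustrative): _logdet(np.eye(3)) == 0.0, and for a singular PSD
# matrix the zero eigenvalues are floored at a small tolerance instead of
# producing -inf.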
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
def _assess_dimension_(spectrum, rank, n_samples, n_features):
from scipy.special import gammaln
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.) -
log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def svd_flip(u, v, u_based_decision=True):
    """Sign-correct an SVD so that the largest absolute entries are positive."""
    if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
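# Note (illustrative): the sign convention follows scikit-learn's svd_flip,
# making the decomposition deterministic without changing u @ diag(s) @ v.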
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
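# Example (illustrative): stable_cumsum(np.ones(5)) -> array([1., 2., 3., 4., 5.]),
# with a RuntimeWarning emitted only if the float64 cumulative sum drifts away
# from the direct sum.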
# This shim can be removed once NumPy 1.19.0+ is required (1.18.4 has sign bug)
def svd(a, hermitian=False):
if hermitian: # faster
s, u = np.linalg.eigh(a)
sgn = np.sign(s)
s = np.abs(s)
sidx = np.argsort(s)[..., ::-1]
sgn = take_along_axis(sgn, sidx, axis=-1)
s = take_along_axis(s, sidx, axis=-1)
u = take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = (u * sgn[..., np.newaxis, :]).swapaxes(-2, -1).conj()
np.abs(s, out=s)
return u, s, vt
else:
return np.linalg.svd(a)
###############################################################################
# NumPy einsum backward compat (allow "optimize" arg and fix 1.14.0 bug)
# XXX eventually we should hand-tune our `einsum` calls given our array sizes!
def einsum(*args, **kwargs):
if 'optimize' not in kwargs:
kwargs['optimize'] = False
return np.einsum(*args, **kwargs)
try:
from numpy import take_along_axis
except ImportError: # NumPy < 1.15
def take_along_axis(arr, indices, axis):
# normalize inputs
if axis is None:
arr = arr.flat
arr_shape = (len(arr),) # flatiter has no .shape
axis = 0
else:
            # there is a NumPy function for this, but rather than copying it,
            # our internal uses should already be correct, so just normalize quickly
if axis < 0:
axis += arr.ndim
assert 0 <= axis < arr.ndim
arr_shape = arr.shape
# use the fancy index
return arr[_make_along_axis_idx(arr_shape, indices, axis)]
def _make_along_axis_idx(arr_shape, indices, axis):
# compute dimensions to iterate over
if not np.issubdtype(indices.dtype, np.integer):
raise IndexError('`indices` must be an integer array')
if len(arr_shape) != indices.ndim:
raise ValueError(
"`indices` and `arr` must have the same number of dimensions")
shape_ones = (1,) * indices.ndim
dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
# build a fancy index, consisting of orthogonal aranges, with the
# requested index inserted at the right location
fancy_index = []
for dim, n in zip(dest_dims, arr_shape):
if dim is None:
fancy_index.append(indices)
else:
ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
fancy_index.append(np.arange(n).reshape(ind_shape))
return tuple(fancy_index)
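    # Example (illustrative): with arr = np.array([[10, 20], [30, 40]]) and
    # idx = np.array([[1], [0]]), take_along_axis(arr, idx, axis=1) returns
    # np.array([[20], [30]]), matching numpy.take_along_axis (NumPy >= 1.15).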
###############################################################################
# From nilearn
def _crop_colorbar(cbar, cbar_vmin, cbar_vmax):
"""
crop a colorbar to show from cbar_vmin to cbar_vmax
Used when symmetric_cbar=False is used.
"""
import matplotlib
if (cbar_vmin is None) and (cbar_vmax is None):
return
cbar_tick_locs = cbar.locator.locs
if cbar_vmax is None:
cbar_vmax = cbar_tick_locs.max()
if cbar_vmin is None:
cbar_vmin = cbar_tick_locs.min()
new_tick_locs = np.linspace(cbar_vmin, cbar_vmax,
len(cbar_tick_locs))
# matplotlib >= 3.2.0 no longer normalizes axes between 0 and 1
# See https://matplotlib.org/3.2.1/api/prev_api_changes/api_changes_3.2.0.html
# _outline was removed in
# https://github.com/matplotlib/matplotlib/commit/03a542e875eba091a027046d5ec652daa8be6863
# so we use the code from there
if LooseVersion(matplotlib.__version__) >= LooseVersion("3.2.0"):
cbar.ax.set_ylim(cbar_vmin, cbar_vmax)
X, _ = cbar._mesh()
X = np.array([X[0], X[-1]])
Y = np.array([[cbar_vmin, cbar_vmin], [cbar_vmax, cbar_vmax]])
N = X.shape[0]
ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0]
x = X.T.reshape(-1)[ii]
y = Y.T.reshape(-1)[ii]
xy = (np.column_stack([y, x])
if cbar.orientation == 'horizontal' else
np.column_stack([x, y]))
cbar.outline.set_xy(xy)
else:
cbar.ax.set_ylim(cbar.norm(cbar_vmin), cbar.norm(cbar_vmax))
outline = cbar.outline.get_xy()
outline[:2, 1] += cbar.norm(cbar_vmin)
outline[2:6, 1] -= (1. - cbar.norm(cbar_vmax))
outline[6:, 1] += cbar.norm(cbar_vmin)
cbar.outline.set_xy(outline)
cbar.set_ticks(new_tick_locs, update_ticks=True)
###############################################################################
# Matplotlib
def _get_status(checks):
"""Deal with old MPL to get check box statuses."""
try:
return list(checks.get_status())
except AttributeError:
return [x[0].get_visible() for x in checks.lines]
###############################################################################
# Numba (optional requirement)
# Here we choose different defaults to speed things up by default
try:
import numba
if LooseVersion(numba.__version__) < LooseVersion('0.40'):
raise ImportError
prange = numba.prange
def jit(nopython=True, nogil=True, fastmath=True, cache=True,
**kwargs): # noqa
return numba.jit(nopython=nopython, nogil=nogil, fastmath=fastmath,
cache=cache, **kwargs)
except ImportError:
has_numba = False
else:
has_numba = (os.getenv('MNE_USE_NUMBA', 'true').lower() == 'true')
if not has_numba:
def jit(**kwargs): # noqa
def _jit(func):
return func
return _jit
prange = range
bincount = np.bincount
mean = np.mean
else:
@jit()
def bincount(x, weights, minlength): # noqa: D103
out = np.zeros(minlength)
for idx, w in zip(x, weights):
out[idx] += w
return out
# fix because Numba does not support axis kwarg for mean
@jit()
def _np_apply_along_axis(func1d, axis, arr):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@jit()
def mean(array, axis):
return _np_apply_along_axis(np.mean, axis, array)
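# Illustrative sketch (assumption, not part of the upstream module): the ``jit``
# defined above is used as a decorator, so the same function works with or
# without Numba installed, e.g.
#     @jit()
#     def _row_sums(data):
#         out = np.empty(data.shape[0])
#         for ii in range(data.shape[0]):
#             out[ii] = data[ii].sum()
#         return out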
###############################################################################
# Added in Python 3.7 (remove when we drop support for 3.6)
try:
from contextlib import nullcontext
except ImportError:
from contextlib import contextmanager
@contextmanager
def nullcontext(enter_result=None):
yield enter_result
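# Illustrative usage sketch (``lock`` and ``use_lock`` are hypothetical names):
#     ctx = lock if use_lock else nullcontext()
#     with ctx:
#         ...  # body runs the same way with or without a real context manager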
|
kambysese/mne-python
|
mne/fixes.py
|
Python
|
bsd-3-clause
| 37,120
|
[
"Gaussian",
"Mayavi"
] |
5749eedc3aaa4d5079596eed1aa245a58137f8c2bab75bd4a7d6083015aac323
|
""" JobStateUpdateHandler is the implementation of the Job State updating
service in the DISET framework
The following methods are available in the Service interface
setJobStatus()
"""
from __future__ import absolute_import
import six
from six.moves import range
__RCSID__ = "$Id$"
import time
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities import Time
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.ElasticJobDB import ElasticJobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
# This is a global instance of the JobDB class
jobDB = False
logDB = False
elasticJobDB = False
JOB_FINAL_STATES = ['Done', 'Completed', 'Failed']
def initializeJobStateUpdateHandler(serviceInfo):
global jobDB
global logDB
jobDB = JobDB()
logDB = JobLoggingDB()
return S_OK()
class JobStateUpdateHandler(RequestHandler):
def initialize(self):
"""
Flags gESFlag and gMySQLFlag have bool values (True/False)
derived from dirac.cfg configuration file
Determines the switching of ElasticSearch and MySQL backends
"""
global elasticJobDB, jobDB
gESFlag = self.srv_getCSOption('useES', False)
if gESFlag:
elasticJobDB = ElasticJobDB()
gMySQLFlag = self.srv_getCSOption('useMySQL', True)
if not gMySQLFlag:
jobDB = False
return S_OK()
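# Illustrative sketch (assumption, not verbatim DIRAC configuration): the two CS
# options read above behave roughly like
#   useES = True     -> job parameters are also written to ElasticSearch
#   useMySQL = False -> the MySQL JobDB backend is disabled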
###########################################################################
types_updateJobFromStager = [[six.string_types, int], six.string_types]
def export_updateJobFromStager(self, jobID, status):
""" Simple call back method to be used by the stager. """
if status == 'Done':
jobStatus = 'Checking'
minorStatus = 'JobScheduling'
elif status == 'Failed':
jobStatus = 'Failed'
minorStatus = 'Staging input files failed'
else:
return S_ERROR("updateJobFromStager: %s status not known." % status)
infoStr = None
trials = 10
for i in range(trials):
result = jobDB.getJobAttributes(jobID, ['Status'])
if not result['OK']:
return result
if not result['Value']:
# if there is no matching Job it returns an empty dictionary
return S_OK('No Matching Job')
status = result['Value']['Status']
if status == 'Staging':
if i:
infoStr = "Found job in Staging after %d seconds" % i
break
time.sleep(1)
if status != 'Staging':
return S_OK('Job is not in Staging after %d seconds' % trials)
result = self.__setJobStatus(int(jobID), jobStatus, minorStatus, 'StagerSystem', None)
if not result['OK']:
if result['Message'].find('does not exist') != -1:
return S_OK()
if infoStr:
return S_OK(infoStr)
return result
###########################################################################
types_setJobStatus = [[six.string_types, int]]
def export_setJobStatus(self, jobID, status='', minorStatus='', source='Unknown', datetime=None):
""" Set the major and minor status for job specified by its JobId.
Set optionally the status date and source component which sends the
status information.
"""
return self.__setJobStatus(int(jobID), status, minorStatus, source, datetime)
###########################################################################
types_setJobsStatus = [list]
def export_setJobsStatus(self, jobIDs, status='', minorStatus='', source='Unknown', datetime=None):
""" Set the major and minor status for job specified by its JobId.
Set optionally the status date and source component which sends the
status information.
"""
for jobID in jobIDs:
self.__setJobStatus(int(jobID), status, minorStatus, source, datetime)
return S_OK()
def __setJobStatus(self, jobID, status, minorStatus, source, datetime):
""" update the job status. """
result = jobDB.setJobStatus(jobID, status, minorStatus)
if not result['OK']:
return result
if status in JOB_FINAL_STATES:
result = jobDB.setEndExecTime(jobID)
if status == 'Running' and minorStatus == 'Application':
result = jobDB.setStartExecTime(jobID)
result = jobDB.getJobAttributes(jobID, ['Status', 'MinorStatus'])
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Job %d does not exist' % int(jobID))
status = result['Value']['Status']
minorStatus = result['Value']['MinorStatus']
if datetime:
result = logDB.addLoggingRecord(jobID, status, minorStatus, datetime, source)
else:
result = logDB.addLoggingRecord(jobID, status, minorStatus, source=source)
return result
###########################################################################
types_setJobStatusBulk = [[six.string_types, int], dict]
def export_setJobStatusBulk(self, jobID, statusDict):
""" Set various status fields for job specified by its JobId.
Set only the last status in the JobDB, updating all the status
logging information in the JobLoggingDB. The statusDict has datetime
as a key and status information dictionary as values
"""
status = ""
minor = ""
application = ""
appCounter = ""
endDate = ''
startDate = ''
startFlag = ''
jobID = int(jobID)
result = jobDB.getJobAttributes(jobID, ['Status'])
if not result['OK']:
return result
if not result['Value']:
# if there is no matching Job it returns an empty dictionary
return S_ERROR('No Matching Job')
new_status = result['Value']['Status']
if new_status == "Stalled":
status = 'Running'
# Get the latest WN time stamps of status updates
result = logDB.getWMSTimeStamps(int(jobID))
if not result['OK']:
return result
lastTime = max([float(t) for s, t in result['Value'].items() if s != 'LastTime'])
lastTime = Time.toString(Time.fromEpoch(lastTime))
# Get the last status values
dates = sorted(statusDict)
# We should only update the status if its time stamp is more recent than the last update
for date in [date for date in dates if date >= lastTime]:
sDict = statusDict[date]
if sDict['Status']:
status = sDict['Status']
if status in JOB_FINAL_STATES:
endDate = date
if status == "Running":
startFlag = 'Running'
if sDict['MinorStatus']:
minor = sDict['MinorStatus']
if minor == "Application" and startFlag == 'Running':
startDate = date
if sDict['ApplicationStatus']:
application = sDict['ApplicationStatus']
counter = sDict.get('ApplicationCounter')
if counter:
appCounter = counter
attrNames = []
attrValues = []
if status:
attrNames.append('Status')
attrValues.append(status)
if minor:
attrNames.append('MinorStatus')
attrValues.append(minor)
if application:
attrNames.append('ApplicationStatus')
attrValues.append(application)
if appCounter:
attrNames.append('ApplicationCounter')
attrValues.append(appCounter)
result = jobDB.setJobAttributes(jobID, attrNames, attrValues, update=True)
if not result['OK']:
return result
if endDate:
result = jobDB.setEndExecTime(jobID, endDate)
if startDate:
result = jobDB.setStartExecTime(jobID, startDate)
# Update the JobLoggingDB records
for date in dates:
sDict = statusDict[date]
status = sDict['Status']
if not status:
status = 'idem'
minor = sDict['MinorStatus']
if not minor:
minor = 'idem'
application = sDict['ApplicationStatus']
if not application:
application = 'idem'
source = sDict['Source']
result = logDB.addLoggingRecord(jobID, status, minor, application, date, source)
if not result['OK']:
return result
return S_OK()
###########################################################################
types_setJobSite = [[six.string_types, int], six.string_types]
def export_setJobSite(self, jobID, site):
"""Allows the site attribute to be set for a job specified by its jobID.
"""
result = jobDB.setJobAttribute(int(jobID), 'Site', site)
return result
###########################################################################
types_setJobFlag = [[six.string_types, int], six.string_types]
def export_setJobFlag(self, jobID, flag):
""" Set job flag for job with jobID
"""
result = jobDB.setJobAttribute(int(jobID), flag, 'True')
return result
###########################################################################
types_unsetJobFlag = [[six.string_types, int], six.string_types]
def export_unsetJobFlag(self, jobID, flag):
""" Unset job flag for job with jobID
"""
result = jobDB.setJobAttribute(int(jobID), flag, 'False')
return result
###########################################################################
types_setJobApplicationStatus = [[six.string_types, int], six.string_types, six.string_types]
def export_setJobApplicationStatus(self, jobID, appStatus, source='Unknown'):
""" Set the application status for job specified by its JobId.
"""
result = jobDB.getJobAttributes(int(jobID), ['Status', 'MinorStatus'])
if not result['OK']:
return result
if not result['Value']:
# if there is no matching Job it returns an empty dictionary
return S_ERROR('No Matching Job')
status = result['Value']['Status']
if status == "Stalled" or status == "Matched":
newStatus = 'Running'
else:
newStatus = status
minorStatus = result['Value']['MinorStatus']
result = jobDB.setJobStatus(int(jobID), status=newStatus, minor=minorStatus, application=appStatus)
if not result['OK']:
return result
result = logDB.addLoggingRecord(int(jobID), newStatus, minorStatus, appStatus, source=source)
return result
###########################################################################
types_setJobParameter = [[six.string_types, int], six.string_types, six.string_types]
def export_setJobParameter(self, jobID, name, value):
""" Set arbitrary parameter specified by name/value pair
for job specified by its JobId
"""
if elasticJobDB:
result = elasticJobDB.setJobParameter(int(jobID), name, value)
if jobDB:
result = jobDB.setJobParameter(int(jobID), name, value)
return result
###########################################################################
types_setJobsParameter = [dict]
def export_setJobsParameter(self, jobsParameterDict):
""" Set arbitrary parameter specified by name/value pair
for job specified by its JobId
"""
for jobID in jobsParameterDict:
if elasticJobDB:
elasticJobDB.setJobParameter(jobID, str(jobsParameterDict[jobID][0]), str(jobsParameterDict[jobID][1]))
if jobDB:
jobDB.setJobParameter(jobID, str(jobsParameterDict[jobID][0]), str(jobsParameterDict[jobID][1]))
return S_OK()
###########################################################################
types_setJobParameters = [[six.string_types, int], list]
def export_setJobParameters(self, jobID, parameters):
""" Set arbitrary parameters specified by a list of name/value pairs
for the job specified by its JobId.
"""
result = jobDB.setJobParameters(int(jobID), parameters)
if not result['OK']:
return S_ERROR('Failed to store some of the parameters')
return S_OK('All parameters stored for job')
###########################################################################
types_sendHeartBeat = [[six.string_types, int], dict, dict]
def export_sendHeartBeat(self, jobID, dynamicData, staticData):
""" Send a heart beat sign of life for a job jobID
"""
result = jobDB.setHeartBeatData(int(jobID), staticData, dynamicData)
if not result['OK']:
self.log.warn('Failed to set the heart beat data', 'for job %d ' % int(jobID))
# Restore the Running status if necessary
result = jobDB.getJobAttributes(jobID, ['Status'])
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Job %d not found' % jobID)
status = result['Value']['Status']
if status == "Stalled" or status == "Matched":
result = jobDB.setJobAttribute(jobID, 'Status', 'Running', True)
if not result['OK']:
self.log.warn('Failed to restore the job status to Running')
jobMessageDict = {}
result = jobDB.getJobCommand(int(jobID))
if result['OK']:
jobMessageDict = result['Value']
if jobMessageDict:
for key, _value in jobMessageDict.items():
result = jobDB.setJobCommandStatus(int(jobID), key, 'Sent')
return S_OK(jobMessageDict)
|
chaen/DIRAC
|
WorkloadManagementSystem/Service/JobStateUpdateHandler.py
|
Python
|
gpl-3.0
| 12,909
|
[
"DIRAC"
] |
23ff978efd0d67935a66f6d8022dde0fdeaa2ccac3d4c38aa28f066c826fc637
|
#!/usr/bin/env python
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-15 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Post weather update to services such as Weather Underground
::
%s
Introduction
------------
There are an increasing number of web sites around the world that
encourage amateur weather station owners to upload data over the
internet.
This module enables pywws to upload readings to these organisations.
It is highly customisable using configuration files. Each 'service'
requires a configuration file and one or two templates in
``pywws/services`` (that should not need to be edited by the user) and
a section in ``weather.ini`` containing user specific data such as
your site ID and password.
See :ref:`How to integrate pywws with various weather services
<guides-integration-other>` for details of the available services.
Configuration
-------------
If you haven't already done so, visit the organisation's web site and
create an account for your weather station. Make a note of any site ID
and password details you are given.
Stop any pywws software that is running and then run ``toservice`` to
create a section in ``weather.ini``::
python -m pywws.toservice data_dir service_name
``service_name`` is the single word service name used by pywws, such
as ``metoffice``, ``data_dir`` is your weather data directory, as
usual.
Edit ``weather.ini`` and find the section corresponding to the service
name, e.g. ``[underground]``. Copy your site details into this
section, for example::
[underground]
password = secret
station = ABCDEFG1A
Now you can test your configuration::
python -m pywws.toservice -vvv data_dir service_name
This should show you the data string that is uploaded. Any failure
should generate an error message.
Upload old data
---------------
Now you can upload your last 7 days' data, if the service supports it.
Run ``toservice`` with the catchup option::
python -m pywws.toservice -cvv data_dir service_name
This may take 20 minutes or more, depending on how much data you have.
Add service(s) upload to regular tasks
--------------------------------------
Edit your ``weather.ini`` again, and add a list of services to the
``[live]``, ``[logged]``, ``[hourly]``, ``[12 hourly]`` or ``[daily]``
section, depending on how often you want to send data. For example::
[live]
twitter = []
plot = []
text = []
services = ['underground_rf', 'cwop']
[logged]
twitter = []
plot = []
text = []
services = ['metoffice', 'cwop']
[hourly]
twitter = []
plot = []
text = []
services = ['underground']
Note that the ``[live]`` section is only used when running
:py:mod:`pywws.LiveLog`. It is a good idea to repeat any
service selected in ``[live]`` in the ``[logged]`` or ``[hourly]``
section in case you switch to running :py:mod:`pywws.Hourly`.
Restart your regular pywws program (:py:mod:`pywws.Hourly` or
:py:mod:`pywws.LiveLog`) and visit the appropriate web site to
see regular updates from your weather station.
Using a different template
--------------------------
For some services (mainly MQTT) you might want to write your own
template to give greater control over the uploaded data. Copy the
default template file from ``pywws/services`` to your template directory
and then edit it to do what you want. Now edit ``weather.ini`` and
change the ``template`` value from ``default`` to the name of your
custom template.
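For example (``mytemplate.txt`` is just an illustrative file name)::
[underground]
password = secret
station = ABCDEFG1A
template = mytemplate.txt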
API
---
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
__usage__ = """
usage: python -m pywws.toservice [options] data_dir service_name
options are:
-h or --help display this help
-c or --catchup upload all data since last upload
-v or --verbose increase amount of reassuring messages
data_dir is the root directory of the weather data
service_name is the service to upload to, e.g. underground
"""
__doc__ %= __usage__
__usage__ = __doc__.split('\n')[0] + __usage__
import base64
from ConfigParser import SafeConfigParser
from datetime import datetime, timedelta
import getopt
import logging
import os
import pkg_resources
import re
import socket
import StringIO
import sys
import urllib
import urllib2
import urlparse
from . import DataStore
from .Logger import ApplicationLogger
from . import Template
from . import __version__
PARENT_MARGIN = timedelta(minutes=2)
class ToService(object):
"""Upload weather data to weather services such as Weather
Underground.
"""
def __init__(self, params, status, calib_data, service_name):
"""
:param params: pywws configuration.
:type params: :class:`pywws.DataStore.params`
:param status: pywws status store.
:type status: :class:`pywws.DataStore.status`
:param calib_data: 'calibrated' data.
:type calib_data: :class:`pywws.DataStore.calib_store`
:param service_name: name of service to upload to.
:type service_name: string
"""
self.logger = logging.getLogger('pywws.ToService(%s)' % service_name)
self.params = params
self.status = status
self.data = calib_data
self.service_name = service_name
# 'derived' services such as 'underground_rf' share their
# parent's config and templates
config_section = self.service_name.split('_')[0]
if config_section == self.service_name:
self.parent = None
else:
self.parent = config_section
self.old_response = None
self.old_ex = None
# set default socket timeout, so urlopen calls don't hang forever
socket.setdefaulttimeout(30)
# open params file
service_params = SafeConfigParser()
service_params.optionxform = str
param_string = pkg_resources.resource_string(
'pywws', 'services/%s.ini' % (self.service_name))
if sys.version_info[0] >= 3:
param_string = param_string.decode('utf-8')
service_params.readfp(StringIO.StringIO(param_string))
# get URL
self.server = service_params.get('config', 'url')
parsed_url = urlparse.urlsplit(self.server)
if parsed_url.scheme == 'aprs':
self.send_data = self.aprs_send_data
server, port = parsed_url.netloc.split(':')
self.server = (server, int(port))
elif parsed_url.scheme == 'mqtt':
self.send_data = self.mqtt_send_data
else:
self.send_data = self.http_send_data
self.use_get = eval(service_params.get('config', 'use get'))
# get fixed part of upload data
self.fixed_data = dict()
for name, value in service_params.items('fixed'):
if value[0] == '*':
value = self.params.get(config_section, value[1:], 'unknown')
self.fixed_data[name] = value
# create templater
self.templater = Template.Template(
self.params, self.status, self.data, self.data, None, None,
use_locale=False)
template_name = self.params.get(config_section, 'template', 'default')
if template_name != 'default':
template_dir = self.params.get(
'paths', 'templates', os.path.expanduser('~/weather/templates/'))
self.template_file = open(
os.path.join(template_dir, template_name), 'rb')
else:
template_name = 'services/%s_template_%s.txt' % (
config_section, self.params.get('config', 'ws type'))
if not pkg_resources.resource_exists('pywws', template_name):
template_name = 'services/%s_template_1080.txt' % (config_section)
self.template_file = pkg_resources.resource_stream(
'pywws', template_name)
# get other parameters
self.auth_type = service_params.get('config', 'auth_type')
if self.auth_type == 'basic':
user = self.params.get(config_section, 'user', 'unknown')
password = self.params.get(config_section, 'password', 'unknown')
details = user + ':' + password
self.auth = 'Basic ' + base64.b64encode(details.encode('utf-8')).decode('utf-8')
elif self.auth_type == 'mqtt':
self.user = self.params.get(config_section, 'user', 'unknown')
self.password = self.params.get(config_section, 'password', 'unknown')
self.catchup = eval(service_params.get('config', 'catchup'))
self.expected_result = eval(service_params.get('config', 'result'))
self.interval = eval(service_params.get('config', 'interval'))
self.interval = max(self.interval, 40)
self.interval = timedelta(seconds=self.interval)
# move 'last update' from params to status
last_update = self.params.get_datetime(self.service_name, 'last update')
if last_update:
self.params.unset(self.service_name, 'last update')
self.status.set(
'last update', self.service_name, last_update.isoformat(' '))
# set timestamp of first data to upload
self.next_update = datetime.utcnow() - max(
timedelta(days=self.catchup), self.interval)
def prepare_data(self, data):
"""Prepare a weather data record.
The :obj:`data` parameter contains the data to be encoded. It
should be a 'calibrated' data record, as stored in
:class:`pywws.DataStore.calib_store`. The relevant data items
are extracted and converted to strings using a template, then
merged with the station's "fixed" data.
:param data: the weather data record.
:type data: dict
:return: the prepared data (or None if the record cannot be used).
:rtype: dict
"""
# check we have external data
if data['temp_out'] is None:
return None
# convert data
data_str = self.templater.make_text(self.template_file, data)
self.template_file.seek(0)
if not data_str:
return None
prepared_data = eval(data_str)
prepared_data.update(self.fixed_data)
return prepared_data
def mqtt_send_data(self, timestamp, prepared_data, ignore_last_update=False):
import paho.mqtt.client as mosquitto
import time
import json
topic = prepared_data['topic']
hostname = prepared_data['hostname']
port = prepared_data['port']
client_id = prepared_data['client_id']
retain = prepared_data['retain'] == 'True'
auth = prepared_data['auth'] == 'True'
# clean up the object
del prepared_data['topic']
del prepared_data['hostname']
del prepared_data['port']
del prepared_data['client_id']
del prepared_data['retain']
del prepared_data['auth']
mosquitto_client = mosquitto.Mosquitto(client_id)
if auth:
self.logger.debug("Username and password configured")
mosquitto_client.username_pw_set(self.user, self.password)
else:
self.logger.debug("Username and password unconfigured, ignoring")
self.logger.debug(
"timestamp: %s. publishing on topic [%s] to hostname [%s] and " +
"port [%s] with a client_id [%s] and retain is %s",
timestamp.isoformat(' '), topic, hostname, port, client_id, retain)
mosquitto_client.connect(hostname, port)
mosquitto_client.publish(topic, json.dumps(prepared_data), retain=retain)
## commented out as sending the data as a json object (above)
## for item in prepared_data:
## if prepared_data[item] == '':
## prepared_data[item] = 'None'
## mosquitto_client.publish(
## topic + "/" + item + "/" + str(timestamp), prepared_data[item])
## time.sleep(0.200)
self.logger.debug("published data: %s", prepared_data)
mosquitto_client.disconnect()
return True
def aprs_send_data(self, timestamp, prepared_data, ignore_last_update=False):
"""Upload a weather data record using APRS.
The :obj:`prepared_data` parameter contains the data to be uploaded.
It should be a dictionary of string keys and string values.
:param timestamp: the timestamp of the data to upload.
:type timestamp: datetime
:param prepared_data: the data to upload.
:type prepared_data: dict
:param ignore_last_update: don't get or set the 'last update'
status.ini entry.
:type ignore_last_update: bool
:return: success status
:rtype: bool
"""
login = 'user %s pass %s vers pywws %s\n' % (
prepared_data['designator'], prepared_data['passcode'], __version__)
packet = '%s>APRS,TCPIP*:@%sz%s/%s_%s/%sg%st%sr%sP%sb%sh%s.pywws-%s\n' % (
prepared_data['designator'], prepared_data['idx'],
prepared_data['latitude'], prepared_data['longitude'],
prepared_data['wind_dir'], prepared_data['wind_ave'],
prepared_data['wind_gust'], prepared_data['temp_out'],
prepared_data['rain_hour'], prepared_data['rain_day'],
prepared_data['rel_pressure'], prepared_data['hum_out'],
__version__
)
self.logger.debug('packet: "%s"', packet)
login = login.encode('ASCII')
packet = packet.encode('ASCII')
sock = socket.socket()
try:
sock.connect(self.server)
try:
response = sock.recv(4096)
self.logger.debug('server software: %s', response.strip())
sock.sendall(login)
response = sock.recv(4096)
self.logger.debug('server login ack: %s', response.strip())
sock.sendall(packet)
sock.shutdown(socket.SHUT_RDWR)
finally:
sock.close()
except Exception, ex:
e = str(ex)
if e != self.old_ex:
self.logger.error(e)
self.old_ex = e
return False
if not ignore_last_update:
self.set_last_update(timestamp)
return True
def http_send_data(self, timestamp, prepared_data, ignore_last_update=False):
"""Upload a weather data record using HTTP.
The :obj:`prepared_data` parameter contains the data to be uploaded.
It should be a dictionary of string keys and string values.
:param timestamp: the timestamp of the data to upload.
:type timestamp: datetime
:param prepared_data: the data to upload.
:type prepared_data: dict
:param ignore_last_update: don't get or set the 'last update'
status.ini entry.
:type ignore_last_update: bool
:return: success status
:rtype: bool
"""
coded_data = urllib.urlencode(prepared_data)
self.logger.debug(coded_data)
new_ex = self.old_ex
extra_ex = []
try:
try:
if self.use_get:
request = urllib2.Request(self.server + '?' + coded_data)
else:
request = urllib2.Request(self.server, coded_data.encode('ASCII'))
if self.auth_type == 'basic':
request.add_header('Authorization', self.auth)
wudata = urllib2.urlopen(request)
except urllib2.HTTPError, ex:
if ex.code != 400:
raise
wudata = ex
response = wudata.readlines()
wudata.close()
if len(response) == len(self.expected_result):
for actual, expected in zip(response, self.expected_result):
actual = actual.decode('utf-8')
if not re.match(expected, actual):
break
else:
self.old_response = response
if not ignore_last_update:
self.set_last_update(timestamp)
return True
if response != self.old_response:
for line in response:
self.logger.error(line.strip())
self.old_response = response
except urllib2.HTTPError, ex:
new_ex = str(ex)
extra_ex = str(ex.info()).split('\n')
for line in ex.readlines():
line = line.decode('utf-8')
extra_ex.append(re.sub('<.+?>', '', line))
except Exception, ex:
new_ex = str(ex)
if new_ex == self.old_ex:
log = self.logger.debug
else:
log = self.logger.error
self.old_ex = new_ex
log(new_ex)
for extra in extra_ex:
extra = extra.strip()
if extra:
log(extra)
return False
def next_data(self, catchup, live_data, ignore_last_update=False):
"""Get weather data records to upload.
This method returns either the most recent weather data
record, or all records since the last upload, according to
the value of :obj:`catchup`.
:param catchup: ``True`` to get all records since last upload,
or ``False`` to get most recent data only.
:type catchup: boolean
:param live_data: a current 'live' data record, or ``None``.
:type live_data: dict
:param ignore_last_update: don't get the 'last update'
status.ini entry.
:type ignore_last_update: bool
:return: yields weather data records.
:rtype: dict
"""
if ignore_last_update:
last_update = None
else:
last_update = self.status.get_datetime(
'last update', self.service_name)
if last_update:
self.next_update = max(self.next_update,
last_update + self.interval)
if catchup:
start = self.next_update
else:
start = self.data.before(datetime.max)
if live_data:
stop = live_data['idx'] - self.interval
else:
stop = None
for data in self.data[start:stop]:
if data['idx'] >= self.next_update:
self.next_update = data['idx'] + self.interval
yield data
if live_data and live_data['idx'] >= self.next_update:
self.next_update = live_data['idx'] + self.interval
yield live_data
def set_last_update(self, timestamp):
self.status.set(
'last update', self.service_name, timestamp.isoformat(' '))
if self.parent:
last_update = self.status.get_datetime('last update', self.parent)
if last_update and last_update >= timestamp - PARENT_MARGIN:
self.status.set('last update', self.parent,
(timestamp + PARENT_MARGIN).isoformat(' '))
def Upload(self, catchup=True, live_data=None, ignore_last_update=False):
"""Upload one or more weather data records.
This method uploads either the most recent weather data
record, or all records since the last upload (up to 7 days),
according to the value of :obj:`catchup`.
It sets the ``last update`` configuration value to the time
stamp of the most recent record successfully uploaded.
:param catchup: upload all data since last upload.
:type catchup: bool
:param live_data: current 'live' data. If not present the most
recent logged data is uploaded.
:type live_data: dict
:param ignore_last_update: don't get or set the 'last update'
status.ini entry.
:type ignore_last_update: bool
:return: success status
:rtype: bool
"""
count = 0
for data in self.next_data(catchup, live_data, ignore_last_update):
prepared_data = self.prepare_data(data)
if not prepared_data:
continue
if not self.send_data(data['idx'], prepared_data, ignore_last_update):
return False
count += 1
if count > 1:
self.logger.info('%d records sent', count)
return True
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(
argv[1:], "hcv", ['help', 'catchup', 'verbose'])
except getopt.error, msg:
print >>sys.stderr, 'Error: %s\n' % msg
print >>sys.stderr, __usage__.strip()
return 1
# process options
catchup = False
verbose = 0
for o, a in opts:
if o == '-h' or o == '--help':
print __usage__.strip()
return 0
elif o == '-c' or o == '--catchup':
catchup = True
elif o == '-v' or o == '--verbose':
verbose += 1
# check arguments
if len(args) != 2:
print >>sys.stderr, "Error: 2 arguments required"
print >>sys.stderr, __usage__.strip()
return 2
logger = ApplicationLogger(verbose)
return ToService(
DataStore.params(args[0]), DataStore.status(args[0]),
DataStore.calib_store(args[0]), args[1]).Upload(
catchup=catchup, ignore_last_update=not catchup)
if __name__ == "__main__":
sys.exit(main())
|
miguelalonso/pywws
|
src/pywws/toservice.py
|
Python
|
gpl-2.0
| 22,250
|
[
"VisIt"
] |
345de33f9bc43fe102b84305c21fee5bf3944f417f93271e714086c6d68cd77e
|
import os, sys
from distutils.core import setup
from setuptools import find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(name='SeqSero2',
version=open("version.py").readlines()[-1].split()[-1].strip("\"'"),
description='Salmonella serotyping',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python :: 3',
'Topic :: Text Processing :: Linguistic',
],
keywords='Salmonella serotyping bioinformatics WGS',
url='https://github.com/denglab/SeqSero2/',
author='Shaokang Zhang, Hendrik C Den-Bakker and Xiangyu Deng',
author_email='zskzsk@uga.edu, Hendrik.DenBakker@uga.edu, xdeng@uga.edu',
license='GPLv2',
scripts=["bin/deinterleave_fastq.sh","bin/Initial_Conditions.py","bin/SeqSero2_package.py","bin/SeqSero2_update_kmer_database.py"],
packages=[""],
include_package_data = True,
install_requires=['biopython==1.73'],
data_files=[("seqsero2_db",["seqsero2_db/antigens.pickle","seqsero2_db/H_and_O_and_specific_genes.fasta","seqsero2_db/invA_mers_dict","seqsero2_db/special.pickle"])],
zip_safe=False,
)
|
denglab/SeqSero2
|
setup.py
|
Python
|
gpl-2.0
| 1,265
|
[
"Biopython"
] |
8f2a06b0e375e6db859207a49f8c6c984d3c0201722be9708ad0b8d8d9b36506
|
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
"""
A custom centrality script for the C. elegans network.
"""
import networkx
# Load the neurons and their interconnections if needed.
if not any(network.objects):
execfile('Connectivity.py')
def progressCallback(fraction_complete = None):
return updateProgress('Calculating centrality...', fraction_complete)
# Compute the centrality of each node in the graph. (uncomment one of the following)
#centralities = networkx.degree_centrality(network.simplifiedGraph())
#centralities = networkx.closeness_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
centralities = networkx.betweenness_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
#centralities = networkx.load_centrality(network.simplifiedGraph(), weighted_edges = True, progress_callback = progressCallback)
if any(centralities):
# Compute the maximum centrality so we can normalize.
maxCentrality = max(centralities.itervalues())
# Alter the visualization of each node based on its centrality.
objectCentralities = {}
for node, centrality in centralities.iteritems():
object = network.objectWithId(node)
objectCentralities[object] = centrality / maxCentrality
diameter = 0.001 + objectCentralities[object] * 0.029
display.setVisibleSize(object, [diameter] * 3)
for synapse in network.synapses():
centrality = objectCentralities[synapse.preSynapticNeurite.neuron()]
for partner in synapse.postSynapticPartners:
centrality += objectCentralities[partner if isinstance(partner, Neuron) else partner.neuron()]
centrality /= 1 + len(synapse.postSynapticPartners)
display.setVisibleOpacity(synapse, centrality)
for gapJunction in network.gapJunctions():
centrality = 0.0
for neurite in gapJunction.neurites():
centrality += objectCentralities[neurite.neuron()]
centrality /= 2.0
display.setVisibleOpacity(gapJunction, centrality)
for innervation in network.innervations():
centrality = (objectCentralities[innervation.neurite.neuron()] + objectCentralities[innervation.muscle]) / 2.0
display.setVisibleOpacity(innervation, centrality)
|
JaneliaSciComp/Neuroptikon
|
Source/Scripts/C. elegans/Centrality.py
|
Python
|
bsd-3-clause
| 2,516
|
[
"NEURON"
] |
7420e85a1fc62b0ce1bdce8d1bc52f6ddbe88b32beb5dd8b7ff5ab1fa427b001
|
#!/usr/bin/env python
#Copyright 2013 Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000, there is a non-exclusive license for use of this work by or on behalf of the U.S. Government. Export of this program may require a license from the United States Government.
from mpi4py import MPI
import pymongo
import sys, string, time
import unicodedata
import datetime
from vtk import *
from titan.TextAnalysis import *
from titan.DataAnalysis import *
from titan.MachineLearning import *
if len (sys.argv) < 2:
print "Usage: " + sys.argv[0] + " database"
exit ()
from optparse import OptionParser
parser = OptionParser ()
parser.add_option ("-d", "--db", dest="database",
help="Specify the mongo database to work with")
parser.add_option ("-k", "--clusters", dest="clusters", type="int", default=5,
help="Specify the number of user groups to find (default 5)")
(options, args) = parser.parse_args ()
if (options.database == None):
print "You must specify a database. Use -h for help"
exit ();
mpicomm = MPI.COMM_WORLD
mpisize = mpicomm.Get_size ()
mpirank = mpicomm.Get_rank ()
controller = vtkMPIController ()
controller.Initialize ()
connection = pymongo.Connection ()
db = connection[options.database]
models = db['models']
phcModel = models.find_one({'model' : 'phc'})
if (len (phcModel['initial']) == 0):
print "Cluster model is too small"
exit ()
userTable = vtkTable ()
for i in range (0, len (phcModel['initial'])):
column = vtkDoubleArray ()
column.SetName (str(i))
userTable.AddColumn (column)
users = db['users']
posts = db['posts']
users_len = users.count ()
limit = int(users_len / mpisize + 1)
skip = limit * mpirank
users_internal = []
for user in users.find (skip=skip, limit=limit):
users_internal.append (user['_id'])
clusterValues = [0]*len (phcModel['initial'])
for post in posts.find ({'name' : user['name']}):
try:
cluster = int(post['cluster_assignment'][0])
except:
print "post " + repr(post)
# skip this post, otherwise `cluster` may be undefined below
continue
if (cluster >= 0 and cluster < len(clusterValues)):
clusterValues[cluster] += post['cluster_proximity'][0]
user['post_clusters'] = clusterValues
users.update ({'_id' : user['_id']}, user)
for i in range(0, len(clusterValues)):
userTable.GetColumn (i).InsertNextValue (clusterValues[i])
cluster = vtkPPHClustering ()
cluster.SetController (controller)
cluster.SetInputData (userTable)
cluster.SetNumberOfClusters (options.clusters)
cluster.SetNumberOfTrials (10)
cluster.Update ()
clusterTable = cluster.GetOutput (0)
hierarchicalTable = cluster.GetOutput (1)
assignmentTable = cluster.GetOutput (2)
aArr = assignmentTable.GetColumn (0);
assignmentsPython = [
[ aArr.GetComponent (i, j) for j in range (0, aArr.GetNumberOfComponents ()) ]
for i in range (0, aArr.GetNumberOfTuples ()) ]
apArr = assignmentTable.GetColumn (1);
assignmentProxPython = [
[ apArr.GetComponent (i, j) for j in range (0, apArr.GetNumberOfComponents ()) ]
for i in range (0, apArr.GetNumberOfTuples ()) ]
doc = 0
for userid in users_internal:
user = users.find_one ({'_id' : userid})
if (doc >= len(assignmentsPython)):
break
user['cluster_assignment'] = assignmentsPython[doc]
user['cluster_proximity'] = assignmentProxPython[doc]
users.update ({'_id' : user['_id']}, user)
doc += 1
if (doc != len(assignmentsPython)):
print "Error assignments and users don't match"
exit ()
if (mpirank == 0):
cArr = clusterTable.GetColumn (0)
clusters = [
[ cArr.GetComponent (i, j) for j in range (0, cArr.GetNumberOfComponents ()) ]
for i in range (0, cArr.GetNumberOfTuples ()) ]
pArr = hierarchicalTable.GetColumn (1);
clusterProximities = [
[ pArr.GetComponent (i, j) for j in range (0, pArr.GetNumberOfComponents ()) ]
for i in range (0, pArr.GetNumberOfTuples ()) ]
clusterModel = models.find_one({'model' : 'upc'})
if (clusterModel == None):
clusterModel = {'model': 'upc',
'updated': datetime.datetime.utcnow (),
'cluster': clusters,
'proximities': clusterProximities }
print "Inserting a new UPC model"
models.insert (clusterModel)
else:
updatedModel = {'model': 'upc',
'updated': datetime.datetime.utcnow (),
'cluster': clusters,
'proximities': clusterProximities }
print "Updating the UPC model"
models.update ({'model' : 'upc'}, updatedModel)
for model in models.find ({'model' : 'upc'}):
print "model: " + model['model'] + " updated: " + str(model['updated'])
controller.Finalize ()
|
sandialabs/grandmaster
|
processing/updatePUPCmodel.py
|
Python
|
apache-2.0
| 4,638
|
[
"VTK"
] |
c9ad6eb9c4915dc46903dbdad75f869ea0d44fec1be3e2ab4b7703c81ea5eee6
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parser for PDB files."""
from __future__ import print_function
import warnings
try:
import numpy
except:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use the PDB parser.")
from Bio.File import as_handle
from Bio.PDB.PDBExceptions import PDBConstructionException
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from Bio.PDB.StructureBuilder import StructureBuilder
from Bio.PDB.parse_pdb_header import _parse_pdb_header_list
# If PDB spec says "COLUMNS 18-20" this means line[17:20]
class PDBParser(object):
"""
Parse a PDB file and return a Structure object.
"""
def __init__(self, PERMISSIVE=True, get_header=False,
structure_builder=None, QUIET=False):
"""
The PDB parser calls a number of standard methods in an aggregated
StructureBuilder object. Normally this object is instantiated by the
PDBParser object itself, but if the user provides his/her own
StructureBuilder object, the latter is used instead.
Arguments:
o PERMISSIVE - Evaluated as a Boolean. If false, exceptions in
constructing the SMCRA data structure are fatal. If true (DEFAULT),
the exceptions are caught, but some residues or atoms will be missing.
THESE EXCEPTIONS ARE DUE TO PROBLEMS IN THE PDB FILE!
o structure_builder - an optional user implemented StructureBuilder class.
o QUIET - Evaluated as a Boolean. If true, warnings issued in constructing
the SMCRA data will be suppressed. If false (DEFAULT), they will be shown.
These warnings might be indicative of problems in the PDB file!
"""
if structure_builder is not None:
self.structure_builder = structure_builder
else:
self.structure_builder = StructureBuilder()
self.header = None
self.trailer = None
self.line_counter = 0
self.PERMISSIVE = bool(PERMISSIVE)
self.QUIET = bool(QUIET)
# Public methods
def get_structure(self, id, file):
"""Return the structure.
Arguments:
o id - string, the id that will be used for the structure
o file - name of the PDB file OR an open filehandle
"""
with warnings.catch_warnings():
if self.QUIET:
warnings.filterwarnings("ignore", category=PDBConstructionWarning)
self.header = None
self.trailer = None
# Make a StructureBuilder instance (pass id of structure as parameter)
self.structure_builder.init_structure(id)
with as_handle(file) as handle:
self._parse(handle.readlines())
self.structure_builder.set_header(self.header)
# Return the Structure instance
structure = self.structure_builder.get_structure()
return structure
def get_header(self):
"Return the header."
return self.header
def get_trailer(self):
"Return the trailer."
return self.trailer
# Private methods
def _parse(self, header_coords_trailer):
"Parse the PDB file."
# Extract the header; return the rest of the file
self.header, coords_trailer = self._get_header(header_coords_trailer)
# Parse the atomic data; return the PDB file trailer
self.trailer = self._parse_coordinates(coords_trailer)
def _get_header(self, header_coords_trailer):
"Get the header of the PDB file, return the rest."
structure_builder = self.structure_builder
i = 0
for i in range(0, len(header_coords_trailer)):
structure_builder.set_line_counter(i + 1)
line = header_coords_trailer[i]
record_type = line[0:6]
if record_type == "ATOM " or record_type == "HETATM" or record_type == "MODEL ":
break
header = header_coords_trailer[0:i]
# Return the rest of the coords+trailer for further processing
self.line_counter = i
coords_trailer = header_coords_trailer[i:]
header_dict = _parse_pdb_header_list(header)
return header_dict, coords_trailer
def _parse_coordinates(self, coords_trailer):
"Parse the atomic data in the PDB file."
local_line_counter = 0
structure_builder = self.structure_builder
current_model_id = 0
# Flag we have an open model
model_open = 0
current_chain_id = None
current_segid = None
current_residue_id = None
current_resname = None
for i in range(0, len(coords_trailer)):
line = coords_trailer[i]
record_type = line[0:6]
global_line_counter = self.line_counter + local_line_counter + 1
structure_builder.set_line_counter(global_line_counter)
if record_type == "ATOM " or record_type == "HETATM":
# Initialize the Model - there was no explicit MODEL record
if not model_open:
structure_builder.init_model(current_model_id)
current_model_id += 1
model_open = 1
fullname = line[12:16]
# get rid of whitespace in atom names
split_list = fullname.split()
if len(split_list) != 1:
# atom name has internal spaces, e.g. " N B ", so
# we do not strip spaces
name = fullname
else:
# atom name is like " CA ", so we can strip spaces
name = split_list[0]
altloc = line[16]
resname = line[17:20]
chainid = line[21]
try:
serial_number = int(line[6:11])
except:
serial_number = 0
resseq = int(line[22:26].split()[0]) # sequence identifier
icode = line[26] # insertion code
if record_type == "HETATM": # hetero atom flag
if resname == "HOH" or resname == "WAT":
hetero_flag = "W"
else:
hetero_flag = "H"
else:
hetero_flag = " "
residue_id = (hetero_flag, resseq, icode)
# atomic coordinates
try:
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
except:
# Should we allow parsing to continue in permissive mode?
# If so, what coordinates should we default to? Easier to abort!
raise PDBConstructionException("Invalid or missing coordinate(s) at line %i."
% global_line_counter)
coord = numpy.array((x, y, z), "f")
# occupancy & B factor
try:
occupancy = float(line[54:60])
except:
self._handle_PDB_exception("Invalid or missing occupancy",
global_line_counter)
occupancy = None # Rather than arbitrary zero or one
try:
bfactor = float(line[60:66])
except:
self._handle_PDB_exception("Invalid or missing B factor",
global_line_counter)
bfactor = 0.0 # The PDB use a default of zero if the data is missing
segid = line[72:76]
element = line[76:78].strip()
if current_segid != segid:
current_segid = segid
structure_builder.init_seg(current_segid)
if current_chain_id != chainid:
current_chain_id = chainid
structure_builder.init_chain(current_chain_id)
current_residue_id = residue_id
current_resname = resname
try:
structure_builder.init_residue(resname, hetero_flag, resseq, icode)
except PDBConstructionException as message:
self._handle_PDB_exception(message, global_line_counter)
elif current_residue_id != residue_id or current_resname != resname:
current_residue_id = residue_id
current_resname = resname
try:
structure_builder.init_residue(resname, hetero_flag, resseq, icode)
except PDBConstructionException as message:
self._handle_PDB_exception(message, global_line_counter)
# init atom
try:
structure_builder.init_atom(name, coord, bfactor, occupancy, altloc,
fullname, serial_number, element)
except PDBConstructionException as message:
self._handle_PDB_exception(message, global_line_counter)
elif record_type == "ANISOU":
anisou = [float(x) for x in (line[28:35], line[35:42], line[43:49],
line[49:56], line[56:63], line[63:70])]
# U's are scaled by 10^4
anisou_array = (numpy.array(anisou, "f") / 10000.0).astype("f")
structure_builder.set_anisou(anisou_array)
elif record_type == "MODEL ":
try:
serial_num = int(line[10:14])
except:
self._handle_PDB_exception("Invalid or missing model serial number",
global_line_counter)
serial_num = 0
structure_builder.init_model(current_model_id, serial_num)
current_model_id += 1
model_open = 1
current_chain_id = None
current_residue_id = None
elif record_type == "END " or record_type == "CONECT":
# End of atomic data, return the trailer
self.line_counter += local_line_counter
return coords_trailer[local_line_counter:]
elif record_type == "ENDMDL":
model_open = 0
current_chain_id = None
current_residue_id = None
elif record_type == "SIGUIJ":
# standard deviation of anisotropic B factor
siguij = [float(x) for x in (line[28:35], line[35:42], line[42:49],
line[49:56], line[56:63], line[63:70])]
# U sigma's are scaled by 10^4
siguij_array = (numpy.array(siguij, "f") / 10000.0).astype("f")
structure_builder.set_siguij(siguij_array)
elif record_type == "SIGATM":
# standard deviation of atomic positions
sigatm = [float(x) for x in (line[30:38], line[38:45], line[46:54],
line[54:60], line[60:66])]
sigatm_array = numpy.array(sigatm, "f")
structure_builder.set_sigatm(sigatm_array)
local_line_counter += 1
# EOF (does not end in END or CONECT)
self.line_counter = self.line_counter + local_line_counter
return []
def _handle_PDB_exception(self, message, line_counter):
"""
This method catches an exception that occurs in the StructureBuilder
object (if PERMISSIVE), or raises it again, this time adding the
PDB line number to the error message.
"""
message = "%s at line %i." % (message, line_counter)
if self.PERMISSIVE:
# just print a warning - some residues/atoms may be missing
warnings.warn("PDBConstructionException: %s\n"
"Exception ignored.\n"
"Some atoms or residues may be missing in the data structure."
% message, PDBConstructionWarning)
else:
# exceptions are fatal - raise again with new message (including line nr)
raise PDBConstructionException(message)
if __name__ == "__main__":
import sys
p = PDBParser(PERMISSIVE=True)
filename = sys.argv[1]
s = p.get_structure("scr", filename)
for m in s:
p = m.get_parent()
assert(p is s)
for c in m:
p = c.get_parent()
assert(p is m)
for r in c:
print(r)
p = r.get_parent()
assert(p is c)
for a in r:
p = a.get_parent()
if p is not r:
print("%s %s" % (p, r))
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/PDB/PDBParser.py
|
Python
|
gpl-2.0
| 13,243
|
[
"Biopython"
] |
738ca6a3159b057ef3118948228ff106f225c98207643ecd6c9746668c0df33a
|
""" Class that contains client access to the job monitoring handler. """
from __future__ import absolute_import
__RCSID__ = "$Id$"
from DIRAC.Core.Base.Client import Client, createClient
@createClient('WorkloadManagement/JobMonitoring')
class JobMonitoringClient(Client):
def __init__(self, **kwargs):
super(JobMonitoringClient, self).__init__(**kwargs)
self.setServer('WorkloadManagement/JobMonitoring')
def traceJobParameters(self, site, localID, parameterList=None, attributeList=None, date=None, until=None):
return self._getRPC().traceJobParameters(site, localID, parameterList, attributeList, date, until)
def traceJobParameter(self, site, localID, parameter, date=None, until=None):
return self._getRPC().traceJobParameter(site, localID, parameter, date, until)
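# Illustrative usage sketch (the site name and local job ID below are made up):
#     from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
#     monitoring = JobMonitoringClient()
#     result = monitoring.traceJobParameter('LCG.Example.org', 1234, 'Status')
#     if result['OK']:
#         print(result['Value'])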
|
petricm/DIRAC
|
WorkloadManagementSystem/Client/JobMonitoringClient.py
|
Python
|
gpl-3.0
| 800
|
[
"DIRAC"
] |
b7bf641a1df445837989cbf99837a76f360c8794cca0ce8147ba2334e016122e
|
from __future__ import absolute_import
import cython
cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object,
Options=object, UtilNodes=object, LetNode=object,
LetRefNode=object, TreeFragment=object, EncodedString=object,
error=object, warning=object, copy=object, _unicode=object)
import copy
from . import PyrexTypes
from . import Naming
from . import ExprNodes
from . import Nodes
from . import Options
from . import Builtin
from .Visitor import VisitorTransform, TreeVisitor
from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
from .UtilNodes import LetNode, LetRefNode
from .TreeFragment import TreeFragment
from .StringEncoding import EncodedString, _unicode
from .Errors import error, warning, CompileError, InternalError
from .Code import UtilityCode
class NameNodeCollector(TreeVisitor):
"""Collect all NameNodes of a (sub-)tree in the ``name_nodes``
attribute.
"""
def __init__(self):
super(NameNodeCollector, self).__init__()
self.name_nodes = []
def visit_NameNode(self, node):
self.name_nodes.append(node)
def visit_Node(self, node):
self._visitchildren(node, None)
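# Illustrative usage sketch (``some_node`` is hypothetical), following the same
# pattern used with YieldNodeCollector later in this module:
#     collector = NameNodeCollector()
#     collector.visitchildren(some_node)
#     names = collector.name_nodes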
class SkipDeclarations(object):
"""
Variable and function declarations can often have a deep tree structure,
and yet most transformations don't need to descend to this depth.
Declaration nodes are removed after AnalyseDeclarationsTransform, so there
is no need to use this for transformations after that point.
"""
def visit_CTypeDefNode(self, node):
return node
def visit_CVarDefNode(self, node):
return node
def visit_CDeclaratorNode(self, node):
return node
def visit_CBaseTypeNode(self, node):
return node
def visit_CEnumDefNode(self, node):
return node
def visit_CStructOrUnionDefNode(self, node):
return node
class NormalizeTree(CythonTransform):
"""
This transform fixes up a few things after parsing
in order to make the parse tree more suitable for
transforms.
a) After parsing, blocks with only one statement will
be represented by that statement, not by a StatListNode.
When doing transforms this is annoying and inconsistent,
as one cannot in general remove a statement in a consistent
way and so on. This transform wraps any single statements
in a StatListNode containing a single statement.
b) The PassStatNode is a noop and serves no purpose beyond
plugging such one-statement blocks; i.e., once parsed a
` "pass" can just as well be represented using an empty
StatListNode. This means less special cases to worry about
in subsequent transforms (one always checks to see if a
StatListNode has no children to see if the block is empty).
"""
def __init__(self, context):
super(NormalizeTree, self).__init__(context)
self.is_in_statlist = False
self.is_in_expr = False
def visit_ExprNode(self, node):
stacktmp = self.is_in_expr
self.is_in_expr = True
self.visitchildren(node)
self.is_in_expr = stacktmp
return node
def visit_StatNode(self, node, is_listcontainer=False):
stacktmp = self.is_in_statlist
self.is_in_statlist = is_listcontainer
self.visitchildren(node)
self.is_in_statlist = stacktmp
if not self.is_in_statlist and not self.is_in_expr:
return Nodes.StatListNode(pos=node.pos, stats=[node])
else:
return node
def visit_StatListNode(self, node):
self.is_in_statlist = True
self.visitchildren(node)
self.is_in_statlist = False
return node
def visit_ParallelAssignmentNode(self, node):
return self.visit_StatNode(node, True)
def visit_CEnumDefNode(self, node):
return self.visit_StatNode(node, True)
def visit_CStructOrUnionDefNode(self, node):
return self.visit_StatNode(node, True)
def visit_PassStatNode(self, node):
"""Eliminate PassStatNode"""
if not self.is_in_statlist:
return Nodes.StatListNode(pos=node.pos, stats=[])
else:
return []
def visit_ExprStatNode(self, node):
"""Eliminate useless string literals"""
if node.expr.is_string_literal:
return self.visit_PassStatNode(node)
else:
return self.visit_StatNode(node)
def visit_CDeclaratorNode(self, node):
return node
class PostParseError(CompileError): pass
# error strings checked by unit tests, so define them
ERR_CDEF_INCLASS = 'Cannot assign default value to fields in cdef classes, structs or unions'
ERR_BUF_DEFAULTS = 'Invalid buffer defaults specification (see docs)'
ERR_INVALID_SPECIALATTR_TYPE = 'Special attributes must not have a type declared'
class PostParse(ScopeTrackingTransform):
"""
Basic interpretation of the parse tree, as well as validity
checking that can be done at this very basic level on the parse
tree (for problems that are not, as such, errors in the basic syntax).
Specifically:
- Default values to cdef assignments are turned into single
assignments following the declaration (everywhere but in class
bodies, where they raise a compile error)
- Interpret some node structures into Python runtime values.
Some nodes take compile-time arguments (currently:
TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}),
which should be interpreted. This happens in a general way
and other steps should be taken to ensure validity.
Type arguments cannot be interpreted in this way.
- For __cythonbufferdefaults__ the arguments are checked for
validity.
TemplatedTypeNode has its directives interpreted:
Any first positional argument goes into the "dtype" attribute,
any "ndim" keyword argument goes into the "ndim" attribute and
so on. Also it is checked that the directive combination is valid.
- __cythonbufferdefaults__ attributes are parsed and put into the
type information.
Note: Currently Parsing.py does a lot of interpretation and
reorganization that can be refactored into this transform
if a more pure Abstract Syntax Tree is wanted.
"""
def __init__(self, context):
super(PostParse, self).__init__(context)
self.specialattribute_handlers = {
'__cythonbufferdefaults__' : self.handle_bufferdefaults
}
def visit_LambdaNode(self, node):
# unpack a lambda expression into the corresponding DefNode
collector = YieldNodeCollector()
collector.visitchildren(node.result_expr)
if collector.yields or collector.awaits or isinstance(node.result_expr, ExprNodes.YieldExprNode):
body = Nodes.ExprStatNode(
node.result_expr.pos, expr=node.result_expr)
else:
body = Nodes.ReturnStatNode(
node.result_expr.pos, value=node.result_expr)
node.def_node = Nodes.DefNode(
node.pos, name=node.name,
args=node.args, star_arg=node.star_arg,
starstar_arg=node.starstar_arg,
body=body, doc=None)
self.visitchildren(node)
return node
def visit_GeneratorExpressionNode(self, node):
# unpack a generator expression into the corresponding DefNode
node.def_node = Nodes.DefNode(node.pos, name=node.name,
doc=None,
args=[], star_arg=None,
starstar_arg=None,
body=node.loop)
self.visitchildren(node)
return node
# cdef variables
def handle_bufferdefaults(self, decl):
if not isinstance(decl.default, ExprNodes.DictNode):
raise PostParseError(decl.pos, ERR_BUF_DEFAULTS)
self.scope_node.buffer_defaults_node = decl.default
self.scope_node.buffer_defaults_pos = decl.pos
def visit_CVarDefNode(self, node):
# This assumes only plain names and pointers are assignable on
# declaration. Also, it makes use of the fact that a cdef decl
# must appear before the first use, so we don't have to deal with
# "i = 3; cdef int i = i" and can simply move the nodes around.
try:
self.visitchildren(node)
stats = [node]
newdecls = []
for decl in node.declarators:
declbase = decl
while isinstance(declbase, Nodes.CPtrDeclaratorNode):
declbase = declbase.base
if isinstance(declbase, Nodes.CNameDeclaratorNode):
if declbase.default is not None:
if self.scope_type in ('cclass', 'pyclass', 'struct'):
if isinstance(self.scope_node, Nodes.CClassDefNode):
handler = self.specialattribute_handlers.get(decl.name)
if handler:
if decl is not declbase:
raise PostParseError(decl.pos, ERR_INVALID_SPECIALATTR_TYPE)
handler(decl)
continue # Remove declaration
raise PostParseError(decl.pos, ERR_CDEF_INCLASS)
first_assignment = self.scope_type != 'module'
stats.append(Nodes.SingleAssignmentNode(node.pos,
lhs=ExprNodes.NameNode(node.pos, name=declbase.name),
rhs=declbase.default, first=first_assignment))
declbase.default = None
newdecls.append(decl)
node.declarators = newdecls
return stats
except PostParseError as e:
# An error in a cdef clause is ok, simply remove the declaration
# and try to move on to report more errors
self.context.nonfatal_error(e)
return None
# Split parallel assignments (a,b = b,a) into separate partial
# assignments that are executed rhs-first using temps. This
# restructuring must be applied before type analysis so that known
# types on rhs and lhs can be matched directly. It is required in
# the case that the types cannot be coerced to a Python type in
# order to assign from a tuple.
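    # Illustrative sketch (assumption about the resulting tree shape): an
    # assignment such as
    #     a, b = b, a
    # is split into the single assignments "a = b" and "b = a", which are then
    # grouped in a ParallelAssignmentNode so that both right hand sides are
    # evaluated before any assignment happens; duplicated non-trivial rhs
    # items are additionally wrapped in LetRefNode temps (see below).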
def visit_SingleAssignmentNode(self, node):
self.visitchildren(node)
return self._visit_assignment_node(node, [node.lhs, node.rhs])
def visit_CascadedAssignmentNode(self, node):
self.visitchildren(node)
return self._visit_assignment_node(node, node.lhs_list + [node.rhs])
def _visit_assignment_node(self, node, expr_list):
"""Flatten parallel assignments into separate single
assignments or cascaded assignments.
"""
if sum([ 1 for expr in expr_list
if expr.is_sequence_constructor or expr.is_string_literal ]) < 2:
# no parallel assignments => nothing to do
return node
expr_list_list = []
flatten_parallel_assignments(expr_list, expr_list_list)
temp_refs = []
eliminate_rhs_duplicates(expr_list_list, temp_refs)
nodes = []
for expr_list in expr_list_list:
lhs_list = expr_list[:-1]
rhs = expr_list[-1]
if len(lhs_list) == 1:
node = Nodes.SingleAssignmentNode(rhs.pos,
lhs = lhs_list[0], rhs = rhs)
else:
node = Nodes.CascadedAssignmentNode(rhs.pos,
lhs_list = lhs_list, rhs = rhs)
nodes.append(node)
if len(nodes) == 1:
assign_node = nodes[0]
else:
assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes)
if temp_refs:
duplicates_and_temps = [ (temp.expression, temp)
for temp in temp_refs ]
sort_common_subsequences(duplicates_and_temps)
for _, temp_ref in duplicates_and_temps[::-1]:
assign_node = LetNode(temp_ref, assign_node)
return assign_node
def _flatten_sequence(self, seq, result):
for arg in seq.args:
if arg.is_sequence_constructor:
self._flatten_sequence(arg, result)
else:
result.append(arg)
return result
def visit_DelStatNode(self, node):
self.visitchildren(node)
node.args = self._flatten_sequence(node, [])
return node
def visit_ExceptClauseNode(self, node):
if node.is_except_as:
# except-as must delete NameNode target at the end
del_target = Nodes.DelStatNode(
node.pos,
args=[ExprNodes.NameNode(
node.target.pos, name=node.target.name)],
ignore_nonexisting=True)
node.body = Nodes.StatListNode(
node.pos,
stats=[Nodes.TryFinallyStatNode(
node.pos,
body=node.body,
finally_clause=Nodes.StatListNode(
node.pos,
stats=[del_target]))])
self.visitchildren(node)
return node
def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence):
"""Replace rhs items by LetRefNodes if they appear more than once.
Creates a sequence of LetRefNodes that set up the required temps
and appends them to ref_node_sequence. The input list is modified
in-place.
"""
seen_nodes = set()
ref_nodes = {}
def find_duplicates(node):
if node.is_literal or node.is_name:
# no need to replace those; can't include attributes here
# as their access is not necessarily side-effect free
return
if node in seen_nodes:
if node not in ref_nodes:
ref_node = LetRefNode(node)
ref_nodes[node] = ref_node
ref_node_sequence.append(ref_node)
else:
seen_nodes.add(node)
if node.is_sequence_constructor:
for item in node.args:
find_duplicates(item)
for expr_list in expr_list_list:
rhs = expr_list[-1]
find_duplicates(rhs)
if not ref_nodes:
return
def substitute_nodes(node):
if node in ref_nodes:
return ref_nodes[node]
elif node.is_sequence_constructor:
node.args = list(map(substitute_nodes, node.args))
return node
# replace nodes inside of the common subexpressions
for node in ref_nodes:
if node.is_sequence_constructor:
node.args = list(map(substitute_nodes, node.args))
# replace common subexpressions on all rhs items
for expr_list in expr_list_list:
expr_list[-1] = substitute_nodes(expr_list[-1])
def sort_common_subsequences(items):
"""Sort items/subsequences so that all items and subsequences that
an item contains appear before the item itself. This is needed
because each rhs item must only be evaluated once, so its value
must be evaluated first and then reused when packing sequences
that contain it.
This implies a partial order, and the sort must be stable to
preserve the original order as much as possible, so we use a
simple insertion sort (which is very fast for short sequences, the
normal case in practice).
"""
def contains(seq, x):
for item in seq:
if item is x:
return True
elif item.is_sequence_constructor and contains(item.args, x):
return True
return False
def lower_than(a,b):
return b.is_sequence_constructor and contains(b.args, a)
for pos, item in enumerate(items):
key = item[1] # the ResultRefNode which has already been injected into the sequences
new_pos = pos
for i in range(pos-1, -1, -1):
if lower_than(key, items[i][0]):
new_pos = i
if new_pos != pos:
for i in range(pos, new_pos, -1):
items[i] = items[i-1]
items[new_pos] = item
def unpack_string_to_character_literals(literal):
chars = []
pos = literal.pos
stype = literal.__class__
sval = literal.value
sval_type = sval.__class__
for char in sval:
cval = sval_type(char)
chars.append(stype(pos, value=cval, constant_result=cval))
return chars
def flatten_parallel_assignments(input, output):
# The input is a list of expression nodes, representing the LHSs
# and RHS of one (possibly cascaded) assignment statement. For
# sequence constructors, rearranges the matching parts of both
# sides into a list of equivalent assignments between the
# individual elements. This transformation is applied
# recursively, so that nested structures get matched as well.
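    # Illustrative sketch (assumption about the resulting list shapes): for
    #     a, (b, c) = x, (y, z)
    # the input [lhs, rhs] is rearranged into the partial assignments
    #     [a, x], [b, y], [c, z]
    # which are appended to the output list.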
rhs = input[-1]
if (not (rhs.is_sequence_constructor or isinstance(rhs, ExprNodes.UnicodeNode))
or not sum([lhs.is_sequence_constructor for lhs in input[:-1]])):
output.append(input)
return
complete_assignments = []
if rhs.is_sequence_constructor:
rhs_args = rhs.args
elif rhs.is_string_literal:
rhs_args = unpack_string_to_character_literals(rhs)
rhs_size = len(rhs_args)
lhs_targets = [[] for _ in range(rhs_size)]
starred_assignments = []
for lhs in input[:-1]:
if not lhs.is_sequence_constructor:
if lhs.is_starred:
error(lhs.pos, "starred assignment target must be in a list or tuple")
complete_assignments.append(lhs)
continue
lhs_size = len(lhs.args)
starred_targets = sum([1 for expr in lhs.args if expr.is_starred])
if starred_targets > 1:
error(lhs.pos, "more than 1 starred expression in assignment")
output.append([lhs,rhs])
continue
elif lhs_size - starred_targets > rhs_size:
error(lhs.pos, "need more than %d value%s to unpack"
% (rhs_size, (rhs_size != 1) and 's' or ''))
output.append([lhs,rhs])
continue
elif starred_targets:
map_starred_assignment(lhs_targets, starred_assignments,
lhs.args, rhs_args)
elif lhs_size < rhs_size:
error(lhs.pos, "too many values to unpack (expected %d, got %d)"
% (lhs_size, rhs_size))
output.append([lhs,rhs])
continue
else:
for targets, expr in zip(lhs_targets, lhs.args):
targets.append(expr)
if complete_assignments:
complete_assignments.append(rhs)
output.append(complete_assignments)
# recursively flatten partial assignments
for cascade, rhs in zip(lhs_targets, rhs_args):
if cascade:
cascade.append(rhs)
flatten_parallel_assignments(cascade, output)
# recursively flatten starred assignments
for cascade in starred_assignments:
if cascade[0].is_sequence_constructor:
flatten_parallel_assignments(cascade, output)
else:
output.append(cascade)
def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, rhs_args):
# Appends the fixed-position LHS targets to the target list that
# appear left and right of the starred argument.
#
# The starred_assignments list receives a new tuple
# (lhs_target, rhs_values_list) that maps the remaining arguments
# (those that match the starred target) to a list.
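    # Illustrative example (assumption): for "a, *b, c = 1, 2, 3, 4" the fixed
    # targets a and c go into the leftmost and rightmost target lists, while
    # starred_assignments receives [b, ListNode([2, 3])], binding b to the
    # (possibly empty) list of remaining values.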
# left side of the starred target
for i, (targets, expr) in enumerate(zip(lhs_targets, lhs_args)):
if expr.is_starred:
starred = i
lhs_remaining = len(lhs_args) - i - 1
break
targets.append(expr)
else:
raise InternalError("no starred arg found when splitting starred assignment")
# right side of the starred target
for i, (targets, expr) in enumerate(zip(lhs_targets[-lhs_remaining:],
lhs_args[starred + 1:])):
targets.append(expr)
# the starred target itself, must be assigned a (potentially empty) list
target = lhs_args[starred].target # unpack starred node
starred_rhs = rhs_args[starred:]
if lhs_remaining:
starred_rhs = starred_rhs[:-lhs_remaining]
if starred_rhs:
pos = starred_rhs[0].pos
else:
pos = target.pos
starred_assignments.append([
target, ExprNodes.ListNode(pos=pos, args=starred_rhs)])
class PxdPostParse(CythonTransform, SkipDeclarations):
"""
Basic interpretation/validity checking that should only be
done on pxd trees.
A lot of this checking currently happens in the parser; but
what is listed below happens here.
- "def" functions are let through only if they fill the
getbuffer/releasebuffer slots
- cdef functions are let through only if they are on the
top level and are declared "inline"
"""
ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'"
ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'"
def __call__(self, node):
self.scope_type = 'pxd'
return super(PxdPostParse, self).__call__(node)
def visit_CClassDefNode(self, node):
old = self.scope_type
self.scope_type = 'cclass'
self.visitchildren(node)
self.scope_type = old
return node
def visit_FuncDefNode(self, node):
        # FuncDefNodes always come with an implementation (without
        # an implementation they are CVarDefNodes...)
err = self.ERR_INLINE_ONLY
if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass'
and node.name in ('__getbuffer__', '__releasebuffer__')):
err = None # allow these slots
if isinstance(node, Nodes.CFuncDefNode):
if (u'inline' in node.modifiers and
self.scope_type in ('pxd', 'cclass')):
node.inline_in_pxd = True
if node.visibility != 'private':
err = self.ERR_NOGO_WITH_INLINE % node.visibility
elif node.api:
err = self.ERR_NOGO_WITH_INLINE % 'api'
else:
err = None # allow inline function
else:
err = self.ERR_INLINE_ONLY
if err:
self.context.nonfatal_error(PostParseError(node.pos, err))
return None
else:
return node
class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
"""
After parsing, directives can be stored in a number of places:
- #cython-comments at the top of the file (stored in ModuleNode)
- Command-line arguments overriding these
- @cython.directivename decorators
- with cython.directivename: statements
    This transform is responsible for interpreting these various sources
    and storing the directives in two ways:
- Set the directives attribute of the ModuleNode for global directives.
- Use a CompilerDirectivesNode to override directives for a subtree.
    (The first one is used primarily so that the tree structure does not
    have to be modified and the ModuleNode stays on top.)
The directives are stored in dictionaries from name to value in effect.
Each such dictionary is always filled in for all possible directives,
using default values where no value is given by the user.
The available directives are controlled in Options.py.
Note that we have to run this prior to analysis, and so some minor
duplication of functionality has to occur: We manually track cimports
and which names the "cython" module may have been imported to.
"""
unop_method_nodes = {
'typeof': ExprNodes.TypeofNode,
'operator.address': ExprNodes.AmpersandNode,
'operator.dereference': ExprNodes.DereferenceNode,
'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
# For backwards compatibility.
'address': ExprNodes.AmpersandNode,
}
binop_method_nodes = {
'operator.comma' : ExprNodes.c_binop_constructor(','),
}
special_methods = set(['declare', 'union', 'struct', 'typedef',
'sizeof', 'cast', 'pointer', 'compiled',
'NULL', 'fused_type', 'parallel'])
special_methods.update(unop_method_nodes)
valid_parallel_directives = set([
"parallel",
"prange",
"threadid",
#"threadsavailable",
])
def __init__(self, context, compilation_directive_defaults):
super(InterpretCompilerDirectives, self).__init__(context)
self.cython_module_names = set()
self.directive_names = {'staticmethod': 'staticmethod'}
self.parallel_directives = {}
directives = copy.deepcopy(Options.get_directive_defaults())
for key, value in compilation_directive_defaults.items():
directives[_unicode(key)] = copy.deepcopy(value)
self.directives = directives
def check_directive_scope(self, pos, directive, scope):
legal_scopes = Options.directive_scopes.get(directive, None)
if legal_scopes and scope not in legal_scopes:
self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive '
'is not allowed in %s scope' % (directive, scope)))
return False
else:
if directive not in Options.directive_types:
error(pos, "Invalid directive: '%s'." % (directive,))
return True
# Set up processing and handle the cython: comments.
def visit_ModuleNode(self, node):
for key in sorted(node.directive_comments):
if not self.check_directive_scope(node.pos, key, 'module'):
self.wrong_scope_error(node.pos, key, 'module')
del node.directive_comments[key]
self.module_scope = node.scope
self.directives.update(node.directive_comments)
node.directives = self.directives
node.parallel_directives = self.parallel_directives
self.visitchildren(node)
node.cython_module_names = self.cython_module_names
return node
# The following four functions track imports and cimports that
# begin with "cython"
def is_cython_directive(self, name):
return (name in Options.directive_types or
name in self.special_methods or
PyrexTypes.parse_basic_type(name))
def is_parallel_directive(self, full_name, pos):
"""
        Checks to see if full_name (e.g. cython.parallel.prange) is a valid
parallel directive. If it is a star import it also updates the
parallel_directives.
"""
result = (full_name + ".").startswith("cython.parallel.")
if result:
directive = full_name.split('.')
if full_name == u"cython.parallel":
self.parallel_directives[u"parallel"] = u"cython.parallel"
elif full_name == u"cython.parallel.*":
for name in self.valid_parallel_directives:
self.parallel_directives[name] = u"cython.parallel.%s" % name
elif (len(directive) != 3 or
directive[-1] not in self.valid_parallel_directives):
error(pos, "No such directive: %s" % full_name)
self.module_scope.use_utility_code(
UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
return result
def visit_CImportStatNode(self, node):
if node.module_name == u"cython":
self.cython_module_names.add(node.as_name or u"cython")
elif node.module_name.startswith(u"cython."):
if node.module_name.startswith(u"cython.parallel."):
error(node.pos, node.module_name + " is not a module")
if node.module_name == u"cython.parallel":
if node.as_name and node.as_name != u"cython":
self.parallel_directives[node.as_name] = node.module_name
else:
self.cython_module_names.add(u"cython")
self.parallel_directives[
u"cython.parallel"] = node.module_name
self.module_scope.use_utility_code(
UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
elif node.as_name:
self.directive_names[node.as_name] = node.module_name[7:]
else:
self.cython_module_names.add(u"cython")
# if this cimport was a compiler directive, we don't
# want to leave the cimport node sitting in the tree
return None
return node
def visit_FromCImportStatNode(self, node):
if not node.relative_level and (
node.module_name == u"cython" or node.module_name.startswith(u"cython.")):
submodule = (node.module_name + u".")[7:]
newimp = []
for pos, name, as_name, kind in node.imported_names:
full_name = submodule + name
qualified_name = u"cython." + full_name
if self.is_parallel_directive(qualified_name, node.pos):
# from cython cimport parallel, or
# from cython.parallel cimport parallel, prange, ...
self.parallel_directives[as_name or name] = qualified_name
elif self.is_cython_directive(full_name):
self.directive_names[as_name or name] = full_name
if kind is not None:
self.context.nonfatal_error(PostParseError(pos,
"Compiler directive imports must be plain imports"))
else:
newimp.append((pos, name, as_name, kind))
if not newimp:
return None
node.imported_names = newimp
return node
def visit_FromImportStatNode(self, node):
if (node.module.module_name.value == u"cython") or \
node.module.module_name.value.startswith(u"cython."):
submodule = (node.module.module_name.value + u".")[7:]
newimp = []
for name, name_node in node.items:
full_name = submodule + name
qualified_name = u"cython." + full_name
if self.is_parallel_directive(qualified_name, node.pos):
self.parallel_directives[name_node.name] = qualified_name
elif self.is_cython_directive(full_name):
self.directive_names[name_node.name] = full_name
else:
newimp.append((name, name_node))
if not newimp:
return None
node.items = newimp
return node
def visit_SingleAssignmentNode(self, node):
if isinstance(node.rhs, ExprNodes.ImportNode):
module_name = node.rhs.module_name.value
is_parallel = (module_name + u".").startswith(u"cython.parallel.")
if module_name != u"cython" and not is_parallel:
return node
module_name = node.rhs.module_name.value
as_name = node.lhs.name
node = Nodes.CImportStatNode(node.pos,
module_name = module_name,
as_name = as_name)
node = self.visit_CImportStatNode(node)
else:
self.visitchildren(node)
return node
def visit_NameNode(self, node):
if node.name in self.cython_module_names:
node.is_cython_module = True
else:
node.cython_attribute = self.directive_names.get(node.name)
return node
def try_to_parse_directives(self, node):
        # If node is the contents of a directive (in a with statement or
# decorator), returns a list of (directivename, value) pairs.
# Otherwise, returns None
if isinstance(node, ExprNodes.CallNode):
self.visit(node.function)
optname = node.function.as_cython_attribute()
if optname:
directivetype = Options.directive_types.get(optname)
if directivetype:
args, kwds = node.explicit_args_kwds()
directives = []
key_value_pairs = []
if kwds is not None and directivetype is not dict:
for keyvalue in kwds.key_value_pairs:
key, value = keyvalue
sub_optname = "%s.%s" % (optname, key.value)
if Options.directive_types.get(sub_optname):
directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos))
else:
key_value_pairs.append(keyvalue)
if not key_value_pairs:
kwds = None
else:
kwds.key_value_pairs = key_value_pairs
if directives and not kwds and not args:
return directives
directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos))
return directives
elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)):
self.visit(node)
optname = node.as_cython_attribute()
if optname:
directivetype = Options.directive_types.get(optname)
if directivetype is bool:
return [(optname, True)]
elif directivetype is None:
return [(optname, None)]
else:
raise PostParseError(
node.pos, "The '%s' directive should be used as a function call." % optname)
return None
def try_to_parse_directive(self, optname, args, kwds, pos):
directivetype = Options.directive_types.get(optname)
if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
return optname, Options.get_directive_defaults()[optname]
elif directivetype is bool:
if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode):
raise PostParseError(pos,
'The %s directive takes one compile-time boolean argument' % optname)
return (optname, args[0].value)
elif directivetype is int:
if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode):
raise PostParseError(pos,
'The %s directive takes one compile-time integer argument' % optname)
return (optname, int(args[0].value))
elif directivetype is str:
if kwds is not None or len(args) != 1 or not isinstance(
args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
raise PostParseError(pos,
'The %s directive takes one compile-time string argument' % optname)
return (optname, str(args[0].value))
elif directivetype is type:
if kwds is not None or len(args) != 1:
raise PostParseError(pos,
'The %s directive takes one type argument' % optname)
return (optname, args[0])
elif directivetype is dict:
if len(args) != 0:
raise PostParseError(pos,
                    'The %s directive takes no positional arguments' % optname)
return optname, dict([(key.value, value) for key, value in kwds.key_value_pairs])
elif directivetype is list:
if kwds and len(kwds) != 0:
raise PostParseError(pos,
'The %s directive takes no keyword arguments' % optname)
return optname, [ str(arg.value) for arg in args ]
elif callable(directivetype):
if kwds is not None or len(args) != 1 or not isinstance(
args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
raise PostParseError(pos,
'The %s directive takes one compile-time string argument' % optname)
return (optname, directivetype(optname, str(args[0].value)))
else:
assert False
def visit_with_directives(self, body, directives):
olddirectives = self.directives
newdirectives = copy.copy(olddirectives)
newdirectives.update(directives)
self.directives = newdirectives
assert isinstance(body, Nodes.StatListNode), body
retbody = self.visit_Node(body)
directive = Nodes.CompilerDirectivesNode(pos=retbody.pos, body=retbody,
directives=newdirectives)
self.directives = olddirectives
return directive
# Handle decorators
def visit_FuncDefNode(self, node):
directives = self._extract_directives(node, 'function')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_CVarDefNode(self, node):
directives = self._extract_directives(node, 'function')
if not directives:
return node
for name, value in directives.items():
if name == 'locals':
node.directive_locals = value
elif name not in ('final', 'staticmethod'):
self.context.nonfatal_error(PostParseError(
node.pos,
"Cdef functions can only take cython.locals(), "
"staticmethod, or final decorators, got %s." % name))
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_CClassDefNode(self, node):
directives = self._extract_directives(node, 'cclass')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_CppClassNode(self, node):
directives = self._extract_directives(node, 'cppclass')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_PyClassDefNode(self, node):
directives = self._extract_directives(node, 'class')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def _extract_directives(self, node, scope_name):
if not node.decorators:
return {}
# Split the decorators into two lists -- real decorators and directives
directives = []
realdecs = []
both = []
for dec in node.decorators:
new_directives = self.try_to_parse_directives(dec.decorator)
if new_directives is not None:
for directive in new_directives:
if self.check_directive_scope(node.pos, directive[0], scope_name):
name, value = directive
if self.directives.get(name, object()) != value:
directives.append(directive)
if directive[0] == 'staticmethod':
both.append(dec)
else:
realdecs.append(dec)
if realdecs and isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode)):
raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
else:
node.decorators = realdecs + both
# merge or override repeated directives
optdict = {}
directives.reverse() # Decorators coming first take precedence
for directive in directives:
name, value = directive
if name in optdict:
old_value = optdict[name]
# keywords and arg lists can be merged, everything
# else overrides completely
if isinstance(old_value, dict):
old_value.update(value)
elif isinstance(old_value, list):
old_value.extend(value)
else:
optdict[name] = value
else:
optdict[name] = value
return optdict
# Handle with statements
def visit_WithStatNode(self, node):
directive_dict = {}
for directive in self.try_to_parse_directives(node.manager) or []:
if directive is not None:
if node.target is not None:
self.context.nonfatal_error(
PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'"))
else:
name, value = directive
if name in ('nogil', 'gil'):
# special case: in pure mode, "with nogil" spells "with cython.nogil"
node = Nodes.GILStatNode(node.pos, state = name, body = node.body)
return self.visit_Node(node)
if self.check_directive_scope(node.pos, name, 'with statement'):
directive_dict[name] = value
if directive_dict:
return self.visit_with_directives(node.body, directive_dict)
return self.visit_Node(node)
class ParallelRangeTransform(CythonTransform, SkipDeclarations):
"""
Transform cython.parallel stuff. The parallel_directives come from the
module node, set there by InterpretCompilerDirectives.
x = cython.parallel.threadavailable() -> ParallelThreadAvailableNode
with nogil, cython.parallel.parallel(): -> ParallelWithBlockNode
print cython.parallel.threadid() -> ParallelThreadIdNode
for i in cython.parallel.prange(...): -> ParallelRangeNode
...
"""
# a list of names, maps 'cython.parallel.prange' in the code to
# ['cython', 'parallel', 'prange']
parallel_directive = None
# Indicates whether a namenode in an expression is the cython module
namenode_is_cython_module = False
# Keep track of whether we are the context manager of a 'with' statement
in_context_manager_section = False
# One of 'prange' or 'with parallel'. This is used to disallow closely
# nested 'with parallel:' blocks
state = None
directive_to_node = {
u"cython.parallel.parallel": Nodes.ParallelWithBlockNode,
# u"cython.parallel.threadsavailable": ExprNodes.ParallelThreadsAvailableNode,
u"cython.parallel.threadid": ExprNodes.ParallelThreadIdNode,
u"cython.parallel.prange": Nodes.ParallelRangeNode,
}
def node_is_parallel_directive(self, node):
return node.name in self.parallel_directives or node.is_cython_module
def get_directive_class_node(self, node):
"""
Figure out which parallel directive was used and return the associated
Node class.
E.g. for a cython.parallel.prange() call we return ParallelRangeNode
"""
if self.namenode_is_cython_module:
directive = '.'.join(self.parallel_directive)
else:
directive = self.parallel_directives[self.parallel_directive[0]]
directive = '%s.%s' % (directive,
'.'.join(self.parallel_directive[1:]))
directive = directive.rstrip('.')
cls = self.directive_to_node.get(directive)
if cls is None and not (self.namenode_is_cython_module and
self.parallel_directive[0] != 'parallel'):
error(node.pos, "Invalid directive: %s" % directive)
self.namenode_is_cython_module = False
self.parallel_directive = None
return cls
def visit_ModuleNode(self, node):
"""
If any parallel directives were imported, copy them over and visit
the AST
"""
if node.parallel_directives:
self.parallel_directives = node.parallel_directives
return self.visit_Node(node)
# No parallel directives were imported, so they can't be used :)
return node
def visit_NameNode(self, node):
if self.node_is_parallel_directive(node):
self.parallel_directive = [node.name]
self.namenode_is_cython_module = node.is_cython_module
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
if self.parallel_directive:
self.parallel_directive.append(node.attribute)
return node
def visit_CallNode(self, node):
self.visit(node.function)
if not self.parallel_directive:
return node
# We are a parallel directive, replace this node with the
# corresponding ParallelSomethingSomething node
if isinstance(node, ExprNodes.GeneralCallNode):
args = node.positional_args.args
kwargs = node.keyword_args
else:
args = node.args
kwargs = {}
parallel_directive_class = self.get_directive_class_node(node)
if parallel_directive_class:
# Note: in case of a parallel() the body is set by
# visit_WithStatNode
node = parallel_directive_class(node.pos, args=args, kwargs=kwargs)
return node
def visit_WithStatNode(self, node):
"Rewrite with cython.parallel.parallel() blocks"
newnode = self.visit(node.manager)
if isinstance(newnode, Nodes.ParallelWithBlockNode):
if self.state == 'parallel with':
error(node.manager.pos,
"Nested parallel with blocks are disallowed")
self.state = 'parallel with'
body = self.visit(node.body)
self.state = None
newnode.body = body
return newnode
elif self.parallel_directive:
parallel_directive_class = self.get_directive_class_node(node)
if not parallel_directive_class:
# There was an error, stop here and now
return None
if parallel_directive_class is Nodes.ParallelWithBlockNode:
error(node.pos, "The parallel directive must be called")
return None
node.body = self.visit(node.body)
return node
def visit_ForInStatNode(self, node):
"Rewrite 'for i in cython.parallel.prange(...):'"
self.visit(node.iterator)
self.visit(node.target)
in_prange = isinstance(node.iterator.sequence,
Nodes.ParallelRangeNode)
previous_state = self.state
if in_prange:
# This will replace the entire ForInStatNode, so copy the
# attributes
parallel_range_node = node.iterator.sequence
parallel_range_node.target = node.target
parallel_range_node.body = node.body
parallel_range_node.else_clause = node.else_clause
node = parallel_range_node
if not isinstance(node.target, ExprNodes.NameNode):
error(node.target.pos,
"Can only iterate over an iteration variable")
self.state = 'prange'
self.visit(node.body)
self.state = previous_state
self.visit(node.else_clause)
return node
def visit(self, node):
"Visit a node that may be None"
if node is not None:
return super(ParallelRangeTransform, self).visit(node)
class WithTransform(CythonTransform, SkipDeclarations):
def visit_WithStatNode(self, node):
self.visitchildren(node, 'body')
pos = node.pos
is_async = node.is_async
body, target, manager = node.body, node.target, node.manager
node.enter_call = ExprNodes.SimpleCallNode(
pos, function=ExprNodes.AttributeNode(
pos, obj=ExprNodes.CloneNode(manager),
attribute=EncodedString('__aenter__' if is_async else '__enter__'),
is_special_lookup=True),
args=[],
is_temp=True)
if is_async:
node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call)
if target is not None:
body = Nodes.StatListNode(
pos, stats=[
Nodes.WithTargetAssignmentStatNode(
pos, lhs=target, with_node=node),
body])
excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[
ExprNodes.ExcValueNode(pos) for _ in range(3)])
except_clause = Nodes.ExceptClauseNode(
pos, body=Nodes.IfStatNode(
pos, if_clauses=[
Nodes.IfClauseNode(
pos, condition=ExprNodes.NotNode(
pos, operand=ExprNodes.WithExitCallNode(
pos, with_stat=node,
test_if_run=False,
args=excinfo_target,
await=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
body=Nodes.ReraiseStatNode(pos),
),
],
else_clause=None),
pattern=None,
target=None,
excinfo_target=excinfo_target,
)
node.body = Nodes.TryFinallyStatNode(
pos, body=Nodes.TryExceptStatNode(
pos, body=body,
except_clauses=[except_clause],
else_clause=None,
),
finally_clause=Nodes.ExprStatNode(
pos, expr=ExprNodes.WithExitCallNode(
pos, with_stat=node,
test_if_run=True,
args=ExprNodes.TupleNode(
pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]),
await=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
handle_error_case=False,
)
return node
def visit_ExprNode(self, node):
# With statements are never inside expressions.
return node
class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations):
"""
Transforms method decorators in cdef classes into nested calls or properties.
Python-style decorator properties are transformed into a PropertyNode
with up to the three getter, setter and deleter DefNodes.
The functional style isn't supported yet.
"""
_properties = None
_map_property_attribute = {
'getter': '__get__',
'setter': '__set__',
'deleter': '__del__',
}.get
def visit_CClassDefNode(self, node):
if self._properties is None:
self._properties = []
self._properties.append({})
super(DecoratorTransform, self).visit_CClassDefNode(node)
self._properties.pop()
return node
def visit_PropertyNode(self, node):
# Low-level warning for other code until we can convert all our uses over.
level = 2 if isinstance(node.pos[0], str) else 0
warning(node.pos, "'property %s:' syntax is deprecated, use '@property'" % node.name, level)
return node
def visit_DefNode(self, node):
scope_type = self.scope_type
node = self.visit_FuncDefNode(node)
if scope_type != 'cclass' or not node.decorators:
return node
# transform @property decorators
properties = self._properties[-1]
for decorator_node in node.decorators[::-1]:
decorator = decorator_node.decorator
if decorator.is_name and decorator.name == 'property':
if len(node.decorators) > 1:
return self._reject_decorated_property(node, decorator_node)
name = node.name
node.name = '__get__'
node.decorators.remove(decorator_node)
stat_list = [node]
if name in properties:
prop = properties[name]
prop.pos = node.pos
prop.doc = node.doc
prop.body.stats = stat_list
return []
prop = Nodes.PropertyNode(node.pos, name=name)
prop.doc = node.doc
prop.body = Nodes.StatListNode(node.pos, stats=stat_list)
properties[name] = prop
return [prop]
elif decorator.is_attribute and decorator.obj.name in properties:
handler_name = self._map_property_attribute(decorator.attribute)
if handler_name:
assert decorator.obj.name == node.name
if len(node.decorators) > 1:
return self._reject_decorated_property(node, decorator_node)
return self._add_to_property(properties, node, handler_name, decorator_node)
# transform normal decorators
return self.chain_decorators(node, node.decorators, node.name)
@staticmethod
def _reject_decorated_property(node, decorator_node):
# restrict transformation to outermost decorator as wrapped properties will probably not work
for deco in node.decorators:
if deco != decorator_node:
error(deco.pos, "Property methods with additional decorators are not supported")
return node
@staticmethod
def _add_to_property(properties, node, name, decorator):
prop = properties[node.name]
node.name = name
node.decorators.remove(decorator)
stats = prop.body.stats
for i, stat in enumerate(stats):
if stat.name == name:
stats[i] = node
break
else:
stats.append(node)
return []
@staticmethod
def chain_decorators(node, decorators, name):
"""
Decorators are applied directly in DefNode and PyClassDefNode to avoid
reassignments to the function/class name - except for cdef class methods.
For those, the reassignment is required as methods are originally
defined in the PyMethodDef struct.
The IndirectionNode allows DefNode to override the decorator.
"""
decorator_result = ExprNodes.NameNode(node.pos, name=name)
for decorator in decorators[::-1]:
decorator_result = ExprNodes.SimpleCallNode(
decorator.pos,
function=decorator.decorator,
args=[decorator_result])
name_node = ExprNodes.NameNode(node.pos, name=name)
reassignment = Nodes.SingleAssignmentNode(
node.pos,
lhs=name_node,
rhs=decorator_result)
reassignment = Nodes.IndirectionNode([reassignment])
node.decorator_indirection = reassignment
return [node, reassignment]
class CnameDirectivesTransform(CythonTransform, SkipDeclarations):
"""
Only part of the CythonUtilityCode pipeline. Must be run before
DecoratorTransform in case this is a decorator for a cdef class.
It filters out @cname('my_cname') decorators and rewrites them to
CnameDecoratorNodes.
"""
def handle_function(self, node):
if not getattr(node, 'decorators', None):
return self.visit_Node(node)
for i, decorator in enumerate(node.decorators):
decorator = decorator.decorator
if (isinstance(decorator, ExprNodes.CallNode) and
decorator.function.is_name and
decorator.function.name == 'cname'):
args, kwargs = decorator.explicit_args_kwds()
if kwargs:
raise AssertionError(
"cname decorator does not take keyword arguments")
if len(args) != 1:
raise AssertionError(
"cname decorator takes exactly one argument")
if not (args[0].is_literal and
args[0].type == Builtin.str_type):
raise AssertionError(
"argument to cname decorator must be a string literal")
cname = args[0].compile_time_value(None)
del node.decorators[i]
node = Nodes.CnameDecoratorNode(pos=node.pos, node=node,
cname=cname)
break
return self.visit_Node(node)
visit_FuncDefNode = handle_function
visit_CClassDefNode = handle_function
visit_CEnumDefNode = handle_function
visit_CStructOrUnionDefNode = handle_function
class ForwardDeclareTypes(CythonTransform):
def visit_CompilerDirectivesNode(self, node):
env = self.module_scope
old = env.directives
env.directives = node.directives
self.visitchildren(node)
env.directives = old
return node
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.module_scope.directives = node.directives
self.visitchildren(node)
return node
def visit_CDefExternNode(self, node):
old_cinclude_flag = self.module_scope.in_cinclude
self.module_scope.in_cinclude = 1
self.visitchildren(node)
self.module_scope.in_cinclude = old_cinclude_flag
return node
def visit_CEnumDefNode(self, node):
node.declare(self.module_scope)
return node
def visit_CStructOrUnionDefNode(self, node):
if node.name not in self.module_scope.entries:
node.declare(self.module_scope)
return node
def visit_CClassDefNode(self, node):
if node.class_name not in self.module_scope.entries:
node.declare(self.module_scope)
return node
class AnalyseDeclarationsTransform(EnvTransform):
basic_property = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
def __set__(self, value):
ATTR = value
""", level='c_class', pipeline=[NormalizeTree(None)])
basic_pyobject_property = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
def __set__(self, value):
ATTR = value
def __del__(self):
ATTR = None
""", level='c_class', pipeline=[NormalizeTree(None)])
basic_property_ro = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
""", level='c_class', pipeline=[NormalizeTree(None)])
struct_or_union_wrapper = TreeFragment(u"""
cdef class NAME:
cdef TYPE value
def __init__(self, MEMBER=None):
cdef int count
count = 0
INIT_ASSIGNMENTS
if IS_UNION and count > 1:
raise ValueError, "At most one union member should be specified."
def __str__(self):
return STR_FORMAT % MEMBER_TUPLE
def __repr__(self):
return REPR_FORMAT % MEMBER_TUPLE
""", pipeline=[NormalizeTree(None)])
init_assignment = TreeFragment(u"""
if VALUE is not None:
ATTR = VALUE
count += 1
""", pipeline=[NormalizeTree(None)])
fused_function = None
in_lambda = 0
def __call__(self, root):
# needed to determine if a cdef var is declared after it's used.
self.seen_vars_stack = []
self.fused_error_funcs = set()
super_class = super(AnalyseDeclarationsTransform, self)
self._super_visit_FuncDefNode = super_class.visit_FuncDefNode
return super_class.__call__(root)
def visit_NameNode(self, node):
self.seen_vars_stack[-1].add(node.name)
return node
def visit_ModuleNode(self, node):
self.seen_vars_stack.append(set())
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.seen_vars_stack.pop()
return node
def visit_LambdaNode(self, node):
self.in_lambda += 1
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.in_lambda -= 1
return node
def visit_CClassDefNode(self, node):
node = self.visit_ClassDefNode(node)
if node.scope and node.scope.implemented and node.body:
stats = []
for entry in node.scope.var_entries:
if entry.needs_property:
property = self.create_Property(entry)
property.analyse_declarations(node.scope)
self.visit(property)
stats.append(property)
if stats:
node.body.stats += stats
return node
def _handle_fused_def_decorators(self, old_decorators, env, node):
"""
Create function calls to the decorators and reassignments to
the function.
"""
        # Delete staticmethod and classmethod decorators; these are
        # handled directly by the fused function object.
decorators = []
for decorator in old_decorators:
func = decorator.decorator
if (not func.is_name or
func.name not in ('staticmethod', 'classmethod') or
env.lookup_here(func.name)):
# not a static or classmethod
decorators.append(decorator)
if decorators:
transform = DecoratorTransform(self.context)
def_node = node.node
_, reassignments = transform.chain_decorators(
def_node, decorators, def_node.name)
reassignments.analyse_declarations(env)
node = [node, reassignments]
return node
def _handle_def(self, decorators, env, node):
"Handle def or cpdef fused functions"
# Create PyCFunction nodes for each specialization
node.stats.insert(0, node.py_func)
node.py_func = self.visit(node.py_func)
node.update_fused_defnode_entry(env)
pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func, binding=True)
pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env))
node.resulting_fused_function = pycfunc
# Create assignment node for our def function
node.fused_func_assignment = self._create_assignment(
node.py_func, ExprNodes.CloneNode(pycfunc), env)
if decorators:
node = self._handle_fused_def_decorators(decorators, env, node)
return node
def _create_fused_function(self, env, node):
"Create a fused function for a DefNode with fused arguments"
from . import FusedNode
if self.fused_function or self.in_lambda:
if self.fused_function not in self.fused_error_funcs:
if self.in_lambda:
error(node.pos, "Fused lambdas not allowed")
else:
error(node.pos, "Cannot nest fused functions")
self.fused_error_funcs.add(self.fused_function)
node.body = Nodes.PassStatNode(node.pos)
for arg in node.args:
if arg.type.is_fused:
arg.type = arg.type.get_fused_types()[0]
return node
decorators = getattr(node, 'decorators', None)
node = FusedNode.FusedCFuncDefNode(node, env)
self.fused_function = node
self.visitchildren(node)
self.fused_function = None
if node.py_func:
node = self._handle_def(decorators, env, node)
return node
def _handle_nogil_cleanup(self, lenv, node):
"Handle cleanup for 'with gil' blocks in nogil functions."
if lenv.nogil and lenv.has_with_gil_block:
# Acquire the GIL for cleanup in 'nogil' functions, by wrapping
# the entire function body in try/finally.
# The corresponding release will be taken care of by
# Nodes.FuncDefNode.generate_function_definitions()
node.body = Nodes.NogilTryFinallyStatNode(
node.body.pos,
body=node.body,
finally_clause=Nodes.EnsureGILNode(node.body.pos),
finally_except_clause=Nodes.EnsureGILNode(node.body.pos))
def _handle_fused(self, node):
if node.is_generator and node.has_fused_arguments:
node.has_fused_arguments = False
error(node.pos, "Fused generators not supported")
node.gbody = Nodes.StatListNode(node.pos,
stats=[],
body=Nodes.PassStatNode(node.pos))
return node.has_fused_arguments
def visit_FuncDefNode(self, node):
"""
        Analyse a function and its body, as that hasn't happened yet. Also
analyse the directive_locals set by @cython.locals().
Then, if we are a function with fused arguments, replace the function
(after it has declared itself in the symbol table!) with a
FusedCFuncDefNode, and analyse its children (which are in turn normal
functions). If we're a normal function, just analyse the body of the
function.
"""
env = self.current_env()
self.seen_vars_stack.append(set())
lenv = node.local_scope
node.declare_arguments(lenv)
# @cython.locals(...)
for var, type_node in node.directive_locals.items():
if not lenv.lookup_here(var): # don't redeclare args
type = type_node.analyse_as_type(lenv)
if type:
lenv.declare_var(var, type, type_node.pos)
else:
error(type_node.pos, "Not a type")
if self._handle_fused(node):
node = self._create_fused_function(env, node)
else:
node.body.analyse_declarations(lenv)
self._handle_nogil_cleanup(lenv, node)
self._super_visit_FuncDefNode(node)
self.seen_vars_stack.pop()
return node
def visit_DefNode(self, node):
node = self.visit_FuncDefNode(node)
env = self.current_env()
if (not isinstance(node, Nodes.DefNode) or
node.fused_py_func or node.is_generator_body or
not node.needs_assignment_synthesis(env)):
return node
return [node, self._synthesize_assignment(node, env)]
def visit_GeneratorBodyDefNode(self, node):
return self.visit_FuncDefNode(node)
def _synthesize_assignment(self, node, env):
# Synthesize assignment node and put it right after defnode
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if genv.is_closure_scope:
rhs = node.py_cfunc_node = ExprNodes.InnerFunctionNode(
node.pos, def_node=node,
pymethdef_cname=node.entry.pymethdef_cname,
code_object=ExprNodes.CodeObjectNode(node))
else:
binding = self.current_directives.get('binding')
rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
node.code_object = rhs.code_object
if env.is_py_class_scope:
rhs.binding = True
node.is_cyfunction = rhs.binding
return self._create_assignment(node, rhs, env)
def _create_assignment(self, def_node, rhs, env):
if def_node.decorators:
for decorator in def_node.decorators[::-1]:
rhs = ExprNodes.SimpleCallNode(
decorator.pos,
function = decorator.decorator,
args = [rhs])
def_node.decorators = None
assmt = Nodes.SingleAssignmentNode(
def_node.pos,
lhs=ExprNodes.NameNode(def_node.pos, name=def_node.name),
rhs=rhs)
assmt.analyse_declarations(env)
return assmt
def visit_ScopedExprNode(self, node):
env = self.current_env()
node.analyse_declarations(env)
# the node may or may not have a local scope
if node.has_local_scope:
self.seen_vars_stack.append(set(self.seen_vars_stack[-1]))
self.enter_scope(node, node.expr_scope)
node.analyse_scoped_declarations(node.expr_scope)
self.visitchildren(node)
self.exit_scope()
self.seen_vars_stack.pop()
else:
node.analyse_scoped_declarations(env)
self.visitchildren(node)
return node
def visit_TempResultFromStatNode(self, node):
self.visitchildren(node)
node.analyse_declarations(self.current_env())
return node
def visit_CppClassNode(self, node):
if node.visibility == 'extern':
return None
else:
return self.visit_ClassDefNode(node)
def visit_CStructOrUnionDefNode(self, node):
# Create a wrapper node if needed.
# We want to use the struct type information (so it can't happen
# before this phase) but also create new objects to be declared
# (so it can't happen later).
# Note that we don't return the original node, as it is
# never used after this phase.
if True: # private (default)
return None
self_value = ExprNodes.AttributeNode(
pos = node.pos,
obj = ExprNodes.NameNode(pos=node.pos, name=u"self"),
attribute = EncodedString(u"value"))
var_entries = node.entry.type.scope.var_entries
attributes = []
for entry in var_entries:
attributes.append(ExprNodes.AttributeNode(pos = entry.pos,
obj = self_value,
attribute = entry.name))
# __init__ assignments
init_assignments = []
for entry, attr in zip(var_entries, attributes):
# TODO: branch on visibility
init_assignments.append(self.init_assignment.substitute({
u"VALUE": ExprNodes.NameNode(entry.pos, name = entry.name),
u"ATTR": attr,
}, pos = entry.pos))
# create the class
str_format = u"%s(%s)" % (node.entry.type.name, ("%s, " * len(attributes))[:-2])
wrapper_class = self.struct_or_union_wrapper.substitute({
u"INIT_ASSIGNMENTS": Nodes.StatListNode(node.pos, stats = init_assignments),
u"IS_UNION": ExprNodes.BoolNode(node.pos, value = not node.entry.type.is_struct),
u"MEMBER_TUPLE": ExprNodes.TupleNode(node.pos, args=attributes),
u"STR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format)),
u"REPR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format.replace("%s", "%r"))),
}, pos = node.pos).stats[0]
wrapper_class.class_name = node.name
wrapper_class.shadow = True
class_body = wrapper_class.body.stats
# fix value type
assert isinstance(class_body[0].base_type, Nodes.CSimpleBaseTypeNode)
class_body[0].base_type.name = node.name
# fix __init__ arguments
init_method = class_body[1]
assert isinstance(init_method, Nodes.DefNode) and init_method.name == '__init__'
arg_template = init_method.args[1]
if not node.entry.type.is_struct:
arg_template.kw_only = True
del init_method.args[1]
for entry, attr in zip(var_entries, attributes):
arg = copy.deepcopy(arg_template)
arg.declarator.name = entry.name
init_method.args.append(arg)
# setters/getters
for entry, attr in zip(var_entries, attributes):
# TODO: branch on visibility
if entry.type.is_pyobject:
template = self.basic_pyobject_property
else:
template = self.basic_property
property = template.substitute({
u"ATTR": attr,
}, pos = entry.pos).stats[0]
property.name = entry.name
wrapper_class.body.stats.append(property)
wrapper_class.analyse_declarations(self.current_env())
return self.visit_CClassDefNode(wrapper_class)
# Some nodes are no longer needed after declaration
# analysis and can be dropped. The analysis was performed
    # on these nodes in a separate recursive process from the
# enclosing function or module, so we can simply drop them.
def visit_CDeclaratorNode(self, node):
# necessary to ensure that all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return node
def visit_CTypeDefNode(self, node):
return node
def visit_CBaseTypeNode(self, node):
return None
def visit_CEnumDefNode(self, node):
if node.visibility == 'public':
return node
else:
return None
def visit_CNameDeclaratorNode(self, node):
if node.name in self.seen_vars_stack[-1]:
entry = self.current_env().lookup(node.name)
if (entry is None or entry.visibility != 'extern'
and not entry.scope.is_c_class_scope):
warning(node.pos, "cdef variable '%s' declared after it is used" % node.name, 2)
self.visitchildren(node)
return node
def visit_CVarDefNode(self, node):
# to ensure all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return None
def visit_CnameDecoratorNode(self, node):
child_node = self.visit(node.node)
if not child_node:
return None
if type(child_node) is list: # Assignment synthesized
node.child_node = child_node[0]
return [node] + child_node[1:]
node.node = child_node
return node
def create_Property(self, entry):
if entry.visibility == 'public':
if entry.type.is_pyobject:
template = self.basic_pyobject_property
else:
template = self.basic_property
elif entry.visibility == 'readonly':
template = self.basic_property_ro
property = template.substitute({
u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
attribute=entry.name),
}, pos=entry.pos).stats[0]
property.name = entry.name
property.doc = entry.doc
return property
class CalculateQualifiedNamesTransform(EnvTransform):
"""
Calculate and store the '__qualname__' and the global
module name on some nodes.
"""
def visit_ModuleNode(self, node):
self.module_name = self.global_scope().qualified_name
self.qualified_name = []
_super = super(CalculateQualifiedNamesTransform, self)
self._super_visit_FuncDefNode = _super.visit_FuncDefNode
self._super_visit_ClassDefNode = _super.visit_ClassDefNode
self.visitchildren(node)
return node
def _set_qualname(self, node, name=None):
if name:
qualname = self.qualified_name[:]
qualname.append(name)
else:
qualname = self.qualified_name
node.qualname = EncodedString('.'.join(qualname))
node.module_name = self.module_name
def _append_entry(self, entry):
if entry.is_pyglobal and not entry.is_pyclass_attr:
self.qualified_name = [entry.name]
else:
self.qualified_name.append(entry.name)
def visit_ClassNode(self, node):
self._set_qualname(node, node.name)
self.visitchildren(node)
return node
def visit_PyClassNamespaceNode(self, node):
# class name was already added by parent node
self._set_qualname(node)
self.visitchildren(node)
return node
def visit_PyCFunctionNode(self, node):
self._set_qualname(node, node.def_node.name)
self.visitchildren(node)
return node
def visit_DefNode(self, node):
self._set_qualname(node, node.name)
return self.visit_FuncDefNode(node)
def visit_FuncDefNode(self, node):
orig_qualified_name = self.qualified_name[:]
if getattr(node, 'name', None) == '<lambda>':
self.qualified_name.append('<lambda>')
else:
self._append_entry(node.entry)
self.qualified_name.append('<locals>')
self._super_visit_FuncDefNode(node)
self.qualified_name = orig_qualified_name
return node
def visit_ClassDefNode(self, node):
orig_qualified_name = self.qualified_name[:]
entry = (getattr(node, 'entry', None) or # PyClass
self.current_env().lookup_here(node.name)) # CClass
self._append_entry(entry)
self._super_visit_ClassDefNode(node)
self.qualified_name = orig_qualified_name
return node
class AnalyseExpressionsTransform(CythonTransform):
def visit_ModuleNode(self, node):
node.scope.infer_types()
node.body = node.body.analyse_expressions(node.scope)
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
node.local_scope.infer_types()
node.body = node.body.analyse_expressions(node.local_scope)
self.visitchildren(node)
return node
def visit_ScopedExprNode(self, node):
if node.has_local_scope:
node.expr_scope.infer_types()
node = node.analyse_scoped_expressions(node.expr_scope)
self.visitchildren(node)
return node
def visit_IndexNode(self, node):
"""
        Replace index nodes that are used to specialize cdef functions with
        fused argument types by the Attribute- or NameNode referring to the
        function. We then need to copy over the specialization properties to
        the attribute or name node.
Because the indexing might be a Python indexing operation on a fused
function, or (usually) a Cython indexing operation, we need to
re-analyse the types.
"""
self.visit_Node(node)
if node.is_fused_index and not node.type.is_error:
node = node.base
return node
class FindInvalidUseOfFusedTypes(CythonTransform):
def visit_FuncDefNode(self, node):
# Errors related to use in functions with fused args will already
# have been detected
if not node.has_fused_arguments:
if not node.is_generator_body and node.return_type.is_fused:
error(node.pos, "Return type is not specified as argument type")
else:
self.visitchildren(node)
return node
def visit_ExprNode(self, node):
if node.type and node.type.is_fused:
error(node.pos, "Invalid use of fused types, type cannot be specialized")
else:
self.visitchildren(node)
return node
class ExpandInplaceOperators(EnvTransform):
def visit_InPlaceAssignmentNode(self, node):
lhs = node.lhs
rhs = node.rhs
if lhs.type.is_cpp_class:
# No getting around this exact operator here.
return node
if isinstance(lhs, ExprNodes.BufferIndexNode):
# There is code to handle this case in InPlaceAssignmentNode
return node
env = self.current_env()
def side_effect_free_reference(node, setting=False):
if node.is_name:
return node, []
elif node.type.is_pyobject and not setting:
node = LetRefNode(node)
return node, [node]
elif node.is_subscript:
base, temps = side_effect_free_reference(node.base)
index = LetRefNode(node.index)
return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index]
elif node.is_attribute:
obj, temps = side_effect_free_reference(node.obj)
return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps
elif isinstance(node, ExprNodes.BufferIndexNode):
raise ValueError("Don't allow things like attributes of buffer indexing operations")
else:
node = LetRefNode(node)
return node, [node]
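        # Illustrative sketch (assumption): "obj.attr[f()] += 1" is expanded
        # roughly into "let idx = f() in obj.attr[idx] = obj.attr[idx] + 1",
        # with LetRefNodes holding the index (and other non-trivial
        # subexpressions) so that side effects are evaluated only once.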
try:
lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True)
except ValueError:
return node
dup = lhs.__class__(**lhs.__dict__)
binop = ExprNodes.binop_node(node.pos,
operator = node.operator,
operand1 = dup,
operand2 = rhs,
inplace=True)
# Manually analyse types for new node.
lhs.analyse_target_types(env)
dup.analyse_types(env)
binop.analyse_operation(env)
node = Nodes.SingleAssignmentNode(
node.pos,
lhs = lhs,
rhs=binop.coerce_to(lhs.type, env))
# Use LetRefNode to avoid side effects.
let_ref_nodes.reverse()
for t in let_ref_nodes:
node = LetNode(t, node)
return node
def visit_ExprNode(self, node):
# In-place assignments can't happen within an expression.
return node
class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
"""
Adjust function and class definitions by the decorator directives:
@cython.cfunc
@cython.cclass
@cython.ccall
@cython.inline
"""
def visit_ModuleNode(self, node):
self.directives = node.directives
self.in_py_class = False
self.visitchildren(node)
return node
def visit_CompilerDirectivesNode(self, node):
old_directives = self.directives
self.directives = node.directives
self.visitchildren(node)
self.directives = old_directives
return node
def visit_DefNode(self, node):
modifiers = []
if 'inline' in self.directives:
modifiers.append('inline')
if 'ccall' in self.directives:
node = node.as_cfunction(
overridable=True, returns=self.directives.get('returns'), modifiers=modifiers)
return self.visit(node)
if 'cfunc' in self.directives:
if self.in_py_class:
error(node.pos, "cfunc directive is not allowed here")
else:
node = node.as_cfunction(
overridable=False, returns=self.directives.get('returns'), modifiers=modifiers)
return self.visit(node)
if 'inline' in modifiers:
error(node.pos, "Python functions cannot be declared 'inline'")
self.visitchildren(node)
return node
def visit_PyClassDefNode(self, node):
if 'cclass' in self.directives:
node = node.as_cclass()
return self.visit(node)
else:
old_in_pyclass = self.in_py_class
self.in_py_class = True
self.visitchildren(node)
self.in_py_class = old_in_pyclass
return node
def visit_CClassDefNode(self, node):
old_in_pyclass = self.in_py_class
self.in_py_class = False
self.visitchildren(node)
self.in_py_class = old_in_pyclass
return node
class AlignFunctionDefinitions(CythonTransform):
"""
This class takes the signatures from a .pxd file and applies them to
the def methods in a .py file.
"""
def visit_ModuleNode(self, node):
self.scope = node.scope
self.directives = node.directives
self.imported_names = set() # hack, see visit_FromImportStatNode()
self.visitchildren(node)
return node
def visit_PyClassDefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def:
if pxd_def.is_cclass:
return self.visit_CClassDefNode(node.as_cclass(), pxd_def)
elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope:
error(node.pos, "'%s' redeclared" % node.name)
if pxd_def.pos:
error(pxd_def.pos, "previous declaration here")
return None
return node
def visit_CClassDefNode(self, node, pxd_def=None):
if pxd_def is None:
pxd_def = self.scope.lookup(node.class_name)
if pxd_def:
if not pxd_def.defined_in_pxd:
return node
outer_scope = self.scope
self.scope = pxd_def.type.scope
self.visitchildren(node)
if pxd_def:
self.scope = outer_scope
return node
def visit_DefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope):
if not pxd_def.is_cfunction:
error(node.pos, "'%s' redeclared" % node.name)
if pxd_def.pos:
error(pxd_def.pos, "previous declaration here")
return None
node = node.as_cfunction(pxd_def)
elif (self.scope.is_module_scope and self.directives['auto_cpdef']
and not node.name in self.imported_names
and node.is_cdef_func_compatible()):
# FIXME: cpdef-ing should be done in analyse_declarations()
node = node.as_cfunction(scope=self.scope)
# Enable this when nested cdef functions are allowed.
# self.visitchildren(node)
return node
def visit_FromImportStatNode(self, node):
# hack to prevent conditional import fallback functions from
# being cpdef-ed (global Python variables currently conflict
# with imports)
if self.scope.is_module_scope:
for name, _ in node.items:
self.imported_names.add(name)
return node
def visit_ExprNode(self, node):
# ignore lambdas and everything else that appears in expressions
return node
class RemoveUnreachableCode(CythonTransform):
def visit_StatListNode(self, node):
if not self.current_directives['remove_unreachable']:
return node
self.visitchildren(node)
for idx, stat in enumerate(node.stats):
idx += 1
if stat.is_terminator:
if idx < len(node.stats):
if self.current_directives['warn.unreachable']:
warning(node.stats[idx].pos, "Unreachable code", 2)
node.stats = node.stats[:idx]
node.is_terminator = True
break
return node
def visit_IfClauseNode(self, node):
self.visitchildren(node)
if node.body.is_terminator:
node.is_terminator = True
return node
def visit_IfStatNode(self, node):
self.visitchildren(node)
if node.else_clause and node.else_clause.is_terminator:
for clause in node.if_clauses:
if not clause.is_terminator:
break
else:
node.is_terminator = True
return node
def visit_TryExceptStatNode(self, node):
self.visitchildren(node)
if node.body.is_terminator and node.else_clause:
if self.current_directives['warn.unreachable']:
warning(node.else_clause.pos, "Unreachable code", 2)
node.else_clause = None
return node
class YieldNodeCollector(TreeVisitor):
def __init__(self):
super(YieldNodeCollector, self).__init__()
self.yields = []
self.awaits = []
self.returns = []
self.has_return_value = False
def visit_Node(self, node):
self.visitchildren(node)
def visit_YieldExprNode(self, node):
self.yields.append(node)
self.visitchildren(node)
def visit_AwaitExprNode(self, node):
self.awaits.append(node)
self.visitchildren(node)
def visit_ReturnStatNode(self, node):
self.visitchildren(node)
if node.value:
self.has_return_value = True
self.returns.append(node)
def visit_ClassDefNode(self, node):
pass
def visit_FuncDefNode(self, node):
pass
def visit_LambdaNode(self, node):
pass
def visit_GeneratorExpressionNode(self, node):
pass
def visit_CArgDeclNode(self, node):
# do not look into annotations
# FIXME: support (yield) in default arguments (currently crashes)
pass
class MarkClosureVisitor(CythonTransform):
def visit_ModuleNode(self, node):
self.needs_closure = False
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
collector = YieldNodeCollector()
collector.visitchildren(node)
if node.is_async_def:
if collector.yields:
error(collector.yields[0].pos, "'yield' not allowed in async coroutines (use 'await')")
yields = collector.awaits
elif collector.yields:
if collector.awaits:
error(collector.yields[0].pos, "'await' not allowed in generators (use 'yield')")
yields = collector.yields
else:
return node
for i, yield_expr in enumerate(yields, 1):
yield_expr.label_num = i
for retnode in collector.returns:
retnode.in_generator = True
gbody = Nodes.GeneratorBodyDefNode(
pos=node.pos, name=node.name, body=node.body)
coroutine = (Nodes.AsyncDefNode if node.is_async_def else Nodes.GeneratorDefNode)(
pos=node.pos, name=node.name, args=node.args,
star_arg=node.star_arg, starstar_arg=node.starstar_arg,
doc=node.doc, decorators=node.decorators,
gbody=gbody, lambda_name=node.lambda_name)
return coroutine
def visit_CFuncDefNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
if node.needs_closure and node.overridable:
error(node.pos, "closures inside cpdef functions not yet supported")
return node
def visit_LambdaNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
return node
def visit_ClassDefNode(self, node):
self.visitchildren(node)
self.needs_closure = True
return node
class CreateClosureClasses(CythonTransform):
# Output closure classes in module scope for all functions
# that really need it.
def __init__(self, context):
super(CreateClosureClasses, self).__init__(context)
self.path = []
self.in_lambda = False
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.visitchildren(node)
return node
def find_entries_used_in_closures(self, node):
from_closure = []
in_closure = []
for name, entry in node.local_scope.entries.items():
if entry.from_closure:
from_closure.append((name, entry))
elif entry.in_closure:
in_closure.append((name, entry))
return from_closure, in_closure
def create_class_from_scope(self, node, target_module_scope, inner_node=None):
# move local variables into closure
if node.is_generator:
for entry in node.local_scope.entries.values():
if not entry.from_closure:
entry.in_closure = True
from_closure, in_closure = self.find_entries_used_in_closures(node)
in_closure.sort()
# Now from the beginning
node.needs_closure = False
node.needs_outer_scope = False
func_scope = node.local_scope
cscope = node.entry.scope
while cscope.is_py_class_scope or cscope.is_c_class_scope:
cscope = cscope.outer_scope
if not from_closure and (self.path or inner_node):
if not inner_node:
if not node.py_cfunc_node:
raise InternalError("DefNode does not have assignment node")
inner_node = node.py_cfunc_node
inner_node.needs_self_code = False
node.needs_outer_scope = False
if node.is_generator:
pass
elif not in_closure and not from_closure:
return
elif not in_closure:
func_scope.is_passthrough = True
func_scope.scope_class = cscope.scope_class
node.needs_outer_scope = True
return
as_name = '%s_%s' % (
target_module_scope.next_id(Naming.closure_class_prefix),
node.entry.cname)
entry = target_module_scope.declare_c_class(
name=as_name, pos=node.pos, defining=True,
implementing=True)
entry.type.is_final_type = True
func_scope.scope_class = entry
class_scope = entry.type.scope
class_scope.is_internal = True
if Options.closure_freelist_size:
class_scope.directives['freelist'] = Options.closure_freelist_size
if from_closure:
assert cscope.is_closure_scope
class_scope.declare_var(pos=node.pos,
name=Naming.outer_scope_cname,
cname=Naming.outer_scope_cname,
type=cscope.scope_class.type,
is_cdef=True)
node.needs_outer_scope = True
for name, entry in in_closure:
closure_entry = class_scope.declare_var(pos=entry.pos,
name=entry.name,
cname=entry.cname,
type=entry.type,
is_cdef=True)
if entry.is_declared_generic:
closure_entry.is_declared_generic = 1
node.needs_closure = True
# Do it here because other classes are already checked
target_module_scope.check_c_class(func_scope.scope_class)
def visit_LambdaNode(self, node):
if not isinstance(node.def_node, Nodes.DefNode):
# fused function, an error has been previously issued
return node
was_in_lambda = self.in_lambda
self.in_lambda = True
self.create_class_from_scope(node.def_node, self.module_scope, node)
self.visitchildren(node)
self.in_lambda = was_in_lambda
return node
def visit_FuncDefNode(self, node):
if self.in_lambda:
self.visitchildren(node)
return node
if node.needs_closure or self.path:
self.create_class_from_scope(node, self.module_scope)
self.path.append(node)
self.visitchildren(node)
self.path.pop()
return node
def visit_GeneratorBodyDefNode(self, node):
self.visitchildren(node)
return node
def visit_CFuncDefNode(self, node):
if not node.overridable:
return self.visit_FuncDefNode(node)
else:
self.visitchildren(node)
return node
class GilCheck(VisitorTransform):
"""
Call `node.gil_check(env)` on each node to make sure we hold the
GIL when we need it. Raise an error when Python operations are used
inside a `nogil` environment.
Additionally, raise exceptions for closely nested with gil or with nogil
statements. The latter would abort Python.
"""
def __call__(self, root):
self.env_stack = [root.scope]
self.nogil = False
# True for 'cdef func() nogil:' functions, as the GIL may be held while
# calling this function (thus contained 'nogil' blocks may be valid).
self.nogil_declarator_only = False
return super(GilCheck, self).__call__(root)
def visit_FuncDefNode(self, node):
self.env_stack.append(node.local_scope)
was_nogil = self.nogil
self.nogil = node.local_scope.nogil
if self.nogil:
self.nogil_declarator_only = True
if self.nogil and node.nogil_check:
node.nogil_check(node.local_scope)
self.visitchildren(node)
# This cannot be nested, so it doesn't need backup/restore
self.nogil_declarator_only = False
self.env_stack.pop()
self.nogil = was_nogil
return node
def visit_GILStatNode(self, node):
if self.nogil and node.nogil_check:
node.nogil_check()
was_nogil = self.nogil
self.nogil = (node.state == 'nogil')
if was_nogil == self.nogil and not self.nogil_declarator_only:
if not was_nogil:
error(node.pos, "Trying to acquire the GIL while it is "
"already held.")
else:
error(node.pos, "Trying to release the GIL while it was "
"previously released.")
if isinstance(node.finally_clause, Nodes.StatListNode):
# The finally clause of the GILStatNode is a GILExitNode,
# which is wrapped in a StatListNode. Just unpack that.
node.finally_clause, = node.finally_clause.stats
self.visitchildren(node)
self.nogil = was_nogil
return node
def visit_ParallelRangeNode(self, node):
if node.nogil:
node.nogil = False
node = Nodes.GILStatNode(node.pos, state='nogil', body=node)
return self.visit_GILStatNode(node)
if not self.nogil:
error(node.pos, "prange() can only be used without the GIL")
# Forget about any GIL-related errors that may occur in the body
return None
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
return node
def visit_ParallelWithBlockNode(self, node):
if not self.nogil:
error(node.pos, "The parallel section may only be used without "
"the GIL")
return None
if node.nogil_check:
# It does not currently implement this, but test for it anyway to
# avoid potential future surprises
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
return node
def visit_TryFinallyStatNode(self, node):
"""
Take care of try/finally statements in nogil code sections.
"""
if not self.nogil or isinstance(node, Nodes.GILStatNode):
return self.visit_Node(node)
node.nogil_check = None
node.is_try_finally_in_nogil = True
self.visitchildren(node)
return node
def visit_Node(self, node):
if self.env_stack and self.nogil and node.nogil_check:
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
node.in_nogil_context = self.nogil
return node
class TransformBuiltinMethods(EnvTransform):
"""
Replace Cython's own cython.* builtins by the corresponding tree nodes.
"""
def visit_SingleAssignmentNode(self, node):
if node.declaration_only:
return None
else:
self.visitchildren(node)
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
return self.visit_cython_attribute(node)
def visit_NameNode(self, node):
return self.visit_cython_attribute(node)
def visit_cython_attribute(self, node):
attribute = node.as_cython_attribute()
if attribute:
if attribute == u'compiled':
node = ExprNodes.BoolNode(node.pos, value=True)
elif attribute == u'__version__':
from .. import __version__ as version
node = ExprNodes.StringNode(node.pos, value=EncodedString(version))
elif attribute == u'NULL':
node = ExprNodes.NullNode(node.pos)
elif attribute in (u'set', u'frozenset', u'staticmethod'):
node = ExprNodes.NameNode(node.pos, name=EncodedString(attribute),
entry=self.current_env().builtin_scope().lookup_here(attribute))
elif PyrexTypes.parse_basic_type(attribute):
pass
elif self.context.cython_scope.lookup_qualified_name(attribute):
pass
else:
error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute)
return node
def visit_ExecStatNode(self, node):
lenv = self.current_env()
self.visitchildren(node)
if len(node.args) == 1:
node.args.append(ExprNodes.GlobalsExprNode(node.pos))
if not lenv.is_module_scope:
node.args.append(
ExprNodes.LocalsExprNode(
node.pos, self.current_scope_node(), lenv))
return node
def _inject_locals(self, node, func_name):
# locals()/dir()/vars() builtins
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry:
# not the builtin
return node
pos = node.pos
if func_name in ('locals', 'vars'):
if func_name == 'locals' and len(node.args) > 0:
error(self.pos, "Builtin 'locals()' called with wrong number of args, expected 0, got %d"
% len(node.args))
return node
elif func_name == 'vars':
if len(node.args) > 1:
error(self.pos, "Builtin 'vars()' called with wrong number of args, expected 0-1, got %d"
% len(node.args))
if len(node.args) > 0:
return node # nothing to do
return ExprNodes.LocalsExprNode(pos, self.current_scope_node(), lenv)
else: # dir()
if len(node.args) > 1:
error(self.pos, "Builtin 'dir()' called with wrong number of args, expected 0-1, got %d"
% len(node.args))
if len(node.args) > 0:
# optimised in Builtin.py
return node
if lenv.is_py_class_scope or lenv.is_module_scope:
if lenv.is_py_class_scope:
pyclass = self.current_scope_node()
locals_dict = ExprNodes.CloneNode(pyclass.dict)
else:
locals_dict = ExprNodes.GlobalsExprNode(pos)
return ExprNodes.SortedDictKeysNode(locals_dict)
local_names = sorted(var.name for var in lenv.entries.values() if var.name)
items = [ExprNodes.IdentifierStringNode(pos, value=var)
for var in local_names]
return ExprNodes.ListNode(pos, args=items)
def visit_PrimaryCmpNode(self, node):
# special case: for in/not-in test, we do not need to sort locals()
self.visitchildren(node)
if node.operator in 'not_in': # in/not_in
if isinstance(node.operand2, ExprNodes.SortedDictKeysNode):
arg = node.operand2.arg
if isinstance(arg, ExprNodes.NoneCheckNode):
arg = arg.arg
node.operand2 = arg
return node
def visit_CascadedCmpNode(self, node):
return self.visit_PrimaryCmpNode(node)
def _inject_eval(self, node, func_name):
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry or len(node.args) != 1:
return node
# Inject globals and locals
node.args.append(ExprNodes.GlobalsExprNode(node.pos))
if not lenv.is_module_scope:
node.args.append(
ExprNodes.LocalsExprNode(
node.pos, self.current_scope_node(), lenv))
return node
def _inject_super(self, node, func_name):
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry or node.args:
return node
# Inject no-args super
def_node = self.current_scope_node()
if (not isinstance(def_node, Nodes.DefNode) or not def_node.args or
len(self.env_stack) < 2):
return node
class_node, class_scope = self.env_stack[-2]
if class_scope.is_py_class_scope:
def_node.requires_classobj = True
class_node.class_cell.is_active = True
node.args = [
ExprNodes.ClassCellNode(
node.pos, is_generator=def_node.is_generator),
ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
]
elif class_scope.is_c_class_scope:
node.args = [
ExprNodes.NameNode(
node.pos, name=class_node.scope.name,
entry=class_node.entry),
ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
]
return node
def visit_SimpleCallNode(self, node):
# cython.foo
function = node.function.as_cython_attribute()
if function:
if function in InterpretCompilerDirectives.unop_method_nodes:
if len(node.args) != 1:
error(node.function.pos, u"%s() takes exactly one argument" % function)
else:
node = InterpretCompilerDirectives.unop_method_nodes[function](
node.function.pos, operand=node.args[0])
elif function in InterpretCompilerDirectives.binop_method_nodes:
if len(node.args) != 2:
error(node.function.pos, u"%s() takes exactly two arguments" % function)
else:
node = InterpretCompilerDirectives.binop_method_nodes[function](
node.function.pos, operand1=node.args[0], operand2=node.args[1])
elif function == u'cast':
if len(node.args) != 2:
error(node.function.pos,
u"cast() takes exactly two arguments and an optional typecheck keyword")
else:
type = node.args[0].analyse_as_type(self.current_env())
if type:
node = ExprNodes.TypecastNode(
node.function.pos, type=type, operand=node.args[1], typecheck=False)
else:
error(node.args[0].pos, "Not a type")
elif function == u'sizeof':
if len(node.args) != 1:
error(node.function.pos, u"sizeof() takes exactly one argument")
else:
type = node.args[0].analyse_as_type(self.current_env())
if type:
node = ExprNodes.SizeofTypeNode(node.function.pos, arg_type=type)
else:
node = ExprNodes.SizeofVarNode(node.function.pos, operand=node.args[0])
elif function == 'cmod':
if len(node.args) != 2:
error(node.function.pos, u"cmod() takes exactly two arguments")
else:
node = ExprNodes.binop_node(node.function.pos, '%', node.args[0], node.args[1])
node.cdivision = True
elif function == 'cdiv':
if len(node.args) != 2:
error(node.function.pos, u"cdiv() takes exactly two arguments")
else:
node = ExprNodes.binop_node(node.function.pos, '/', node.args[0], node.args[1])
node.cdivision = True
elif function == u'set':
node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set'))
elif function == u'staticmethod':
node.function = ExprNodes.NameNode(node.pos, name=EncodedString('staticmethod'))
elif self.context.cython_scope.lookup_qualified_name(function):
pass
else:
error(node.function.pos,
u"'%s' not a valid cython language construct" % function)
self.visitchildren(node)
if isinstance(node, ExprNodes.SimpleCallNode) and node.function.is_name:
func_name = node.function.name
if func_name in ('dir', 'locals', 'vars'):
return self._inject_locals(node, func_name)
if func_name == 'eval':
return self._inject_eval(node, func_name)
if func_name == 'super':
return self._inject_super(node, func_name)
return node
def visit_GeneralCallNode(self, node):
function = node.function.as_cython_attribute()
if function:
args = node.positional_args.args
kwargs = node.keyword_args.compile_time_value(None)
if function == u'cast':
if (len(args) != 2 or len(kwargs) > 1 or
(len(kwargs) == 1 and 'typecheck' not in kwargs)):
error(node.function.pos,
u"cast() takes exactly two arguments and an optional typecheck keyword")
else:
type = args[0].analyse_as_type(self.current_env())
if type:
typecheck = kwargs.get('typecheck', False)
node = ExprNodes.TypecastNode(
node.function.pos, type=type, operand=args[1], typecheck=typecheck)
else:
error(args[0].pos, "Not a type")
self.visitchildren(node)
return node
class ReplaceFusedTypeChecks(VisitorTransform):
"""
This is not a transform in the pipeline. It is invoked on the specific
versions of a cdef function with fused argument types. It filters out any
type branches that don't match. e.g.
if fused_t is mytype:
...
elif fused_t in other_fused_type:
...
"""
def __init__(self, local_scope):
super(ReplaceFusedTypeChecks, self).__init__()
self.local_scope = local_scope
# defer the import until now to avoid circular import time dependencies
from .Optimize import ConstantFolding
self.transform = ConstantFolding(reevaluate=True)
def visit_IfStatNode(self, node):
"""
Filters out any if clauses with false compile time type check
expression.
"""
self.visitchildren(node)
return self.transform(node)
def visit_PrimaryCmpNode(self, node):
type1 = node.operand1.analyse_as_type(self.local_scope)
type2 = node.operand2.analyse_as_type(self.local_scope)
if type1 and type2:
false_node = ExprNodes.BoolNode(node.pos, value=False)
true_node = ExprNodes.BoolNode(node.pos, value=True)
type1 = self.specialize_type(type1, node.operand1.pos)
op = node.operator
if op in ('is', 'is_not', '==', '!='):
type2 = self.specialize_type(type2, node.operand2.pos)
is_same = type1.same_as(type2)
eq = op in ('is', '==')
if (is_same and eq) or (not is_same and not eq):
return true_node
elif op in ('in', 'not_in'):
# We have to do an instance check directly, as operand2
# needs to be a fused type and not a type with a subtype
# that is fused. First unpack the typedef
if isinstance(type2, PyrexTypes.CTypedefType):
type2 = type2.typedef_base_type
if type1.is_fused:
error(node.operand1.pos, "Type is fused")
elif not type2.is_fused:
error(node.operand2.pos,
"Can only use 'in' or 'not in' on a fused type")
else:
types = PyrexTypes.get_specialized_types(type2)
for specialized_type in types:
if type1.same_as(specialized_type):
if op == 'in':
return true_node
else:
return false_node
if op == 'not_in':
return true_node
return false_node
return node
def specialize_type(self, type, pos):
try:
return type.specialize(self.local_scope.fused_to_specific)
except KeyError:
error(pos, "Type is not specific")
return type
def visit_Node(self, node):
self.visitchildren(node)
return node
class DebugTransform(CythonTransform):
"""
Write debug information for this Cython module.
"""
def __init__(self, context, options, result):
super(DebugTransform, self).__init__(context)
self.visited = set()
# our treebuilder and debug output writer
# (see Cython.Debugger.debug_output.CythonDebugWriter)
self.tb = self.context.gdb_debug_outputwriter
#self.c_output_file = options.output_file
self.c_output_file = result.c_file
# Closure support, basically treat nested functions as if the AST were
# never nested
self.nested_funcdefs = []
# tells visit_NameNode whether it should register step-into functions
self.register_stepinto = False
def visit_ModuleNode(self, node):
self.tb.module_name = node.full_module_name
attrs = dict(
module_name=node.full_module_name,
filename=node.pos[0].filename,
c_filename=self.c_output_file)
self.tb.start('Module', attrs)
# serialize functions
self.tb.start('Functions')
# First, serialize functions normally...
self.visitchildren(node)
# ... then, serialize nested functions
for nested_funcdef in self.nested_funcdefs:
self.visit_FuncDefNode(nested_funcdef)
self.register_stepinto = True
self.serialize_modulenode_as_function(node)
self.register_stepinto = False
self.tb.end('Functions')
# 2.3 compatibility. Serialize global variables
self.tb.start('Globals')
entries = {}
for k, v in node.scope.entries.items():
if (v.qualified_name not in self.visited and not
v.name.startswith('__pyx_') and not
v.type.is_cfunction and not
v.type.is_extension_type):
entries[k]= v
self.serialize_local_variables(entries)
self.tb.end('Globals')
# self.tb.end('Module') # end Module after the line number mapping in
# Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map
return node
def visit_FuncDefNode(self, node):
self.visited.add(node.local_scope.qualified_name)
if getattr(node, 'is_wrapper', False):
return node
if self.register_stepinto:
self.nested_funcdefs.append(node)
return node
# node.entry.visibility = 'extern'
if node.py_func is None:
pf_cname = ''
else:
pf_cname = node.py_func.entry.func_cname
attrs = dict(
name=node.entry.name or getattr(node, 'name', '<unknown>'),
cname=node.entry.func_cname,
pf_cname=pf_cname,
qualified_name=node.local_scope.qualified_name,
lineno=str(node.pos[1]))
self.tb.start('Function', attrs=attrs)
self.tb.start('Locals')
self.serialize_local_variables(node.local_scope.entries)
self.tb.end('Locals')
self.tb.start('Arguments')
for arg in node.local_scope.arg_entries:
self.tb.start(arg.name)
self.tb.end(arg.name)
self.tb.end('Arguments')
self.tb.start('StepIntoFunctions')
self.register_stepinto = True
self.visitchildren(node)
self.register_stepinto = False
self.tb.end('StepIntoFunctions')
self.tb.end('Function')
return node
def visit_NameNode(self, node):
if (self.register_stepinto and
node.type is not None and
node.type.is_cfunction and
getattr(node, 'is_called', False) and
node.entry.func_cname is not None):
# don't check node.entry.in_cinclude, as 'cdef extern: ...'
# declared functions are not 'in_cinclude'.
# This means we will list called 'cdef' functions as
# "step into functions", but this is not an issue as they will be
# recognized as Cython functions anyway.
attrs = dict(name=node.entry.func_cname)
self.tb.start('StepIntoFunction', attrs=attrs)
self.tb.end('StepIntoFunction')
self.visitchildren(node)
return node
def serialize_modulenode_as_function(self, node):
"""
Serialize the module-level code as a function so the debugger will know
it's a "relevant frame" and it will know where to set the breakpoint
for 'break modulename'.
"""
name = node.full_module_name.rpartition('.')[-1]
cname_py2 = 'init' + name
cname_py3 = 'PyInit_' + name
py2_attrs = dict(
name=name,
cname=cname_py2,
pf_cname='',
# Ignore the qualified_name, breakpoints should be set using
# `cy break modulename:lineno` for module-level breakpoints.
qualified_name='',
lineno='1',
is_initmodule_function="True",
)
py3_attrs = dict(py2_attrs, cname=cname_py3)
self._serialize_modulenode_as_function(node, py2_attrs)
self._serialize_modulenode_as_function(node, py3_attrs)
def _serialize_modulenode_as_function(self, node, attrs):
self.tb.start('Function', attrs=attrs)
self.tb.start('Locals')
self.serialize_local_variables(node.scope.entries)
self.tb.end('Locals')
self.tb.start('Arguments')
self.tb.end('Arguments')
self.tb.start('StepIntoFunctions')
self.register_stepinto = True
self.visitchildren(node)
self.register_stepinto = False
self.tb.end('StepIntoFunctions')
self.tb.end('Function')
def serialize_local_variables(self, entries):
for entry in entries.values():
if not entry.cname:
# not a local variable
continue
if entry.type.is_pyobject:
vartype = 'PythonObject'
else:
vartype = 'CObject'
if entry.from_closure:
# We're dealing with a closure where a variable from an outer
# scope is accessed, get it from the scope object.
cname = '%s->%s' % (Naming.cur_scope_cname,
entry.outer_entry.cname)
qname = '%s.%s.%s' % (entry.scope.outer_scope.qualified_name,
entry.scope.name,
entry.name)
elif entry.in_closure:
cname = '%s->%s' % (Naming.cur_scope_cname,
entry.cname)
qname = entry.qualified_name
else:
cname = entry.cname
qname = entry.qualified_name
if not entry.pos:
# this happens for variables that are not in the user's code,
# e.g. for the global __builtins__, __doc__, etc. We can just
# set the lineno to 0 for those.
lineno = '0'
else:
lineno = str(entry.pos[1])
attrs = dict(
name=entry.name,
cname=cname,
qualified_name=qname,
type=vartype,
lineno=lineno)
self.tb.start('LocalVar', attrs)
self.tb.end('LocalVar')
| c-blake/cython | Cython/Compiler/ParseTreeTransforms.py | Python | apache-2.0 | 121,925 | ["VisIt"] | db92a389f97e19f06eafa97b3f3b6c1a444117f24344d4b4ada5ea83b70c7020 |
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerDrawerCloseEnvV2(SawyerXYZEnv):
_TARGET_RADIUS = 0.04
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.9, 0.0)
obj_high = (0.1, 0.9, 0.0)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': np.array([0.3, ], dtype=np.float32),
'obj_init_pos': np.array([0., 0.9, 0.0], dtype=np.float32),
'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
}
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
goal_low = self.hand_low
goal_high = self.hand_high
self._random_reset_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
self.maxDist = 0.15
self.target_reward = 1000 * self.maxDist + 1000 * 2
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_drawer.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(reward,
tcp_to_obj,
_,
target_to_obj,
object_grasped,
in_place) = self.compute_reward(action, obs)
info = {
'success': float(target_to_obj <= self.TARGET_RADIUS + 0.015),
'near_object': float(tcp_to_obj <= 0.01),
'grasp_success': 1.,
'grasp_reward': object_grasped,
'in_place_reward': in_place,
'obj_to_target': target_to_obj,
'unscaled_reward': reward,
}
return reward, info
def _get_pos_objects(self):
return self.get_body_com('drawer_link') + np.array([.0, -.16, .05])
def _get_quat_objects(self):
return np.zeros(4)
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
# Compute drawer position
self.obj_init_pos = self._get_state_rand_vec() if self.random_init \
else self.init_config['obj_init_pos']
# Set mujoco body to computed position
self.sim.model.body_pos[self.model.body_name2id(
'drawer'
)] = self.obj_init_pos
# Set _target_pos to current drawer position (closed)
self._target_pos = self.obj_init_pos + np.array([.0, -.16, .09])
# Pull drawer out all the way and mark its starting position
self._set_obj_xyz(-self.maxDist)
self.obj_init_pos = self._get_pos_objects()
return self._get_obs()
def compute_reward(self, action, obs):
obj = obs[4:7]
tcp = self.tcp_center
target = self._target_pos.copy()
target_to_obj = (obj - target)
target_to_obj = np.linalg.norm(target_to_obj)
target_to_obj_init = (self.obj_init_pos - target)
target_to_obj_init = np.linalg.norm(target_to_obj_init)
in_place = reward_utils.tolerance(
target_to_obj,
bounds=(0, self.TARGET_RADIUS),
margin=abs(target_to_obj_init - self.TARGET_RADIUS),
sigmoid='long_tail',
)
handle_reach_radius = 0.005
tcp_to_obj = np.linalg.norm(obj - tcp)
tcp_to_obj_init = np.linalg.norm(self.obj_init_pos - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, handle_reach_radius),
margin=abs(tcp_to_obj_init-handle_reach_radius),
sigmoid='gaussian',
)
gripper_closed = min(max(0, action[-1]), 1)
reach = reward_utils.hamacher_product(reach, gripper_closed)
tcp_opened = 0
object_grasped = reach
reward = reward_utils.hamacher_product(reach, in_place)
if target_to_obj <= self.TARGET_RADIUS + 0.015:
reward = 1.
reward *= 10
return (reward,
tcp_to_obj,
tcp_opened,
target_to_obj,
object_grasped,
in_place)
| rlworkgroup/metaworld | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_close_v2.py | Python | mit | 4,612 | ["Gaussian"] | 006207fb67b2d8f3b7238f6fb28e33fd9977d84d12d308cc9d4d29f25373571c |
# $Id$
#
# Copyright (c) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" utility functionality for fingerprinting sets of molecules
includes a command line app for working with fingerprints
and databases
Sample Usage:
python FingerprintMols.py -d data.gdb \
-t 'raw_dop_data' --smilesName="Structure" --idName="Mol_ID" \
--outTable="daylight_sig"
"""
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import MACCSkeys
from rdkit.ML.Cluster import Murtagh
from rdkit import DataStructs
import sys
from rdkit.six.moves import cPickle
_cvsVersion="$Id$"
idx1 = _cvsVersion.find(':')+1
idx2 = _cvsVersion.rfind('$')
__VERSION_STRING="%s"%(_cvsVersion[idx1:idx2])
def error(msg):
sys.stderr.write(msg)
def message(msg):
sys.stderr.write(msg)
def GetRDKFingerprint(mol):
""" uses default parameters """
details = FingerprinterDetails()
return apply(FingerprintMol,(mol,),details.__dict__)
def FoldFingerprintToTargetDensity(fp,**fpArgs):
nOn = fp.GetNumOnBits()
nTot = fp.GetNumBits()
while( float(nOn)/nTot < fpArgs['tgtDensity'] ):
if nTot / 2 > fpArgs['minSize']:
fp = DataStructs.FoldFingerprint(fp,2)
nOn = fp.GetNumOnBits()
nTot = fp.GetNumBits()
else:
break
return fp
def FingerprintMol(mol,
fingerprinter=Chem.RDKFingerprint,
**fpArgs):
if not fpArgs:
details = FingerprinterDetails()
fpArgs = details.__dict__
if fingerprinter != Chem.RDKFingerprint:
fp = fingerprinter(mol,**fpArgs)
fp = FoldFingerprintToTargetDensity(fp,**fpArgs)
else:
fp = fingerprinter(mol,fpArgs['minPath'],fpArgs['maxPath'],
fpArgs['fpSize'],fpArgs['bitsPerHash'],
fpArgs['useHs'],fpArgs['tgtDensity'],
fpArgs['minSize'])
return fp
def FingerprintsFromSmiles(dataSource,idCol,smiCol,
fingerprinter=Chem.RDKFingerprint,
reportFreq=10,maxMols=-1,
**fpArgs):
""" fpArgs are passed as keyword arguments to the fingerprinter
Returns a list of 2-tuples: (id,fp)
"""
res = []
nDone = 0
for entry in dataSource:
id,smi = str(entry[idCol]),str(entry[smiCol])
try:
mol = Chem.MolFromSmiles(smi)
except:
mol = None
if mol:
fp = FingerprintMol(mol,fingerprinter,**fpArgs)
res.append((id,fp))
nDone += 1
if reportFreq>0 and not nDone % reportFreq:
message('Done %d molecules\n'%(nDone))
if maxMols > 0 and nDone >= maxMols:
break
else:
error('Problems parsing SMILES: %s\n'%smi)
return res
def FingerprintsFromMols(mols,
fingerprinter=Chem.RDKFingerprint,
reportFreq=10,maxMols=-1,
**fpArgs):
""" fpArgs are passed as keyword arguments to the fingerprinter
Returns a list of 2-tuples: (id,fp)
"""
res = []
nDone = 0
for id,mol in mols:
if mol:
fp = FingerprintMol(mol,fingerprinter,**fpArgs)
res.append((id,fp))
nDone += 1
if reportFreq>0 and not nDone % reportFreq:
message('Done %d molecules\n'%(nDone))
if maxMols > 0 and nDone >= maxMols:
break
else:
error('Problems fingerprinting molecule with id: %s\n'%id)
return res
def FingerprintsFromPickles(dataSource,idCol,pklCol,
fingerprinter=Chem.RDKFingerprint,
reportFreq=10,maxMols=-1,
**fpArgs):
""" fpArgs are passed as keyword arguments to the fingerprinter
Returns a list of 2-tuples: (id,fp)
"""
res = []
nDone = 0
for entry in dataSource:
id,pkl = str(entry[idCol]),str(entry[pklCol])
try:
mol = Chem.Mol(pkl)
except:
mol = None
if mol:
fp = FingerprintMol(mol,fingerprinter,**fpArgs)
res.append((id,fp))
nDone += 1
if reportFreq>0 and not nDone % reportFreq:
message('Done %d molecules\n'%(nDone))
if maxMols > 0 and nDone >= maxMols:
break
else:
error('Problems parsing pickle for id: %s\n'%id)
return res
def FingerprintsFromDetails(details,reportFreq=10):
data = None
if details.dbName and details.tableName:
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbInfo
from rdkit.ML.Data import DataUtils
try:
conn = DbConnect(details.dbName,details.tableName)
except:
import traceback
error('Problems establishing connection to database: %s|%s\n'%(details.dbName,
details.tableName))
traceback.print_exc()
if not details.idName:
details.idName=DbInfo.GetColumnNames(details.dbName,details.tableName)[0]
dataSet = DataUtils.DBToData(details.dbName,details.tableName,
what='%s,%s'%(details.idName,details.smilesName))
idCol = 0
smiCol = 1
elif details.inFileName and details.useSmiles:
from rdkit.ML.Data import DataUtils
conn = None
if not details.idName:
details.idName='ID'
try:
dataSet = DataUtils.TextFileToData(details.inFileName,
onlyCols=[details.idName,details.smilesName])
except IOError:
import traceback
error('Problems reading from file %s\n'%(details.inFileName))
traceback.print_exc()
idCol = 0
smiCol = 1
elif details.inFileName and details.useSD:
conn = None
dataset=None
if not details.idName:
details.idName='ID'
dataSet = []
try:
s = Chem.SDMolSupplier(details.inFileName)
except:
import traceback
error('Problems reading from file %s\n'%(details.inFileName))
traceback.print_exc()
else:
while 1:
try:
m = s.next()
except StopIteration:
break
if m:
dataSet.append(m)
if reportFreq>0 and not len(dataSet) % reportFreq:
message('Read %d molecules\n'%(len(dataSet)))
if details.maxMols > 0 and len(dataSet) >= details.maxMols:
break
for i,mol in enumerate(dataSet):
if mol.HasProp(details.idName):
nm = mol.GetProp(details.idName)
else:
nm = mol.GetProp('_Name')
dataSet[i] = (nm,mol)
else:
dataSet = None
fps = None
if dataSet and not details.useSD:
data = dataSet.GetNamedData()
if not details.molPklName:
fps = apply(FingerprintsFromSmiles,(data,idCol,smiCol),
details.__dict__)
else:
fps = apply(FingerprintsFromPickles,(data,idCol,smiCol),
details.__dict__)
elif dataSet and details.useSD:
fps = apply(FingerprintsFromMols,(dataSet,),details.__dict__)
if fps:
if details.outFileName:
outF = open(details.outFileName,'wb+')
for i in range(len(fps)):
cPickle.dump(fps[i],outF)
outF.close()
dbName = details.outDbName or details.dbName
if details.outTableName and dbName:
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbInfo,DbUtils,DbModule
conn = DbConnect(dbName)
#
# We don't have a db open already, so we'll need to figure out
# the types of our columns...
#
colTypes = DbUtils.TypeFinder(data,len(data),len(data[0]))
typeStrs = DbUtils.GetTypeStrings([details.idName,details.smilesName],colTypes,
keyCol=details.idName)
cols = '%s, %s %s'%(typeStrs[0],details.fpColName,DbModule.binaryTypeName)
# FIX: we should really check to see if the table
# is already there and, if so, add the appropriate
# column.
#
# create the new table
#
if details.replaceTable or \
details.outTableName.upper() not in [x.upper() for x in conn.GetTableNames()]:
conn.AddTable(details.outTableName,cols)
#
# And add the data
#
for id,fp in fps:
tpl = id,DbModule.binaryHolder(fp.ToBinary())
conn.InsertData(details.outTableName,tpl)
conn.Commit()
return fps
# ------------------------------------------------
#
# Command line parsing stuff
#
# ------------------------------------------------
class FingerprinterDetails(object):
""" class for storing the details of a fingerprinting run,
generates sensible defaults on construction
"""
def __init__(self):
self._fingerprinterInit()
self._screenerInit()
self._clusterInit()
def _fingerprinterInit(self):
self.fingerprinter = Chem.RDKFingerprint
self.fpColName="AutoFragmentFP"
self.idName=''
self.dbName=''
self.outDbName=''
self.tableName=''
self.minSize=64
self.fpSize=2048
self.tgtDensity=0.3
self.minPath=1
self.maxPath=7
self.discrimHash=0
self.useHs=0
self.useValence=0
self.bitsPerHash=2
self.smilesName='SMILES'
self.maxMols=-1
self.outFileName=''
self.outTableName=''
self.inFileName=''
self.replaceTable=True
self.molPklName=''
self.useSmiles=True
self.useSD=False
def _screenerInit(self):
self.metric = DataStructs.TanimotoSimilarity
self.doScreen=''
self.topN=10
self.screenThresh=0.75
self.doThreshold=0
self.smilesTableName=''
self.probeSmiles=''
self.probeMol=None
self.noPickle=0
def _clusterInit(self):
self.clusterAlgo = Murtagh.WARDS
self.actTableName = ''
self.actName = ''
def GetMetricName(self):
if self.metric == DataStructs.TanimotoSimilarity:
return 'Tanimoto'
elif self.metric == DataStructs.DiceSimilarity:
return 'Dice'
elif self.metric == DataStructs.CosineSimilarity:
return 'Cosine'
elif self.metric:
return self.metric
else:
return 'Unknown'
def SetMetricFromName(self,name):
name = name.upper()
if name=="TANIMOTO":
self.metric = DataStructs.TanimotoSimilarity
elif name=="DICE":
self.metric = DataStructs.DiceSimilarity
elif name=="COSINE":
self.metric = DataStructs.CosineSimilarity
def Usage():
""" prints a usage string and exits
"""
print(_usageDoc)
sys.exit(-1)
_usageDoc="""
Usage: FingerprintMols.py [args] <fName>
If <fName> is provided and no tableName is specified (see below),
data will be read from the text file <fName>. Text files delimited
with either commas (extension .csv) or tabs (extension .txt) are
supported.
Command line arguments are:
- -d _dbName_: set the name of the database from which
to pull input molecule information. If output is
going to a database, this will also be used for that
unless the --outDbName option is used.
- -t _tableName_: set the name of the database table
from which to pull input molecule information
- --smilesName=val: sets the name of the SMILES column
in the input database. Default is *SMILES*.
- --useSD: Assume that the input file is an SD file, not a SMILES
table.
- --idName=val: sets the name of the id column in the input
database. Defaults to be the name of the first db column
(or *ID* for text files).
- -o _outFileName_: name of the output file (output will
be a pickle file with one label,fingerprint entry for each
molecule).
- --outTable=val: name of the output db table used to store
fingerprints. If this table already exists, it will be
replaced.
- --outDbName: name of output database, if it's being used.
Defaults to be the same as the input db.
- --fpColName=val: name to use for the column which stores
fingerprints (in pickled format) in the output db table.
Default is *AutoFragmentFP*
- --maxSize=val: base size of the fingerprints to be generated
Default is *2048*
- --minSize=val: minimum size of the fingerprints to be generated
(limits the amount of folding that happens). Default is *64*
- --density=val: target bit density in the fingerprint. The
fingerprint will be folded until this density is
reached. Default is *0.3*
- --minPath=val: minimum path length to be included in
fragment-based fingerprints. Default is *1*.
- --maxPath=val: maximum path length to be included in
fragment-based fingerprints. Default is *7*.
- --nBitsPerHash: number of bits to be set in the output
fingerprint for each fragment. Default is *2*.
- --discrim: use path-based discriminators to hash bits.
Default is *false*.
- -V: include valence information in the fingerprints
Default is *false*.
- -H: include Hs in the fingerprint
Default is *false*.
- --maxMols=val: sets the maximum number of molecules to be
fingerprinted.
- --useMACCS: use the public MACCS keys to do the fingerprinting
(instead of a daylight-type fingerprint)
"""
def ParseArgs(details=None):
""" parses the command line arguments and returns a
_FingerprinterDetails_ instance with the results.
**Note**:
- If you make modifications here, please update the global
_usageDoc string so the Usage message is up to date.
- This routine is used by the fingerprinter, the clusterer and the
screener; not all arguments make sense for all applications.
"""
import sys,getopt
try:
args = sys.argv[1:]
except:
Usage()
try:
args,extras = getopt.getopt(args,'HVs:d:t:o:h',
[
'minSize=','maxSize=',
'density=',
'minPath=','maxPath=',
'bitsPerHash=',
'smilesName=',
'molPkl=',
'useSD',
'idName=',
'discrim',
'outTable=',
'outDbName=',
'fpColName=',
'maxMols=',
'useMACCS',
'keepTable',
# SCREENING:
'smilesTable=',
'doScreen=',
'topN=',
'thresh=',
'smiles=',
'dice',
'cosine',
# CLUSTERING:
'actTable=',
'actName=',
'SLINK',
'CLINK',
'UPGMA',
])
except:
import traceback
traceback.print_exc()
Usage()
if details is None:
details = FingerprinterDetails()
if len(extras):
details.inFileName=extras[0]
for arg,val in args:
if arg=='-H':
details.useHs=1
elif arg=='-V':
details.useValence=1
elif arg=='-d':
details.dbName = val
elif arg=='-t':
details.tableName = val
elif arg=='-o':
details.outFileName = val
elif arg=='--minSize':
details.minSize= int(val)
elif arg=='--maxSize':
details.fpSize= int(val)
elif arg=='--density':
details.tgtDensity = float(val)
elif arg=='--outTable':
details.outTableName = val
elif arg=='--outDbName':
details.outDbName = val
elif arg=='--fpColName':
details.fpColName = val
elif arg=='--minPath':
details.minPath= int(val)
elif arg=='--maxPath':
details.maxPath= int(val)
elif arg=='--nBitsPerHash':
details.bitsPerHash= int(val)
elif arg=='--discrim':
details.discrimHash=1
elif arg=='--smilesName':
details.smilesName = val
elif arg=='--molPkl':
details.molPklName = val
elif arg=='--useSD':
details.useSmiles=False
details.useSD=True
elif arg=='--idName':
details.idName = val
elif arg=='--maxMols':
details.maxMols = int(val)
elif arg=='--useMACCS':
details.fingerprinter = MACCSkeys.GenMACCSKeys
elif arg=='--keepTable':
details.replaceTable=False
# SCREENER:
elif arg=='--smilesTable':
details.smilesTableName=val;
elif arg=='--topN':
details.doThreshold=0
details.topN=int(val)
elif arg=='--thresh':
details.doThreshold=1
details.screenThresh=float(val)
elif arg=='--smiles':
details.probeSmiles=val;
elif arg=='--dice':
details.metric = DataStructs.DiceSimilarity
elif arg=='--cosine':
details.metric = DataStructs.CosineSimilarity
# CLUSTERS:
elif arg=='--SLINK':
details.clusterAlgo = Murtagh.SLINK
elif arg=='--CLINK':
details.clusterAlgo = Murtagh.CLINK
elif arg=='--UPGMA':
details.clusterAlgo = Murtagh.UPGMA
elif arg=='--actTable':
details.actTableName = val
elif arg=='--actName':
details.actName = val
elif arg=='-h':
Usage()
return details
if __name__ == '__main__':
message("This is FingerprintMols version %s\n\n"%(__VERSION_STRING))
details = ParseArgs()
FingerprintsFromDetails(details)
| soerendip42/rdkit | rdkit/Chem/Fingerprints/FingerprintMols.py | Python | bsd-3-clause | 17,794 | ["RDKit"] | 2ca81a67d8dc015ce500c2afec0fec497ca46afa2772217aeb17dd8131b02ea8 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Buganizer tests for yapf.reformatter."""
import textwrap
import unittest
from yapf.yapflib import reformatter
from yapf.yapflib import style
from yapftests import yapf_test_helper
class BuganizerFixes(yapf_test_helper.YAPFTest):
@classmethod
def setUpClass(cls):
style.SetGlobalStyle(style.CreateChromiumStyle())
def testB77923341(self):
code = """\
def f():
if (aaaaaaaaaaaaaa.bbbbbbbbbbbb.ccccc <= 0 and # pytype: disable=attribute-error
ddddddddddd.eeeeeeeee == constants.FFFFFFFFFFFFFF):
raise "yo"
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB77329955(self):
code = """\
class _():
@parameterized.named_parameters(
('ReadyExpiredSuccess', True, True, True, None, None),
('SpannerUpdateFails', True, False, True, None, None),
('ReadyNotExpired', False, True, True, True, None),
# ('ReadyNotExpiredNotHealthy', False, True, True, False, True),
# ('ReadyNotExpiredNotHealthyErrorFails', False, True, True, False, False
# ('ReadyNotExpiredNotHealthyUpdateFails', False, False, True, False, True
)
def _():
pass
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB65197969(self):
unformatted_code = """\
class _():
def _():
return timedelta(seconds=max(float(time_scale), small_interval) *
1.41 ** min(num_attempts, 9))
"""
expected_formatted_code = """\
class _():
def _():
return timedelta(
seconds=max(float(time_scale), small_interval) *
1.41**min(num_attempts, 9))
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB65546221(self):
unformatted_code = """\
SUPPORTED_PLATFORMS = (
"centos-6",
"centos-7",
"ubuntu-1204-precise",
"ubuntu-1404-trusty",
"ubuntu-1604-xenial",
"debian-7-wheezy",
"debian-8-jessie",
"debian-9-stretch",)
"""
expected_formatted_code = """\
SUPPORTED_PLATFORMS = (
"centos-6",
"centos-7",
"ubuntu-1204-precise",
"ubuntu-1404-trusty",
"ubuntu-1604-xenial",
"debian-7-wheezy",
"debian-8-jessie",
"debian-9-stretch",
)
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB30500455(self):
unformatted_code = """\
INITIAL_SYMTAB = dict([(name, 'exception#' + name) for name in INITIAL_EXCEPTIONS
] * [(name, 'type#' + name) for name in INITIAL_TYPES] + [
(name, 'function#' + name) for name in INITIAL_FUNCTIONS
] + [(name, 'const#' + name) for name in INITIAL_CONSTS])
"""
expected_formatted_code = """\
INITIAL_SYMTAB = dict(
[(name, 'exception#' + name) for name in INITIAL_EXCEPTIONS] *
[(name, 'type#' + name) for name in INITIAL_TYPES] +
[(name, 'function#' + name) for name in INITIAL_FUNCTIONS] +
[(name, 'const#' + name) for name in INITIAL_CONSTS])
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB38343525(self):
code = """\
# This does foo.
@arg.String('some_path_to_a_file', required=True)
# This does bar.
@arg.String('some_path_to_a_file', required=True)
def f():
print 1
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB37099651(self):
unformatted_code = """\
_MEMCACHE = lazy.MakeLazy(
# pylint: disable=g-long-lambda
lambda: function.call.mem.clients(FLAGS.some_flag_thingy, default_namespace=_LAZY_MEM_NAMESPACE, allow_pickle=True)
# pylint: enable=g-long-lambda
)
"""
expected_formatted_code = """\
_MEMCACHE = lazy.MakeLazy(
# pylint: disable=g-long-lambda
lambda: function.call.mem.clients(
FLAGS.some_flag_thingy,
default_namespace=_LAZY_MEM_NAMESPACE,
allow_pickle=True)
# pylint: enable=g-long-lambda
)
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB33228502(self):
unformatted_code = """\
def _():
success_rate_stream_table = module.Precompute(
query_function=module.DefineQueryFunction(
name='Response error ratio',
expression=((m.Fetch(
m.Raw('monarch.BorgTask',
'/corp/travel/trips2/dispatcher/email/response'),
{'borg_job': module_config.job, 'metric:response_type': 'SUCCESS'}),
m.Fetch(m.Raw('monarch.BorgTask', '/corp/travel/trips2/dispatcher/email/response'), {'borg_job': module_config.job}))
| m.Window(m.Delta('1h'))
| m.Join('successes', 'total')
| m.Point(m.VAL['successes'] / m.VAL['total']))))
"""
expected_formatted_code = """\
def _():
success_rate_stream_table = module.Precompute(
query_function=module.DefineQueryFunction(
name='Response error ratio',
expression=(
(m.Fetch(
m.Raw('monarch.BorgTask',
'/corp/travel/trips2/dispatcher/email/response'), {
'borg_job': module_config.job,
'metric:response_type': 'SUCCESS'
}),
m.Fetch(
m.Raw('monarch.BorgTask',
'/corp/travel/trips2/dispatcher/email/response'),
{'borg_job': module_config.job}))
| m.Window(m.Delta('1h'))
| m.Join('successes', 'total')
| m.Point(m.VAL['successes'] / m.VAL['total']))))
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB30394228(self):
code = """\
class _():
def _(self):
return some.randome.function.calling(
wf, None, alert.Format(alert.subject, alert=alert, threshold=threshold),
alert.Format(alert.body, alert=alert, threshold=threshold),
alert.html_formatting)
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB65246454(self):
unformatted_code = """\
class _():
def _(self):
self.assertEqual({i.id
for i in successful_instances},
{i.id
for i in self._statuses.successful_instances})
"""
expected_formatted_code = """\
class _():
def _(self):
self.assertEqual({i.id for i in successful_instances},
{i.id for i in self._statuses.successful_instances})
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB67935450(self):
unformatted_code = """\
def _():
return (
(Gauge(
metric='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
group_by=group_by + ['metric:process_name'],
metric_filter={'metric:process_name': process_name_re}),
Gauge(
metric='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
group_by=group_by + ['metric:process_name'],
metric_filter={'metric:process_name': process_name_re}))
| expr.Join(
left_name='start', left_default=0, right_name='end', right_default=0)
| m.Point(
m.Cond(m.VAL['end'] != 0, m.VAL['end'], k.TimestampMicros() /
1000000L) - m.Cond(m.VAL['start'] != 0, m.VAL['start'],
m.TimestampMicros() / 1000000L)))
"""
expected_formatted_code = """\
def _():
return (
(Gauge(
metric='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
group_by=group_by + ['metric:process_name'],
metric_filter={'metric:process_name': process_name_re}),
Gauge(
metric='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
group_by=group_by + ['metric:process_name'],
metric_filter={'metric:process_name': process_name_re}))
| expr.Join(
left_name='start', left_default=0, right_name='end', right_default=0)
| m.Point(
m.Cond(m.VAL['end'] != 0, m.VAL['end'],
k.TimestampMicros() / 1000000L) -
m.Cond(m.VAL['start'] != 0, m.VAL['start'],
m.TimestampMicros() / 1000000L)))
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB66011084(self):
unformatted_code = """\
X = {
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": # Comment 1.
([] if True else [ # Comment 2.
"bbbbbbbbbbbbbbbbbbb", # Comment 3.
"cccccccccccccccccccccccc", # Comment 4.
"ddddddddddddddddddddddddd", # Comment 5.
"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", # Comment 6.
"fffffffffffffffffffffffffffffff", # Comment 7.
"ggggggggggggggggggggggggggg", # Comment 8.
"hhhhhhhhhhhhhhhhhh", # Comment 9.
]),
}
"""
expected_formatted_code = """\
X = {
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": # Comment 1.
([] if True else [ # Comment 2.
"bbbbbbbbbbbbbbbbbbb", # Comment 3.
"cccccccccccccccccccccccc", # Comment 4.
"ddddddddddddddddddddddddd", # Comment 5.
"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", # Comment 6.
"fffffffffffffffffffffffffffffff", # Comment 7.
"ggggggggggggggggggggggggggg", # Comment 8.
"hhhhhhhhhhhhhhhhhh", # Comment 9.
]),
}
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB67455376(self):
unformatted_code = """\
sponge_ids.extend(invocation.id() for invocation in self._client.GetInvocationsByLabels(labels))
"""
expected_formatted_code = """\
sponge_ids.extend(invocation.id()
for invocation in self._client.GetInvocationsByLabels(labels))
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB35210351(self):
unformatted_code = """\
def _():
config.AnotherRuleThing(
'the_title_to_the_thing_here',
{'monitorname': 'firefly',
'service': ACCOUNTING_THING,
'severity': 'the_bug',
'monarch_module_name': alerts.TheLabel(qa_module_regexp, invert=True)},
fanout,
alerts.AlertUsToSomething(
GetTheAlertToIt('the_title_to_the_thing_here'),
GetNotificationTemplate('your_email_here')))
"""
expected_formatted_code = """\
def _():
config.AnotherRuleThing(
'the_title_to_the_thing_here', {
'monitorname': 'firefly',
'service': ACCOUNTING_THING,
'severity': 'the_bug',
'monarch_module_name': alerts.TheLabel(qa_module_regexp, invert=True)
}, fanout,
alerts.AlertUsToSomething(
GetTheAlertToIt('the_title_to_the_thing_here'),
GetNotificationTemplate('your_email_here')))
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB34774905(self):
unformatted_code = """\
x=[VarExprType(ir_name=IrName( value='x',
expr_type=UnresolvedAttrExprType( atom=UnknownExprType(), attr_name=IrName(
value='x', expr_type=UnknownExprType(), usage='UNKNOWN', fqn=None,
astn=None), usage='REF'), usage='ATTR', fqn='<attr>.x', astn=None))]
"""
expected_formatted_code = """\
x = [
VarExprType(
ir_name=IrName(
value='x',
expr_type=UnresolvedAttrExprType(
atom=UnknownExprType(),
attr_name=IrName(
value='x',
expr_type=UnknownExprType(),
usage='UNKNOWN',
fqn=None,
astn=None),
usage='REF'),
usage='ATTR',
fqn='<attr>.x',
astn=None))
]
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB65176185(self):
code = """\
xx = zip(*[(a, b) for (a, b, c) in yy])
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB35210166(self):
unformatted_code = """\
def _():
query = (
m.Fetch(n.Raw('monarch.BorgTask', '/proc/container/memory/usage'), { 'borg_user': borguser, 'borg_job': jobname })
| o.Window(m.Align('5m')) | p.GroupBy(['borg_user', 'borg_job', 'borg_cell'], q.Mean()))
"""
expected_formatted_code = """\
def _():
query = (
m.Fetch(
n.Raw('monarch.BorgTask', '/proc/container/memory/usage'), {
'borg_user': borguser,
'borg_job': jobname
})
| o.Window(m.Align('5m'))
| p.GroupBy(['borg_user', 'borg_job', 'borg_cell'], q.Mean()))
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB32167774(self):
unformatted_code = """\
X = (
'is_official',
'is_cover',
'is_remix',
'is_instrumental',
'is_live',
'has_lyrics',
'is_album',
'is_compilation',)
"""
expected_formatted_code = """\
X = (
'is_official',
'is_cover',
'is_remix',
'is_instrumental',
'is_live',
'has_lyrics',
'is_album',
'is_compilation',
)
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB66912275(self):
unformatted_code = """\
def _():
with self.assertRaisesRegexp(errors.HttpError, 'Invalid'):
patch_op = api_client.forwardingRules().patch(
project=project_id,
region=region,
forwardingRule=rule_name,
body={'fingerprint': base64.urlsafe_b64encode('invalid_fingerprint')}).execute()
"""
expected_formatted_code = """\
def _():
with self.assertRaisesRegexp(errors.HttpError, 'Invalid'):
patch_op = api_client.forwardingRules().patch(
project=project_id,
region=region,
forwardingRule=rule_name,
body={
'fingerprint': base64.urlsafe_b64encode('invalid_fingerprint')
}).execute()
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB67312284(self):
code = """\
def _():
self.assertEqual(
[u'to be published 2', u'to be published 1', u'to be published 0'],
[el.text for el in page.first_column_tds])
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB65241516(self):
unformatted_code = """\
checkpoint_files = gfile.Glob(os.path.join(TrainTraceDir(unit_key, "*", "*"), embedding_model.CHECKPOINT_FILENAME + "-*"))
"""
expected_formatted_code = """\
checkpoint_files = gfile.Glob(
os.path.join(
TrainTraceDir(unit_key, "*", "*"),
embedding_model.CHECKPOINT_FILENAME + "-*"))
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB37460004(self):
code = textwrap.dedent("""\
assert all(s not in (_SENTINEL, None) for s in nested_schemas
), 'Nested schemas should never contain None/_SENTINEL'
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB36806207(self):
code = """\
def _():
linearity_data = [[row] for row in [
"%.1f mm" % (np.mean(linearity_values["pos_error"]) * 1000.0),
"%.1f mm" % (np.max(linearity_values["pos_error"]) * 1000.0),
"%.1f mm" % (np.mean(linearity_values["pos_error_chunk_mean"]) * 1000.0),
"%.1f mm" % (np.max(linearity_values["pos_error_chunk_max"]) * 1000.0),
"%.1f deg" % math.degrees(np.mean(linearity_values["rot_noise"])),
"%.1f deg" % math.degrees(np.max(linearity_values["rot_noise"])),
"%.1f deg" % math.degrees(np.mean(linearity_values["rot_drift"])),
"%.1f deg" % math.degrees(np.max(linearity_values["rot_drift"])),
"%.1f%%" % (np.max(linearity_values["pos_discontinuity"]) * 100.0),
"%.1f%%" % (np.max(linearity_values["rot_discontinuity"]) * 100.0)
]]
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB36215507(self):
code = textwrap.dedent("""\
class X():
def _():
aaaaaaaaaaaaa._bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb(
mmmmmmmmmmmmm, nnnnn, ooooooooo,
_(ppppppppppppppppppppppppppppppppppppp),
*(qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq),
**(qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB35212469(self):
unformatted_code = textwrap.dedent("""\
def _():
X = {
'retain': {
'loadtest': # This is a comment in the middle of a dictionary entry
('/some/path/to/a/file/that/is/needed/by/this/process')
}
}
""")
expected_formatted_code = textwrap.dedent("""\
def _():
X = {
'retain': {
'loadtest': # This is a comment in the middle of a dictionary entry
('/some/path/to/a/file/that/is/needed/by/this/process')
}
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB31063453(self):
unformatted_code = textwrap.dedent("""\
def _():
while ((not mpede_proc) or ((time_time() - last_modified) < FLAGS_boot_idle_timeout)):
pass
""")
expected_formatted_code = textwrap.dedent("""\
def _():
while ((not mpede_proc) or
((time_time() - last_modified) < FLAGS_boot_idle_timeout)):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB35021894(self):
unformatted_code = textwrap.dedent("""\
def _():
labelacl = Env(qa={
'read': 'name/some-type-of-very-long-name-for-reading-perms',
'modify': 'name/some-other-type-of-very-long-name-for-modifying'
},
prod={
'read': 'name/some-type-of-very-long-name-for-reading-perms',
'modify': 'name/some-other-type-of-very-long-name-for-modifying'
})
""")
expected_formatted_code = textwrap.dedent("""\
def _():
labelacl = Env(
qa={
'read': 'name/some-type-of-very-long-name-for-reading-perms',
'modify': 'name/some-other-type-of-very-long-name-for-modifying'
},
prod={
'read': 'name/some-type-of-very-long-name-for-reading-perms',
'modify': 'name/some-other-type-of-very-long-name-for-modifying'
})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB34682902(self):
unformatted_code = textwrap.dedent("""\
logging.info("Mean angular velocity norm: %.3f", np.linalg.norm(np.mean(ang_vel_arr, axis=0)))
""")
expected_formatted_code = textwrap.dedent("""\
logging.info("Mean angular velocity norm: %.3f",
np.linalg.norm(np.mean(ang_vel_arr, axis=0)))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB33842726(self):
unformatted_code = textwrap.dedent("""\
class _():
def _():
hints.append(('hg tag -f -l -r %s %s # %s' % (short(ctx.node(
)), candidatetag, firstline))[:78])
""")
expected_formatted_code = textwrap.dedent("""\
class _():
def _():
hints.append(('hg tag -f -l -r %s %s # %s' % (short(
ctx.node()), candidatetag, firstline))[:78])
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB32931780(self):
unformatted_code = textwrap.dedent("""\
environments = {
'prod': {
# this is a comment before the first entry.
'entry one':
'an entry.',
# this is the comment before the second entry.
'entry number 2.':
'something',
# this is the comment before the third entry and it's a doozy. So big!
'who':
'allin',
# This is an entry that has a dictionary in it. It's ugly
'something': {
'page': ['this-is-a-page@xxxxxxxx.com', 'something-for-eml@xxxxxx.com'],
'bug': ['bugs-go-here5300@xxxxxx.com'],
'email': ['sometypeof-email@xxxxxx.com'],
},
# a short comment
'yolo!!!!!':
'another-email-address@xxxxxx.com',
# this entry has an implicit string concatenation
'implicit':
'https://this-is-very-long.url-addr.com/'
'?something=something%20some%20more%20stuff..',
# A more normal entry.
'.....':
'this is an entry',
}
}
""")
expected_formatted_code = textwrap.dedent("""\
environments = {
'prod': {
# this is a comment before the first entry.
'entry one': 'an entry.',
# this is the comment before the second entry.
'entry number 2.': 'something',
# this is the comment before the third entry and it's a doozy. So big!
'who': 'allin',
# This is an entry that has a dictionary in it. It's ugly
'something': {
'page': [
'this-is-a-page@xxxxxxxx.com', 'something-for-eml@xxxxxx.com'
],
'bug': ['bugs-go-here5300@xxxxxx.com'],
'email': ['sometypeof-email@xxxxxx.com'],
},
# a short comment
'yolo!!!!!': 'another-email-address@xxxxxx.com',
# this entry has an implicit string concatenation
'implicit': 'https://this-is-very-long.url-addr.com/'
'?something=something%20some%20more%20stuff..',
# A more normal entry.
'.....': 'this is an entry',
}
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB33047408(self):
code = textwrap.dedent("""\
def _():
for sort in (sorts or []):
request['sorts'].append({
'field': {
'user_field': sort
},
'order': 'ASCENDING'
})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB32714745(self):
code = textwrap.dedent("""\
class _():
def _BlankDefinition():
'''Return a generic blank dictionary for a new field.'''
return {
'type': '',
'validation': '',
'name': 'fieldname',
'label': 'Field Label',
'help': '',
'initial': '',
'required': False,
'required_msg': 'Required',
'invalid_msg': 'Please enter a valid value',
'options': {
'regex': '',
'widget_attr': '',
'choices_checked': '',
'choices_count': '',
'choices': {}
},
'isnew': True,
'dirty': False,
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB32737279(self):
unformatted_code = textwrap.dedent("""\
here_is_a_dict = {
'key':
# Comment.
'value'
}
""")
expected_formatted_code = textwrap.dedent("""\
here_is_a_dict = {
'key': # Comment.
'value'
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB32570937(self):
code = textwrap.dedent("""\
def _():
if (job_message.ball not in ('*', ball) or
job_message.call not in ('*', call) or
job_message.mall not in ('*', job_name)):
return False
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB31937033(self):
code = textwrap.dedent("""\
class _():
def __init__(self, metric, fields_cb=None):
self._fields_cb = fields_cb or (lambda *unused_args, **unused_kwargs: {})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB31911533(self):
code = """\
class _():
@parameterized.NamedParameters(
('IncludingModInfoWithHeaderList', AAAA, aaaa),
('IncludingModInfoWithoutHeaderList', BBBB, bbbbb),
('ExcludingModInfoWithHeaderList', CCCCC, cccc),
('ExcludingModInfoWithoutHeaderList', DDDDD, ddddd),
)
def _():
pass
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB31847238(self):
unformatted_code = textwrap.dedent("""\
class _():
def aaaaa(self, bbbbb, cccccccccccccc=None): # TODO(who): pylint: disable=unused-argument
return 1
def xxxxx(self, yyyyy, zzzzzzzzzzzzzz=None): # A normal comment that runs over the column limit.
return 1
""")
expected_formatted_code = textwrap.dedent("""\
class _():
def aaaaa(self, bbbbb, cccccccccccccc=None): # TODO(who): pylint: disable=unused-argument
return 1
def xxxxx(
self, yyyyy,
zzzzzzzzzzzzzz=None): # A normal comment that runs over the column limit.
return 1
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB30760569(self):
unformatted_code = textwrap.dedent("""\
{'1234567890123456789012345678901234567890123456789012345678901234567890':
'1234567890123456789012345678901234567890'}
""")
expected_formatted_code = textwrap.dedent("""\
{
'1234567890123456789012345678901234567890123456789012345678901234567890':
'1234567890123456789012345678901234567890'
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB26034238(self):
unformatted_code = textwrap.dedent("""\
class Thing:
def Function(self):
thing.Scrape('/aaaaaaaaa/bbbbbbbbbb/ccccc/dddd/eeeeeeeeeeeeee/ffffffffffffff').AndReturn(42)
""")
expected_formatted_code = textwrap.dedent("""\
class Thing:
def Function(self):
thing.Scrape(
'/aaaaaaaaa/bbbbbbbbbb/ccccc/dddd/eeeeeeeeeeeeee/ffffffffffffff'
).AndReturn(42)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB30536435(self):
unformatted_code = textwrap.dedent("""\
def main(unused_argv):
if True:
if True:
aaaaaaaaaaa.comment('import-from[{}] {} {}'.format(
bbbbbbbbb.usage,
ccccccccc.within,
imports.ddddddddddddddddddd(name_item.ffffffffffffffff)))
""")
expected_formatted_code = textwrap.dedent("""\
def main(unused_argv):
if True:
if True:
aaaaaaaaaaa.comment('import-from[{}] {} {}'.format(
bbbbbbbbb.usage, ccccccccc.within,
imports.ddddddddddddddddddd(name_item.ffffffffffffffff)))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB30442148(self):
unformatted_code = textwrap.dedent("""\
def lulz():
return (some_long_module_name.SomeLongClassName.
some_long_attribute_name.some_long_method_name())
""")
expected_formatted_code = textwrap.dedent("""\
def lulz():
return (some_long_module_name.SomeLongClassName.some_long_attribute_name.
some_long_method_name())
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB26868213(self):
unformatted_code = textwrap.dedent("""\
def _():
xxxxxxxxxxxxxxxxxxx = {
'ssssss': {'ddddd': 'qqqqq',
'p90': aaaaaaaaaaaaaaaaa,
'p99': bbbbbbbbbbbbbbbbb,
'lllllllllllll': yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy(),},
'bbbbbbbbbbbbbbbbbbbbbbbbbbbb': {
'ddddd': 'bork bork bork bo',
'p90': wwwwwwwwwwwwwwwww,
'p99': wwwwwwwwwwwwwwwww,
'lllllllllllll': None, # use the default
}
}
""")
expected_formatted_code = textwrap.dedent("""\
def _():
xxxxxxxxxxxxxxxxxxx = {
'ssssss': {
'ddddd': 'qqqqq',
'p90': aaaaaaaaaaaaaaaaa,
'p99': bbbbbbbbbbbbbbbbb,
'lllllllllllll': yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy(),
},
'bbbbbbbbbbbbbbbbbbbbbbbbbbbb': {
'ddddd': 'bork bork bork bo',
'p90': wwwwwwwwwwwwwwwww,
'p99': wwwwwwwwwwwwwwwww,
'lllllllllllll': None, # use the default
}
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB30173198(self):
code = textwrap.dedent("""\
class _():
def _():
self.assertFalse(
evaluation_runner.get_larps_in_eval_set('these_arent_the_larps'))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB29908765(self):
code = textwrap.dedent("""\
class _():
def __repr__(self):
return '<session %s on %s>' % (self._id,
self._stub._stub.rpc_channel().target()) # pylint:disable=protected-access
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB30087362(self):
code = textwrap.dedent("""\
def _():
for s in sorted(env['foo']):
bar()
# This is a comment
# This is another comment
foo()
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB30087363(self):
code = textwrap.dedent("""\
if False:
bar()
# This is a comment
# This is another comment
elif True:
foo()
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB29093579(self):
unformatted_code = textwrap.dedent("""\
def _():
_xxxxxxxxxxxxxxx(aaaaaaaa, bbbbbbbbbbbbbb.cccccccccc[
dddddddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee.fffffffffffffffffffff])
""")
expected_formatted_code = textwrap.dedent("""\
def _():
_xxxxxxxxxxxxxxx(
aaaaaaaa,
bbbbbbbbbbbbbb.cccccccccc[dddddddddddddddddddddddddddd.
eeeeeeeeeeeeeeeeeeeeee.fffffffffffffffffffff])
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB26382315(self):
code = textwrap.dedent("""\
@hello_world
# This is a first comment
# Comment
def foo():
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB27616132(self):
unformatted_code = textwrap.dedent("""\
if True:
query.fetch_page.assert_has_calls([
mock.call(100,
start_cursor=None),
mock.call(100,
start_cursor=cursor_1),
mock.call(100,
start_cursor=cursor_2),
])
""")
expected_formatted_code = textwrap.dedent("""\
if True:
query.fetch_page.assert_has_calls([
mock.call(100, start_cursor=None),
mock.call(100, start_cursor=cursor_1),
mock.call(100, start_cursor=cursor_2),
])
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB27590179(self):
unformatted_code = textwrap.dedent("""\
if True:
if True:
self.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = (
{ True:
self.bbb.cccccccccc(ddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee),
False:
self.bbb.cccccccccc(ddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee)
})
""")
expected_formatted_code = textwrap.dedent("""\
if True:
if True:
self.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = ({
True:
self.bbb.cccccccccc(ddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee),
False:
self.bbb.cccccccccc(ddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee)
})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB27266946(self):
unformatted_code = textwrap.dedent("""\
def _():
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = (self.bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccccccccccc)
""")
expected_formatted_code = textwrap.dedent("""\
def _():
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = (
self.bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb.
cccccccccccccccccccccccccccccccccccc)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB25505359(self):
code = textwrap.dedent("""\
_EXAMPLE = {
'aaaaaaaaaaaaaa': [{
'bbbb': 'cccccccccccccccccccccc',
'dddddddddddd': []
}, {
'bbbb': 'ccccccccccccccccccc',
'dddddddddddd': []
}]
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB25324261(self):
code = textwrap.dedent("""\
aaaaaaaaa = set(bbbb.cccc
for ddd in eeeeee.fffffffffff.gggggggggggggggg
for cccc in ddd.specification)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB25136704(self):
code = textwrap.dedent("""\
class f:
def test(self):
self.bbbbbbb[0]['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', {
'xxxxxx': 'yyyyyy'
}] = cccccc.ddd('1m', '10x1+1')
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB25165602(self):
code = textwrap.dedent("""\
def f():
ids = {u: i for u, i in zip(self.aaaaa, xrange(42, 42 + len(self.aaaaaa)))}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB25157123(self):
code = textwrap.dedent("""\
def ListArgs():
FairlyLongMethodName([relatively_long_identifier_for_a_list],
another_argument_with_a_long_identifier)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB25136820(self):
unformatted_code = textwrap.dedent("""\
def foo():
return collections.OrderedDict({
# Preceding comment.
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa':
'$bbbbbbbbbbbbbbbbbbbbbbbb',
})
""")
expected_formatted_code = textwrap.dedent("""\
def foo():
return collections.OrderedDict({
# Preceding comment.
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa':
'$bbbbbbbbbbbbbbbbbbbbbbbb',
})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB25131481(self):
unformatted_code = textwrap.dedent("""\
APPARENT_ACTIONS = ('command_type', {
'materialize': lambda x: some_type_of_function('materialize ' + x.command_def),
'#': lambda x: x # do nothing
})
""")
expected_formatted_code = textwrap.dedent("""\
APPARENT_ACTIONS = (
'command_type',
{
'materialize':
lambda x: some_type_of_function('materialize ' + x.command_def),
'#':
lambda x: x # do nothing
})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB23445244(self):
unformatted_code = textwrap.dedent("""\
def foo():
if True:
return xxxxxxxxxxxxxxxx(
command,
extra_env={
"OOOOOOOOOOOOOOOOOOOOO": FLAGS.zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
"PPPPPPPPPPPPPPPPPPPPP":
FLAGS.aaaaaaaaaaaaaa + FLAGS.bbbbbbbbbbbbbbbbbbb,
})
""")
expected_formatted_code = textwrap.dedent("""\
def foo():
if True:
return xxxxxxxxxxxxxxxx(
command,
extra_env={
"OOOOOOOOOOOOOOOOOOOOO":
FLAGS.zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
"PPPPPPPPPPPPPPPPPPPPP":
FLAGS.aaaaaaaaaaaaaa + FLAGS.bbbbbbbbbbbbbbbbbbb,
})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB20559654(self):
unformatted_code = textwrap.dedent("""\
class A(object):
def foo(self):
unused_error, result = server.Query(
['AA BBBB CCC DDD EEEEEEEE X YY ZZZZ FFF EEE AAAAAAAA'],
aaaaaaaaaaa=True, bbbbbbbb=None)
""")
expected_formatted_code = textwrap.dedent("""\
class A(object):
def foo(self):
unused_error, result = server.Query(
['AA BBBB CCC DDD EEEEEEEE X YY ZZZZ FFF EEE AAAAAAAA'],
aaaaaaaaaaa=True,
bbbbbbbb=None)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB23943842(self):
unformatted_code = textwrap.dedent("""\
class F():
def f():
self.assertDictEqual(
accounts, {
'foo':
{'account': 'foo',
'lines': 'l1\\nl2\\nl3\\n1 line(s) were elided.'},
'bar': {'account': 'bar',
'lines': 'l5\\nl6\\nl7'},
'wiz': {'account': 'wiz',
'lines': 'l8'}
})
""")
expected_formatted_code = textwrap.dedent("""\
class F():
def f():
self.assertDictEqual(
accounts, {
'foo': {
'account': 'foo',
'lines': 'l1\\nl2\\nl3\\n1 line(s) were elided.'
},
'bar': {
'account': 'bar',
'lines': 'l5\\nl6\\nl7'
},
'wiz': {
'account': 'wiz',
'lines': 'l8'
}
})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB20551180(self):
unformatted_code = textwrap.dedent("""\
def foo():
if True:
return (struct.pack('aaaa', bbbbbbbbbb, ccccccccccccccc, dddddddd) + eeeeeee)
""")
expected_formatted_code = textwrap.dedent("""\
def foo():
if True:
return (
struct.pack('aaaa', bbbbbbbbbb, ccccccccccccccc, dddddddd) + eeeeeee)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB23944849(self):
unformatted_code = textwrap.dedent("""\
class A(object):
def xxxxxxxxx(self, aaaaaaa, bbbbbbb=ccccccccccc, dddddd=300, eeeeeeeeeeeeee=None, fffffffffffffff=0):
pass
""")
expected_formatted_code = textwrap.dedent("""\
class A(object):
def xxxxxxxxx(self,
aaaaaaa,
bbbbbbb=ccccccccccc,
dddddd=300,
eeeeeeeeeeeeee=None,
fffffffffffffff=0):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB23935890(self):
unformatted_code = textwrap.dedent("""\
class F():
def functioni(self, aaaaaaa, bbbbbbb, cccccc, dddddddddddddd, eeeeeeeeeeeeeee):
pass
""")
expected_formatted_code = textwrap.dedent("""\
class F():
def functioni(self, aaaaaaa, bbbbbbb, cccccc, dddddddddddddd,
eeeeeeeeeeeeeee):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB28414371(self):
code = textwrap.dedent("""\
def _():
return ((m.fffff(
m.rrr('mmmmmmmmmmmmmmmm', 'ssssssssssssssssssssssssss'), ffffffffffffffff)
| m.wwwwww(m.ddddd('1h'))
| m.ggggggg(bbbbbbbbbbbbbbb)
| m.ppppp(
(1 - m.ffffffffffffffff(llllllllllllllllllllll * 1000000, m.vvv))
* m.ddddddddddddddddd(m.vvv)),
m.fffff(
m.rrr('mmmmmmmmmmmmmmmm', 'sssssssssssssssssssssss'),
dict(
ffffffffffffffff, **{
'mmmmmm:ssssss':
m.rrrrrrrrrrr('|'.join(iiiiiiiiiiiiii), iiiiii=True)
}))
| m.wwwwww(m.rrrr('1h'))
| m.ggggggg(bbbbbbbbbbbbbbb))
| m.jjjj()
| m.ppppp(m.vvv[0] + m.vvv[1]))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB20127686(self):
code = textwrap.dedent("""\
def f():
if True:
return ((m.fffff(
m.rrr('xxxxxxxxxxxxxxxx',
'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'),
mmmmmmmm)
| m.wwwwww(m.rrrr(self.tttttttttt, self.mmmmmmmmmmmmmmmmmmmmm))
| m.ggggggg(self.gggggggg, m.sss()), m.fffff('aaaaaaaaaaaaaaaa')
| m.wwwwww(m.ddddd(self.tttttttttt, self.mmmmmmmmmmmmmmmmmmmmm))
| m.ggggggg(self.gggggggg))
| m.jjjj()
| m.ppppp(m.VAL[0] / m.VAL[1]))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB20016122(self):
try:
style.SetGlobalStyle(
style.CreateStyleFromConfig(
'{based_on_style: pep8, split_penalty_import_names: 35}'))
unformatted_code = textwrap.dedent("""\
from a_very_long_or_indented_module_name_yada_yada import (long_argument_1,
long_argument_2)
""")
expected_formatted_code = textwrap.dedent("""\
from a_very_long_or_indented_module_name_yada_yada import (
long_argument_1, long_argument_2)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines))
finally:
style.SetGlobalStyle(style.CreatePEP8Style())
try:
style.SetGlobalStyle(
style.CreateStyleFromConfig('{based_on_style: chromium, '
'split_before_logical_operator: True}'))
code = textwrap.dedent("""\
class foo():
def __eq__(self, other):
return (isinstance(other, type(self))
and self.xxxxxxxxxxx == other.xxxxxxxxxxx
and self.xxxxxxxx == other.xxxxxxxx
and self.aaaaaaaaaaaa == other.aaaaaaaaaaaa
and self.bbbbbbbbbbb == other.bbbbbbbbbbb
and self.ccccccccccccccccc == other.ccccccccccccccccc
and self.ddddddddddddddddddddddd == other.ddddddddddddddddddddddd
and self.eeeeeeeeeeee == other.eeeeeeeeeeee
and self.ffffffffffffff == other.time_completed
and self.gggggg == other.gggggg and self.hhh == other.hhh
and len(self.iiiiiiii) == len(other.iiiiiiii)
and all(jjjjjjj in other.iiiiiiii for jjjjjjj in self.iiiiiiii))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
finally:
style.SetGlobalStyle(style.CreateChromiumStyle())
def testB22527411(self):
unformatted_code = textwrap.dedent("""\
def f():
if True:
aaaaaa.bbbbbbbbbbbbbbbbbbbb[-1].cccccccccccccc.ddd().eeeeeeee(ffffffffffffff)
""")
expected_formatted_code = textwrap.dedent("""\
def f():
if True:
aaaaaa.bbbbbbbbbbbbbbbbbbbb[-1].cccccccccccccc.ddd().eeeeeeee(
ffffffffffffff)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB20849933(self):
unformatted_code = textwrap.dedent("""\
def main(unused_argv):
if True:
aaaaaaaa = {
'xxx': '%s/cccccc/ddddddddddddddddddd.jar' %
(eeeeee.FFFFFFFFFFFFFFFFFF),
}
""")
expected_formatted_code = textwrap.dedent("""\
def main(unused_argv):
if True:
aaaaaaaa = {
'xxx':
'%s/cccccc/ddddddddddddddddddd.jar' % (eeeeee.FFFFFFFFFFFFFFFFFF),
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB20813997(self):
code = textwrap.dedent("""\
def myfunc_1():
myarray = numpy.zeros((2, 2, 2))
print(myarray[:, 1, :])
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB20605036(self):
code = textwrap.dedent("""\
foo = {
'aaaa': {
# A comment for no particular reason.
'xxxxxxxx': 'bbbbbbbbb',
'yyyyyyyyyyyyyyyyyy': 'cccccccccccccccccccccccccccccc'
'dddddddddddddddddddddddddddddddddddddddddd',
}
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB20562732(self):
code = textwrap.dedent("""\
foo = [
# Comment about first list item
'First item',
# Comment about second list item
'Second item',
]
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB20128830(self):
code = textwrap.dedent("""\
a = {
'xxxxxxxxxxxxxxxxxxxx': {
'aaaa':
'mmmmmmm',
'bbbbb':
'mmmmmmmmmmmmmmmmmmmmm',
'cccccccccc': [
'nnnnnnnnnnn',
'ooooooooooo',
'ppppppppppp',
'qqqqqqqqqqq',
],
},
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB20073838(self):
code = textwrap.dedent("""\
class DummyModel(object):
def do_nothing(self, class_1_count):
if True:
class_0_count = num_votes - class_1_count
return ('{class_0_name}={class_0_count}, {class_1_name}={class_1_count}'
.format(
class_0_name=self.class_0_name,
class_0_count=class_0_count,
class_1_name=self.class_1_name,
class_1_count=class_1_count))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB19626808(self):
code = textwrap.dedent("""\
if True:
aaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbb(
'ccccccccccc', ddddddddd='eeeee').fffffffff([ggggggggggggggggggggg])
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB19547210(self):
code = textwrap.dedent("""\
while True:
if True:
if True:
if True:
if xxxxxxxxxxxx.yyyyyyy(aa).zzzzzzz() not in (
xxxxxxxxxxxx.yyyyyyyyyyyyyy.zzzzzzzz,
xxxxxxxxxxxx.yyyyyyyyyyyyyy.zzzzzzzz):
continue
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB19377034(self):
code = textwrap.dedent("""\
def f():
if (aaaaaaaaaaaaaaa.start >= aaaaaaaaaaaaaaa.end or
bbbbbbbbbbbbbbb.start >= bbbbbbbbbbbbbbb.end):
return False
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB19372573(self):
code = textwrap.dedent("""\
def f():
if a: return 42
while True:
if b: continue
if c: break
return 0
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
try:
style.SetGlobalStyle(style.CreatePEP8Style())
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
finally:
style.SetGlobalStyle(style.CreateChromiumStyle())
def testB19353268(self):
code = textwrap.dedent("""\
a = {1, 2, 3}[x]
b = {'foo': 42, 'bar': 37}['foo']
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB19287512(self):
unformatted_code = textwrap.dedent("""\
class Foo(object):
def bar(self):
with xxxxxxxxxx.yyyyy(
'aaaaaaa.bbbbbbbb.ccccccc.dddddddddddddddddddd.eeeeeeeeeee',
fffffffffff=(aaaaaaa.bbbbbbbb.ccccccc.dddddddddddddddddddd
.Mmmmmmmmmmmmmmmmmm(-1, 'permission error'))):
self.assertRaises(nnnnnnnnnnnnnnnn.ooooo, ppppp.qqqqqqqqqqqqqqqqq)
""")
expected_formatted_code = textwrap.dedent("""\
class Foo(object):
def bar(self):
with xxxxxxxxxx.yyyyy(
'aaaaaaa.bbbbbbbb.ccccccc.dddddddddddddddddddd.eeeeeeeeeee',
fffffffffff=(
aaaaaaa.bbbbbbbb.ccccccc.dddddddddddddddddddd.Mmmmmmmmmmmmmmmmmm(
-1, 'permission error'))):
self.assertRaises(nnnnnnnnnnnnnnnn.ooooo, ppppp.qqqqqqqqqqqqqqqqq)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB19194420(self):
code = textwrap.dedent("""\
method.Set(
'long argument goes here that causes the line to break',
lambda arg2=0.5: arg2)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB19073499(self):
code = """\
instance = (
aaaaaaa.bbbbbbb().ccccccccccccccccc().ddddddddddd({
'aa': 'context!'
}).eeeeeeeeeeeeeeeeeee({ # Inline comment about why fnord has the value 6.
'fnord': 6
}))
"""
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB18257115(self):
code = textwrap.dedent("""\
if True:
if True:
self._Test(aaaa, bbbbbbb.cccccccccc, dddddddd, eeeeeeeeeee,
[ffff, ggggggggggg, hhhhhhhhhhhh, iiiiii, jjjj])
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB18256666(self):
code = textwrap.dedent("""\
class Foo(object):
def Bar(self):
aaaaa.bbbbbbb(
ccc='ddddddddddddddd',
eeee='ffffffffffffffffffffff-%s-%s' % (gggg, int(time.time())),
hhhhhh={
'iiiiiiiiiii': iiiiiiiiiii,
'jjjj': jjjj.jjjjj(),
'kkkkkkkkkkkk': kkkkkkkkkkkk,
},
llllllllll=mmmmmm.nnnnnnnnnnnnnnnn)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB18256826(self):
code = textwrap.dedent("""\
if True:
pass
# A multiline comment.
# Line two.
elif False:
pass
if True:
pass
# A multiline comment.
# Line two.
elif False:
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB18255697(self):
code = textwrap.dedent("""\
AAAAAAAAAAAAAAA = {
'XXXXXXXXXXXXXX': 4242, # Inline comment
# Next comment
'YYYYYYYYYYYYYYYY': ['zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'],
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testB17534869(self):
unformatted_code = textwrap.dedent("""\
if True:
self.assertLess(abs(time.time()-aaaa.bbbbbbbbbbb(
datetime.datetime.now())), 1)
""")
expected_formatted_code = textwrap.dedent("""\
if True:
self.assertLess(
abs(time.time() - aaaa.bbbbbbbbbbb(datetime.datetime.now())), 1)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB17489866(self):
unformatted_code = textwrap.dedent("""\
def f():
if True:
if True:
return aaaa.bbbbbbbbb(ccccccc=dddddddddddddd({('eeee', \
'ffffffff'): str(j)}))
""")
expected_formatted_code = textwrap.dedent("""\
def f():
if True:
if True:
return aaaa.bbbbbbbbb(
ccccccc=dddddddddddddd({
('eeee', 'ffffffff'): str(j)
}))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB17133019(self):
unformatted_code = textwrap.dedent("""\
class aaaaaaaaaaaaaa(object):
def bbbbbbbbbb(self):
with io.open("/dev/null", "rb"):
with io.open(os.path.join(aaaaa.bbbbb.ccccccccccc,
DDDDDDDDDDDDDDD,
"eeeeeeeee ffffffffff"
), "rb") as gggggggggggggggggggg:
print(gggggggggggggggggggg)
""")
expected_formatted_code = textwrap.dedent("""\
class aaaaaaaaaaaaaa(object):
def bbbbbbbbbb(self):
with io.open("/dev/null", "rb"):
with io.open(
os.path.join(aaaaa.bbbbb.ccccccccccc, DDDDDDDDDDDDDDD,
"eeeeeeeee ffffffffff"), "rb") as gggggggggggggggggggg:
print(gggggggggggggggggggg)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB17011869(self):
unformatted_code = textwrap.dedent("""\
'''blah......'''
class SomeClass(object):
'''blah.'''
AAAAAAAAAAAA = { # Comment.
'BBB': 1.0,
'DDDDDDDD': 0.4811
}
""")
expected_formatted_code = textwrap.dedent("""\
'''blah......'''
class SomeClass(object):
'''blah.'''
AAAAAAAAAAAA = { # Comment.
'BBB': 1.0,
'DDDDDDDD': 0.4811
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB16783631(self):
unformatted_code = textwrap.dedent("""\
if True:
with aaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccc(ddddddddddddd,
eeeeeeeee=self.fffffffffffff
)as gggg:
pass
""")
expected_formatted_code = textwrap.dedent("""\
if True:
with aaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccc(
ddddddddddddd, eeeeeeeee=self.fffffffffffff) as gggg:
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB16572361(self):
unformatted_code = textwrap.dedent("""\
def foo(self):
def bar(my_dict_name):
self.my_dict_name['foo-bar-baz-biz-boo-baa-baa'].IncrementBy.assert_called_once_with('foo_bar_baz_boo')
""")
expected_formatted_code = textwrap.dedent("""\
def foo(self):
def bar(my_dict_name):
self.my_dict_name[
'foo-bar-baz-biz-boo-baa-baa'].IncrementBy.assert_called_once_with(
'foo_bar_baz_boo')
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB15884241(self):
unformatted_code = textwrap.dedent("""\
if 1:
if 1:
for row in AAAA:
self.create(aaaaaaaa="/aaa/bbbb/cccc/dddddd/eeeeeeeeeeeeeeeeeeeeeeeeee/%s" % row [0].replace(".foo", ".bar"), aaaaa=bbb[1], ccccc=bbb[2], dddd=bbb[3], eeeeeeeeeee=[s.strip() for s in bbb[4].split(",")], ffffffff=[s.strip() for s in bbb[5].split(",")], gggggg=bbb[6])
""")
expected_formatted_code = textwrap.dedent("""\
if 1:
if 1:
for row in AAAA:
self.create(
aaaaaaaa="/aaa/bbbb/cccc/dddddd/eeeeeeeeeeeeeeeeeeeeeeeeee/%s" %
row[0].replace(".foo", ".bar"),
aaaaa=bbb[1],
ccccc=bbb[2],
dddd=bbb[3],
eeeeeeeeeee=[s.strip() for s in bbb[4].split(",")],
ffffffff=[s.strip() for s in bbb[5].split(",")],
gggggg=bbb[6])
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB15697268(self):
unformatted_code = textwrap.dedent("""\
def main(unused_argv):
ARBITRARY_CONSTANT_A = 10
an_array_with_an_exceedingly_long_name = range(ARBITRARY_CONSTANT_A + 1)
ok = an_array_with_an_exceedingly_long_name[:ARBITRARY_CONSTANT_A]
bad_slice = map(math.sqrt, an_array_with_an_exceedingly_long_name[:ARBITRARY_CONSTANT_A])
a_long_name_slicing = an_array_with_an_exceedingly_long_name[:ARBITRARY_CONSTANT_A]
bad_slice = ("I am a crazy, no good, string whats too long, etc." + " no really ")[:ARBITRARY_CONSTANT_A]
""")
expected_formatted_code = textwrap.dedent("""\
def main(unused_argv):
ARBITRARY_CONSTANT_A = 10
an_array_with_an_exceedingly_long_name = range(ARBITRARY_CONSTANT_A + 1)
ok = an_array_with_an_exceedingly_long_name[:ARBITRARY_CONSTANT_A]
bad_slice = map(math.sqrt,
an_array_with_an_exceedingly_long_name[:ARBITRARY_CONSTANT_A])
a_long_name_slicing = an_array_with_an_exceedingly_long_name[:
ARBITRARY_CONSTANT_A]
bad_slice = ("I am a crazy, no good, string whats too long, etc." +
" no really ")[:ARBITRARY_CONSTANT_A]
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB15597568(self):
unformatted_code = textwrap.dedent("""\
if True:
if True:
if True:
print(("Return code was %d" + (", and the process timed out." if did_time_out else ".")) % errorcode)
""")
expected_formatted_code = textwrap.dedent("""\
if True:
if True:
if True:
print(("Return code was %d" + (", and the process timed out."
if did_time_out else ".")) % errorcode)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB15542157(self):
unformatted_code = textwrap.dedent("""\
aaaaaaaaaaaa = bbbb.ccccccccccccccc(dddddd.eeeeeeeeeeeeee, ffffffffffffffffff, gggggg.hhhhhhhhhhhhhhhhh)
""")
expected_formatted_code = textwrap.dedent("""\
aaaaaaaaaaaa = bbbb.ccccccccccccccc(dddddd.eeeeeeeeeeeeee, ffffffffffffffffff,
gggggg.hhhhhhhhhhhhhhhhh)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB15438132(self):
unformatted_code = textwrap.dedent("""\
if aaaaaaa.bbbbbbbbbb:
cccccc.dddddddddd(eeeeeeeeeee=fffffffffffff.gggggggggggggggggg)
if hhhhhh.iiiii.jjjjjjjjjjjjj:
# This is a comment in the middle of it all.
kkkkkkk.llllllllll.mmmmmmmmmmmmm = True
if (aaaaaa.bbbbb.ccccccccccccc != ddddddd.eeeeeeeeee.fffffffffffff or
eeeeee.fffff.ggggggggggggggggggggggggggg() != hhhhhhh.iiiiiiiiii.jjjjjjjjjjjj):
aaaaaaaa.bbbbbbbbbbbb(
aaaaaa.bbbbb.cc,
dddddddddddd=eeeeeeeeeeeeeeeeeee.fffffffffffffffff(
gggggg.hh,
iiiiiiiiiiiiiiiiiii.jjjjjjjjjj.kkkkkkk,
lllll.mm),
nnnnnnnnnn=ooooooo.pppppppppp)
""")
expected_formatted_code = textwrap.dedent("""\
if aaaaaaa.bbbbbbbbbb:
cccccc.dddddddddd(eeeeeeeeeee=fffffffffffff.gggggggggggggggggg)
if hhhhhh.iiiii.jjjjjjjjjjjjj:
# This is a comment in the middle of it all.
kkkkkkk.llllllllll.mmmmmmmmmmmmm = True
if (aaaaaa.bbbbb.ccccccccccccc != ddddddd.eeeeeeeeee.fffffffffffff or
eeeeee.fffff.ggggggggggggggggggggggggggg() !=
hhhhhhh.iiiiiiiiii.jjjjjjjjjjjj):
aaaaaaaa.bbbbbbbbbbbb(
aaaaaa.bbbbb.cc,
dddddddddddd=eeeeeeeeeeeeeeeeeee.fffffffffffffffff(
gggggg.hh, iiiiiiiiiiiiiiiiiii.jjjjjjjjjj.kkkkkkk, lllll.mm),
nnnnnnnnnn=ooooooo.pppppppppp)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB14468247(self):
unformatted_code = """\
call(a=1,
b=2,
)
"""
expected_formatted_code = """\
call(
a=1,
b=2,
)
"""
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB14406499(self):
unformatted_code = textwrap.dedent("""\
def foo1(parameter_1, parameter_2, parameter_3, parameter_4, \
parameter_5, parameter_6): pass
""")
expected_formatted_code = textwrap.dedent("""\
def foo1(parameter_1, parameter_2, parameter_3, parameter_4, parameter_5,
parameter_6):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB13900309(self):
unformatted_code = textwrap.dedent("""\
self.aaaaaaaaaaa( # A comment in the middle of it all.
948.0/3600, self.bbb.ccccccccccccccccccccc(dddddddddddddddd.eeee, True))
""")
expected_formatted_code = textwrap.dedent("""\
self.aaaaaaaaaaa( # A comment in the middle of it all.
948.0 / 3600, self.bbb.ccccccccccccccccccccc(dddddddddddddddd.eeee, True))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
code = textwrap.dedent("""\
aaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccccc(
DC_1, (CL - 50, CL), AAAAAAAA, BBBBBBBBBBBBBBBB, 98.0,
CCCCCCC).ddddddddd( # Look! A comment is here.
AAAAAAAA - (20 * 60 - 5))
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc().dddddddddddddddddddddddddd(1, 2, 3, 4)
""")
expected_formatted_code = textwrap.dedent("""\
aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc(
).dddddddddddddddddddddddddd(1, 2, 3, 4)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc(x).dddddddddddddddddddddddddd(1, 2, 3, 4)
""")
expected_formatted_code = textwrap.dedent("""\
aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc(
x).dddddddddddddddddddddddddd(1, 2, 3, 4)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
aaaaaaaaaaaaaaaaaaaaaaaa(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx).dddddddddddddddddddddddddd(1, 2, 3, 4)
""")
expected_formatted_code = textwrap.dedent("""\
aaaaaaaaaaaaaaaaaaaaaaaa(
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx).dddddddddddddddddddddddddd(1, 2, 3, 4)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
aaaaaaaaaaaaaaaaaaaaaaaa().bbbbbbbbbbbbbbbbbbbbbbbb().ccccccccccccccccccc().\
dddddddddddddddddd().eeeeeeeeeeeeeeeeeeeee().fffffffffffffffff().gggggggggggggggggg()
""")
expected_formatted_code = textwrap.dedent("""\
aaaaaaaaaaaaaaaaaaaaaaaa().bbbbbbbbbbbbbbbbbbbbbbbb().ccccccccccccccccccc(
).dddddddddddddddddd().eeeeeeeeeeeeeeeeeeeee().fffffffffffffffff(
).gggggggggggggggggg()
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testB67935687(self):
code = textwrap.dedent("""\
Fetch(
Raw('monarch.BorgTask', '/union/row_operator_action_delay'),
{'borg_user': self.borg_user})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
shelf_renderer.expand_text = text.translate_to_unicode(
expand_text % {
'creator': creator
})
""")
expected_formatted_code = textwrap.dedent("""\
shelf_renderer.expand_text = text.translate_to_unicode(
expand_text % {'creator': creator})
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
if __name__ == '__main__':
unittest.main()
|
sbc100/yapf
|
yapftests/reformatter_buganizer_test.py
|
Python
|
apache-2.0
| 73,591
|
[
"Firefly"
] |
9ef89ada202e244e96964fdbe1179345a2f48a08a230a93700230fc9df95ca93
|
# Copyright (c) 2004 Canonical Limited
# Author: Robert Collins <robert.collins@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
import logging
import unittest
class LogCollector(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
        self.records = []
def emit(self, record):
self.records.append(record.getMessage())
def makeCollectingLogger():
"""I make a logger instance that collects its logs for programmatic analysis
-> (logger, collector)"""
    logger = logging.Logger("collector")
    handler = LogCollector()
handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(handler)
return logger, handler
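# Illustrative sketch (not part of the original module): how the (logger,
# collector) pair returned above is typically consumed.  Note that
# LogCollector.emit stores record.getMessage(), so the formatter configured in
# makeCollectingLogger does not affect the collected strings.
def _example_collecting_logger_use():
    logger, collector = makeCollectingLogger()
    logger.warning("disk %s is full", "sda1")
    return collector.records  # -> ["disk sda1 is full"]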
def visitTests(suite, visitor):
"""A foreign method for visiting the tests in a test suite."""
for test in suite._tests:
        # Abusing types to avoid monkey patching unittest.TestCase.
# Maybe that would be better?
try:
test.visit(visitor)
except AttributeError:
if isinstance(test, unittest.TestCase):
visitor.visitCase(test)
elif isinstance(test, unittest.TestSuite):
visitor.visitSuite(test)
visitTests(test, visitor)
else:
print "unvisitable non-unittest.TestCase element %r (%r)" % (test, test.__class__)
class TestSuite(unittest.TestSuite):
"""I am an extended TestSuite with a visitor interface.
This is primarily to allow filtering of tests - and suites or
more in the future. An iterator of just tests wouldn't scale..."""
def visit(self, visitor):
"""visit the composite. Visiting is depth-first.
current callbacks are visitSuite and visitCase."""
visitor.visitSuite(self)
visitTests(self, visitor)
class TestLoader(unittest.TestLoader):
"""Custome TestLoader to set the right TestSuite class."""
suiteClass = TestSuite
class TestVisitor(object):
"""A visitor for Tests"""
def visitSuite(self, aTestSuite):
pass
def visitCase(self, aTestCase):
pass
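# Illustrative sketch (not part of the original module): a concrete visitor
# built on the interface above.  TestSuite.visit walks the composite
# depth-first, calling visitSuite for suites and visitCase for cases, so a
# visitor only needs to override the callbacks it cares about.
class _NameCollector(TestVisitor):
    """Example visitor that records the id of every visited test case."""
    def __init__(self):
        self.names = []
    def visitCase(self, aTestCase):
        self.names.append(aTestCase.id())
# Typical use (SomeTestCase is a placeholder name, not defined here):
#   suite = TestLoader().loadTestsFromTestCase(SomeTestCase)
#   collector = _NameCollector()
#   suite.visit(collector)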
|
zarboz/XBMC-PVR-mac
|
tools/darwin/depends/samba/samba-3.6.6/lib/subunit/python/subunit/tests/TestUtil.py
|
Python
|
gpl-2.0
| 2,788
|
[
"VisIt"
] |
489973e9b5caee4063426bb2b5e53d76dbffa8568d7ae2286536fa771adbda88
|
"""Test check utilities."""
# Authors: MNE Developers
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
import os
import os.path as op
import sys
import numpy as np
import pytest
from pathlib import Path
import mne
from mne import read_vectorview_selection
from mne.datasets import testing
from mne.io.pick import pick_channels_cov, _picks_to_idx
from mne.utils import (check_random_state, _check_fname, check_fname, _suggest,
_check_subject, _check_info_inv, _check_option, Bunch,
check_version, _path_like, _validate_type, _on_missing,
requires_nibabel, _safe_input, _check_ch_locs)
data_path = testing.data_path(download=False)
base_dir = op.join(data_path, 'MEG', 'sample')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_event = op.join(base_dir, 'sample_audvis_trunc_raw-eve.fif')
fname_fwd = op.join(base_dir, 'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_mgz = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
reject = dict(grad=4000e-13, mag=4e-12)
@testing.requires_testing_data
def test_check(tmp_path):
"""Test checking functions."""
pytest.raises(ValueError, check_random_state, 'foo')
pytest.raises(TypeError, _check_fname, 1)
_check_fname(Path('./foo'))
fname = tmp_path / 'foo'
with open(fname, 'wb'):
pass
assert op.isfile(fname)
_check_fname(fname, overwrite='read', must_exist=True)
orig_perms = os.stat(fname).st_mode
os.chmod(fname, 0)
if not sys.platform.startswith('win'):
with pytest.raises(PermissionError, match='read permissions'):
_check_fname(fname, overwrite='read', must_exist=True)
os.chmod(fname, orig_perms)
os.remove(fname)
assert not op.isfile(fname)
pytest.raises(IOError, check_fname, 'foo', 'tets-dip.x', (), ('.fif',))
pytest.raises(ValueError, _check_subject, None, None)
pytest.raises(TypeError, _check_subject, None, 1)
pytest.raises(TypeError, _check_subject, 1, None)
# smoke tests for permitted types
check_random_state(None).choice(1)
check_random_state(0).choice(1)
check_random_state(np.random.RandomState(0)).choice(1)
if check_version('numpy', '1.17'):
check_random_state(np.random.default_rng(0)).choice(1)
@testing.requires_testing_data
@pytest.mark.parametrize('suffix',
('_meg.fif', '_eeg.fif', '_ieeg.fif',
'_meg.fif.gz', '_eeg.fif.gz', '_ieeg.fif.gz'))
def test_check_fname_suffixes(suffix, tmp_path):
"""Test checking for valid filename suffixes."""
new_fname = tmp_path / op.basename(fname_raw).replace('_raw.fif', suffix)
raw = mne.io.read_raw_fif(fname_raw).crop(0, 0.1)
raw.save(new_fname)
mne.io.read_raw_fif(new_fname)
def _get_data():
"""Read in data used in tests."""
# read forward model
forward = mne.read_forward_solution(fname_fwd)
# read data
raw = mne.io.read_raw_fif(fname_raw, preload=True)
events = mne.read_events(fname_event)
event_id, tmin, tmax = 1, -0.1, 0.15
# decimate for speed
left_temporal_channels = read_vectorview_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True,
selection=left_temporal_channels)
picks = picks[::2]
raw.pick_channels([raw.ch_names[ii] for ii in picks])
del picks
raw.info.normalize_proj() # avoid projection warnings
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
baseline=(None, 0.), preload=True, reject=reject)
noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15)
return epochs, data_cov, noise_cov, forward
@testing.requires_testing_data
def test_check_info_inv():
"""Test checks for common channels across fwd model and cov matrices."""
epochs, data_cov, noise_cov, forward = _get_data()
# make sure same channel lists exist in data to make testing life easier
assert epochs.info['ch_names'] == data_cov.ch_names
assert epochs.info['ch_names'] == noise_cov.ch_names
# check whether bad channels get excluded from the channel selection
# info
info_bads = epochs.info.copy()
info_bads['bads'] = info_bads['ch_names'][1:3] # include two bad channels
picks = _check_info_inv(info_bads, forward, noise_cov=noise_cov)
assert [1, 2] not in picks
# covariance matrix
data_cov_bads = data_cov.copy()
data_cov_bads['bads'] = data_cov_bads.ch_names[0]
picks = _check_info_inv(epochs.info, forward, data_cov=data_cov_bads)
assert 0 not in picks
# noise covariance matrix
noise_cov_bads = noise_cov.copy()
noise_cov_bads['bads'] = noise_cov_bads.ch_names[1]
picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov_bads)
assert 1 not in picks
# test whether reference channels get deleted
info_ref = epochs.info.copy()
info_ref['chs'][0]['kind'] = 301 # pretend to have a ref channel
picks = _check_info_inv(info_ref, forward, noise_cov=noise_cov)
assert 0 not in picks
# pick channels in all inputs and make sure common set is returned
epochs.pick_channels([epochs.ch_names[ii] for ii in range(10)])
data_cov = pick_channels_cov(data_cov, include=[data_cov.ch_names[ii]
for ii in range(5, 20)])
noise_cov = pick_channels_cov(noise_cov, include=[noise_cov.ch_names[ii]
for ii in range(7, 12)])
picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov,
data_cov=data_cov)
assert list(range(7, 10)) == picks
def test_check_option():
"""Test checking the value of a parameter against a list of options."""
allowed_values = ['valid', 'good', 'ok']
# Value is allowed
assert _check_option('option', 'valid', allowed_values)
assert _check_option('option', 'good', allowed_values)
assert _check_option('option', 'ok', allowed_values)
assert _check_option('option', 'valid', ['valid'])
# Check error message for invalid value
msg = ("Invalid value for the 'option' parameter. Allowed values are "
"'valid', 'good', and 'ok', but got 'bad' instead.")
with pytest.raises(ValueError, match=msg):
assert _check_option('option', 'bad', allowed_values)
# Special error message if only one value is allowed
msg = ("Invalid value for the 'option' parameter. The only allowed value "
"is 'valid', but got 'bad' instead.")
with pytest.raises(ValueError, match=msg):
assert _check_option('option', 'bad', ['valid'])
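# Illustrative sketch (an assumption, not MNE's actual implementation): a
# validator with the same error-message shape that test_check_option asserts
# above -- one message when several values are allowed, another when only a
# single value is allowed.
def _check_option_sketch(param_name, value, allowed_values):
    """Raise ValueError unless ``value`` is one of ``allowed_values``."""
    if value in allowed_values:
        return True
    if len(allowed_values) == 1:
        options = "The only allowed value is %r" % (allowed_values[0],)
    else:
        quoted = [repr(v) for v in allowed_values]
        options = "Allowed values are %s, and %s" % (", ".join(quoted[:-1]),
                                                     quoted[-1])
    raise ValueError("Invalid value for the %r parameter. %s, but got %r "
                     "instead." % (param_name, options, value))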
def test_path_like():
"""Test _path_like()."""
str_path = str(base_dir)
pathlib_path = Path(base_dir)
no_path = dict(foo='bar')
assert _path_like(str_path) is True
assert _path_like(pathlib_path) is True
assert _path_like(no_path) is False
def test_validate_type():
"""Test _validate_type."""
_validate_type(1, 'int-like')
with pytest.raises(TypeError, match='int-like'):
_validate_type(False, 'int-like')
@requires_nibabel()
@testing.requires_testing_data
def test_suggest():
"""Test suggestions."""
names = mne.get_volume_labels_from_aseg(fname_mgz)
sug = _suggest('', names)
assert sug == '' # nothing
sug = _suggest('Left-cerebellum', names)
assert sug == " Did you mean 'Left-Cerebellum-Cortex'?"
sug = _suggest('Cerebellum-Cortex', names)
assert sug == " Did you mean one of ['Left-Cerebellum-Cortex', 'Right-Cerebellum-Cortex', 'Left-Cerebral-Cortex']?" # noqa: E501
def test_on_missing():
"""Test _on_missing."""
msg = 'test'
with pytest.raises(ValueError, match=msg):
_on_missing('raise', msg)
with pytest.warns(RuntimeWarning, match=msg):
_on_missing('warn', msg)
_on_missing('ignore', msg)
with pytest.raises(ValueError,
match='Invalid value for the \'on_missing\' parameter'):
_on_missing('foo', msg)
def _matlab_input(msg):
raise EOFError()
def test_safe_input(monkeypatch):
"""Test _safe_input."""
monkeypatch.setattr(mne.utils.check, 'input', _matlab_input)
with pytest.raises(RuntimeError, match='Could not use input'):
_safe_input('whatever', alt='nothing')
assert _safe_input('whatever', use='nothing') == 'nothing'
@testing.requires_testing_data
def test_check_ch_locs():
"""Test _check_ch_locs behavior."""
info = mne.io.read_info(fname_raw)
assert _check_ch_locs(info=info)
for picks in ([0], [0, 1], None):
assert _check_ch_locs(info=info, picks=picks)
for ch_type in ('meg', 'mag', 'grad', 'eeg'):
assert _check_ch_locs(info=info, ch_type=ch_type)
# drop locations for EEG
picks_eeg = _picks_to_idx(info=info, picks='eeg')
for idx in picks_eeg:
info['chs'][idx]['loc'][:3] = np.nan
# EEG tests should fail now
assert _check_ch_locs(info=info, picks=picks_eeg) is False
assert _check_ch_locs(info=info, ch_type='eeg') is False
# tests for other (and "all") channels should still pass
assert _check_ch_locs(info=info)
assert _check_ch_locs(info=info, ch_type='mag')
# Check a bunch of version schemes as of 2022/03/01
# We don't have to get this 100% generalized, but it would be nice if all
# of these worked.
@pytest.mark.parametrize('version, want, have_unstripped', [
# test some dev cases
('1.23.0.dev0+782.g1168868df6', '1.23', False), # NumPy
('1.9.0.dev0+1485.b06254e', '1.9', False), # SciPy
('3.6.0.dev1651+g30d6161406', '3.6', False), # matplotlib
('1.1.dev0', '1.1', False), # sklearn
('0.56.0dev0+39.gef1ba4c10', '0.56', False), # numba
('9.1.0.rc1', '9.1', False), # VTK
('0.3dev0', '0.3', False), # mne-connectivity
('0.2.2.dev0', '0.2.2', False), # mne-qt-browser
('3.2.2+150.g1e93bd5d', '3.2.2', True), # nibabel
# test some stable cases
('1.2.3', '1.2.3', True),
('1.2', '1.2', True),
('1', '1', True),
])
def test_strip_dev(version, want, have_unstripped, monkeypatch):
"""Test that stripping dev works."""
monkeypatch.setattr(
mne.utils.check, 'import_module',
lambda x: Bunch(__version__=version))
got_have_unstripped, same_version = check_version(
version, want, strip=False, return_version=True)
assert same_version == version
assert got_have_unstripped is have_unstripped
have, simpler_version = check_version(
'foo', want, return_version=True) # strip=True is the default
assert have, (simpler_version, version)
def looks_stable(version):
try:
[int(x) for x in version.split('.')]
except ValueError:
return False
else:
return True
if looks_stable(version):
assert 'dev' not in version
assert 'rc' not in version
assert simpler_version == version
else:
assert simpler_version != version
assert 'dev' not in simpler_version
assert 'rc' not in simpler_version
assert not simpler_version.endswith('.')
assert looks_stable(simpler_version)
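# A hedged usage sketch (not part of the test suite above): with the default
# strip=True, check_version() compares against the dev/rc-stripped version, so
# a development install such as '1.23.0.dev0+...' still satisfies a plain
# '1.23' minimum-version requirement, e.g.:
#
#   have, installed = check_version('numpy', '1.20', return_version=True)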
|
mne-tools/mne-python
|
mne/utils/tests/test_check.py
|
Python
|
bsd-3-clause
| 11,295
|
[
"VTK"
] |
65b7bb2d7c00ae200f5858bde7f0edfd7f97e5837fb1b8d260846c95cd2360b2
|
"""
Set of routines to interface with MULTI (1D or _3D)
"""
import numpy as np
import os
class Multi_3dOut:
def __init__(self, outfile=None, basedir='.', atmosid='', length=4,
verbose=False, readall=False):
""" Class that reads and deals with output from multi_3d """
self.verbose = verbose
out3dfiles = ['cmass3d', 'dscal2', 'height3d', 'Iv3d', 'taulg3d',
'x3d', 'xnorm3d']
c3dfiles = ['n3d', 'b3d']
if outfile is None:
outfile = '%s/out3d.%s' % (basedir, atmosid)
else:
basedir = os.path.split(outfile)[0]
            atmosid = os.path.split(outfile)[1].split('out3d.')[1]
out3dfiles = ['%s/%s.%s' % (basedir, s, atmosid) for s in out3dfiles]
c3dfiles = ['%s/%s.%s' % (basedir, s, atmosid) for s in c3dfiles]
self.read_out3d(outfile, length=length)
# read all output files
if readall:
for f in out3dfiles:
if os.path.isfile(f):
self.read_out3d(f, length=length)
for f in c3dfiles:
if os.path.isfile(f):
self.read_c3d(f, length=length,
mode=(os.path.split(f)[1].split('.' +
atmosid)[0]))
return
def check_basic(self):
"""
Checks to see if basic input parameters have been read from out3d.
"""
basic = ['nx', 'ny', 'ndep', 'mq', 'nrad', 'nqtot']
for p in basic:
if p not in dir(self):
raise ValueError('(EEE) %s has not been read. Make sure '
'out3d was read.' % p)
return
def read_out3d(self, outfile, length=4):
""" Reads out3d file. """
from ..io.fio import fort_read
# find out endianness
test = np.fromfile(outfile, dtype='<i', count=1)[0]
be = False if test == 16 else True
file = open(outfile, 'r')
readon = True
arrays_xyz = ['taulg3d', 'cmass3d', 'dscal2', 'xnorm3d', 'x3d',
'height3d']
while readon:
try:
itype, isize, cname = fort_read(file, 0, ['i', 'i', '8c'],
big_endian=be, length=length)
cname = cname.strip()
if self.verbose:
print(('--- reading ' + cname))
if cname == 'id':
self.id = fort_read(file, 0, ['80c'])[0].strip()
elif cname == 'dim':
aa = fort_read(file, isize, 'i', big_endian=be,
length=length)
self.nx, self.ny, self.ndep, self.mq, self.nrad = aa[:5]
if isize == 5:
self.version = 1
else:
self.version = 2
self.nq = aa[5:]
self.nqtot = np.sum(self.nq) + self.nrad
self.nxyz = self.nx * self.ny * self.ndep
elif cname == 'q':
self.check_basic()
aa = fort_read(file, self.mq * self.nrad, 'f',
big_endian=be, length=length)
self.q = np.transpose(aa.reshape(self.nrad, self.mq))
elif cname == 'xl':
self.check_basic()
self.xl = fort_read(file, self.nqtot, 'd', big_endian=be,
length=length)
elif cname in arrays_xyz:
self.check_basic()
aa = fort_read(file, self.nxyz, 'f', big_endian=be,
length=length)
setattr(self, cname,
np.transpose(aa.reshape(self.ndep, self.ny,
self.nx)))
elif cname == 'Iv':
self.check_basic()
aa = fort_read(file, isize, 'f', big_endian=be,
length=length)
self.Iv = np.transpose(aa.reshape(self.ny, self.nx,
self.nqtot))
                elif cname == 'n3d':  # might be broken...
self.check_basic()
self.nk = isize // (self.nx * self.ny * self.ndep)
aa = fort_read(file, isize, 'f', big_endian=be,
length=length)
self.n3d = np.transpose(aa.reshape(self.nk, self.ndep,
self.ny, self.nx))
elif cname == 'nk':
self.nk = fort_read(file, 1, 'i', big_endian=be,
length=length)[0]
else:
print(('(WWW) read_out3d: unknown label found: %s. '
'Aborting.' % cname))
break
except EOFError:
readon = False
if self.verbose:
print(('--- Read %s.' % outfile))
return
def read_c3d(self, outfile, length=4, mode='n3d'):
''' Reads the 3D cube output file, like n3d or b3d. '''
self.check_basic()
self.nk = os.path.getsize(outfile) // (self.nxyz * 4)
setattr(self, mode, np.memmap(outfile, dtype='Float32', mode='r',
order='F', shape=(self.nx, self.ny,
self.ndep, self.nk)))
if self.verbose:
print('--- Read ' + outfile)
return
class Atmos3d:
def __init__(self, infile, big_endian=False):
''' Reads multi_3d/old multi3d atmos3d file '''
self.big_endian = big_endian
self.read(infile, big_endian=big_endian)
return
def read(self, infile, big_endian, length=4):
from ..io.fio import fort_read
file = open(infile, 'r')
types = {4: 'f', 5: 'd'} # precision, float or double
# read header stuff
fort_read(file, 16, 'b', big_endian=big_endian, length=length)
nx, ny, nz = fort_read(file, 3, 'i', big_endian=big_endian,
length=length)
self.nx = nx
self.ny = ny
self.nz = nz
# x [cm]
itype, isize, lx1, lx2 = fort_read(file, 4, 'i', big_endian=big_endian,
length=length)
prec = types[itype]
self.x = fort_read(file, nx, prec, big_endian=big_endian,
length=length)
# y [cm]
fort_read(file, 16, 'b', big_endian=big_endian, length=length)
self.y = fort_read(file, ny, prec, big_endian=big_endian,
length=length)
# z [cm]
fort_read(file, 16, 'b', big_endian=big_endian, length=length)
self.z = fort_read(file, nz, prec, big_endian=big_endian,
length=length)
# electron density [cm-3]
fort_read(file, 16, 'b', big_endian=big_endian, length=length)
aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
length=length)
self.ne = np.transpose(aa.reshape((nz, ny, nx)))
# temperature [K]
fort_read(file, 16, 'b', big_endian=big_endian, length=length)
aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
length=length)
self.temp = np.transpose(aa.reshape((nz, ny, nx)))
# vx [km/s]
fort_read(file, 16, 'b', big_endian=big_endian, length=length)
aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
length=length)
self.vx = np.transpose(aa.reshape((nz, ny, nx)))
# vy [km/s]
fort_read(file, 16, 'b', big_endian=big_endian, length=length)
aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
length=length)
self.vy = np.transpose(aa.reshape((nz, ny, nx)))
# vz [km/s]
fort_read(file, 16, 'b', big_endian=big_endian, length=length)
aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
length=length)
self.vz = np.transpose(aa.reshape((nz, ny, nx)))
# reading rho, if written to file
last = fort_read(file, 16, 'b', big_endian=big_endian, length=length)
if len(last) != 0:
# rho [g cm-3]
aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
length=length)
self.rho = np.transpose(aa.reshape((nz, ny, nx)))
file.close()
return
def write_rh15d(self, outfile, sx=None, sy=None, sz=None, desc=None):
''' Writes atmos into rh15d NetCDF format. '''
from . import rh15d
if not hasattr(self, 'rho'):
            raise UnboundLocalError('(EEE) write_rh15d: present atmosphere has '
                                    'no rho, cannot convert to rh15d format')
# slicing and unit conversion
if sx is None:
sx = [0, self.nx, 1]
if sy is None:
sy = [0, self.ny, 1]
if sz is None:
sz = [0, self.nz, 1]
temp = self.temp[sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]]
rho = self.rho[sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2], sz[0]:sz[1]:sz[2]]
ne = self.ne[sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] * 1.e6
vz = self.vz[sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] * 1.e3
z = self.z[sz[0]:sz[1]:sz[2]] * 1e-2
nh = rho / 2.380491e-24 * 1.e6 # from rho to nH in m^-3
# write to file
rh15d.make_ncdf_atmos(outfile, temp, vz, ne, nh, z, append=False,
desc=desc, snap=0)
return
def watmos_multi(filename, temp, ne, z=None, logtau=None, vz=None, vturb=None,
cmass=None, nh=None, id='Model', scale='height', logg=4.44,
write_dscale=False, spherical=False, radius=6.96e5):
"""
    Writes atmosphere in MULTI format, on a HEIGHT, TAU(5000) or column MASS scale.
    A companion dscale file is written only if write_dscale is True.
The following units must be used:
* Temp [K]
* ne [cm^-3]
* nh [cm^-3]
* vz [km/s]
* vturb [km/s]
* z [km]
* cmass [gm cm^-2] (optional)
--Tiago, 20101118
"""
if scale.lower() == 'height':
if z is None:
raise ValueError('watmos_multi: height scale selected '
'but z not given!')
scl = z
desc = 'HEIGHT (KM)'
elif scale.lower() == 'tau':
        if logtau is None:
raise ValueError('watmos_multi: tau scale selected but '
'tau not given!')
scl = logtau
desc = 'LG TAU(5000)'
elif scale.lower() == 'mass':
if cmass is None:
raise ValueError('watmos_multi: mass scale selected but '
'column mass not given!')
scl = cmass
desc = 'LOG COLUMN MASS'
f = open(filename, 'w')
ndep = len(temp)
# write 'header'
f.write(' {0}\n*\n'.format(id))
f.write(' {0} scale\n'.format(scale).upper())
f.write('* LG G\n')
f.write('{0:6.2f}\n'.format(logg))
if spherical:
f.write('* Nradius Ncore Ninter\n')
f.write('{0:5d} 8 0\n'.format(ndep))
else:
f.write('* NDEP\n')
f.write('{0:5d}\n'.format(ndep))
f.write('* {0} TEMPERATURE NE V '
'VTURB\n'.format(desc))
if vz is None:
vz = np.zeros(ndep, dtype='f')
    if vturb is None:
        vturb = np.zeros(ndep, dtype='f')
    elif isinstance(vturb, (int, float)):  # constant vturb
        vturb = np.zeros(ndep, dtype='f') + vturb
# write atmosphere
for i in range(ndep):
# astype hack to get over numpy bug
f.write('{0:15.6E}{1:15.6E}{2:15.6E}{3:15.6E}{4:15.6E}'
'\n'.format(scl[i].astype('d'), temp[i].astype('d'),
ne[i].astype('d'), vz[i].astype('d'),
vturb[i].astype('d')))
# if nh given
if nh is not None:
if nh.shape != (6, ndep):
raise ValueError('watmos_multi: nh has incorrect shape. Must be '
'6 H levels!')
f.write('*\n* Hydrogen populations\n')
f.write('* nh(1) nh(2) nh(3) nh(4) nh(5) '
'np\n')
for i in range(ndep):
ss = ''
for j in range(nh.shape[0]):
ss += '{0:12.4E}'.format(nh[j, i].astype('d'))
f.write(ss + '\n')
f.close()
print('--- Wrote multi atmosphere to ' + filename)
if write_dscale:
f = open(filename + '.dscale', 'w')
f.write(' {0}\n*\n'.format(id))
f.write(' {0} scale\n'.format(scale).upper())
# setting the second element to zero will force it to be calculated
# in DPCONV. Will it work for height scale?
f.write('{0:5d} {1:.5f}\n'.format(ndep, 0.))
for i in range(ndep):
f.write('{0:15.6E}\n'.format(scl[i].astype('d')))
f.close()
print(('--- Wrote dscale to ' + filename + '.dscale'))
return
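# Hedged usage sketch (not part of the original module): writing a small,
# isothermal test atmosphere on a height scale, with the arrays in the units
# listed in the watmos_multi docstring above.
#
#   z = np.linspace(2000., 0., 50)        # height [km]
#   temp = np.zeros(50) + 6.e3            # temperature [K]
#   ne = np.zeros(50) + 1.e11             # electron density [cm^-3]
#   watmos_multi('atmos.test', temp, ne, z=z, id='toy model', scale='height')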
def write_atmos3d(outfile, x, y, z, ne, temp, vz, vx=None, vy=None, rho=None,
big_endian=False, length=4, prec='Float32'):
"""
Writes atmos3d atmosphere (format of 'old' multi3d and multi_3d).
vx and vy are optional, if not specified zeros will be used. rho is
also optional, if not specified it will not be written at the end of the
file.
Input 3D arrays (ne, temp, vx, vy, vz, rho) must be in C order!
(shape = [nx, ny, nz]) They will be written in Fortran order
(shape=[nz,ny,nx])
IN:
x, y, z [cm]: 1D arrays
ne [cm-3]: 3D array
temp [K]: 3D array
vz [km/s]: 3D array
vx, vy [km/s]: 3D arrays, optional
rho [g cm-3]: 3D array, optional
big_endian: Boolean, if true will write in big endian
length: length of fortran format pad. Should be 4 in most cases.
prec: precision (Float32 or Float64)
"""
import os
from ..io.fio import fort_write
if os.path.isfile(outfile):
raise IOError('(EEE) write_atmos3d: file %s already exists, refusing '
'to overwrite.' % outfile)
f = open(outfile, 'w')
# Tiago note: these should be fortran longs. However, in 64-bit systems the
# size of a long in python is 8 bytes, where fortran longs are
# still 4 bytes. Hence, it is better to keep all longs as ints,
# as sizeof(int) = 4
nx = len(x)
ny = len(y)
nz = len(z)
ii = 3
ir = 5 if prec in ['Float64', 'd'] else 4
ll = length
be = big_endian
if vx is None:
vx = np.zeros(vz.shape, dtype=prec)
if vy is None:
vy = np.zeros(vz.shape, dtype=prec)
fort_write(f, 0, [ii, 3, 'dim '], big_endian=be, length=ll)
fort_write(f, 0, [nx, ny, nz], big_endian=be, length=ll)
fort_write(f, 0, [ir, nx, 'x grid '], big_endian=be, length=ll)
fort_write(f, x.size, x.astype(prec), big_endian=be, length=ll)
fort_write(f, 0, [ir, nx, 'y grid '], big_endian=be, length=ll)
fort_write(f, y.size, y.astype(prec), big_endian=be, length=ll)
fort_write(f, 0, [ir, nx, 'z grid '], big_endian=be, length=ll)
fort_write(f, z.size, z.astype(prec), big_endian=be, length=ll)
fort_write(f, 0, [ir, nx, 'nne '], big_endian=be, length=ll)
fort_write(f, ne.size, np.transpose(ne).astype(prec), big_endian=be,
length=ll)
fort_write(f, 0, [ir, nx, 'temp '], big_endian=be, length=ll)
fort_write(f, temp.size, np.transpose(temp).astype(prec), big_endian=be,
length=ll)
fort_write(f, 0, [ir, nx, 'vel x '], big_endian=be, length=ll)
fort_write(f, vx.size, np.transpose(vx).astype(prec), big_endian=be,
length=ll)
fort_write(f, 0, [ir, nx, 'vel y '], big_endian=be, length=ll)
fort_write(f, vy.size, np.transpose(vy).astype(prec), big_endian=be,
length=ll)
fort_write(f, 0, [ir, nx, 'vel z '], big_endian=be, length=ll)
fort_write(f, vz.size, np.transpose(vz).astype(prec), big_endian=be,
length=ll)
if rho is not None:
fort_write(f, 0, [ir, nx, 'rho '], big_endian=be, length=ll)
fort_write(f, rho.size, np.transpose(rho).astype(prec), big_endian=be,
length=ll)
f.close()
print(('Wrote %s' % outfile))
return
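# Hedged usage sketch (illustrative only; the grid sizes and values below are
# made up, but the argument order and units follow the docstring above):
#
#   x = y = z = np.linspace(0., 1.e8, 32)                   # [cm]
#   ne = np.zeros((32, 32, 32), dtype='float32') + 1.e11    # [cm-3]
#   temp = np.zeros((32, 32, 32), dtype='float32') + 6.e3   # [K]
#   vz = np.zeros((32, 32, 32), dtype='float32')            # [km/s]
#   write_atmos3d('atmos3d.test', x, y, z, ne, temp, vz)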
|
M1kol4j/helita
|
helita/sim/multi.py
|
Python
|
bsd-3-clause
| 16,939
|
[
"NetCDF"
] |
b0984b34347a0158f7bdc31c7ad628d08d1bede27f6ce70cfdad3597616f14ab
|
"""Synthetic datasets for vector-field learning."""
from numpy import arange, sqrt, meshgrid, pi, exp, gradient, floor, hstack
# pylint: disable=C0103
def _gaussian(x_mesh, y_mesh, x_mesh_mean, y_mesh_mean, scale=1):
"""Generate a gaussian.
Parameters
----------
x_mesh : array, shape = [n_samples, n_samples]
The inputs mesh x_axis.
y_mesh : array, shape = [n_samples, n_samples]
The inputs mesh y_axis.
x_mesh_mean :
The x_axis center of the Gaussian.
y_mesh_mean :
The y_axis center of the Gaussian.
scale :
The scale parameter of the Gaussian. Must be positive.
Returns
-------
field : array, shape = [n_samples, n_samples]
A scalar field with a Gaussian with some scale centered at
x_mesh_mean, y_mesh_mean.
"""
x_mesh_centered = x_mesh - x_mesh_mean
y_mesh_centered = y_mesh - y_mesh_mean
return pi ** 2 * exp(- scale / 2 * (x_mesh_centered ** 2 +
y_mesh_centered ** 2)) / sqrt(scale)
def array2mesh(X, side=None):
"""Array to mesh converter.
Parameters
----------
X : array, shape = [n_samples, 2]
The inputs array.
Returns
-------
x_mesh : array, shape = [n_samples, n_samples]
The x_axis of the mesh corresponding to inputs.
y_mesh : array, shape = [n_samples, n_samples]
The y_axis of the mesh corresponding to inputs
"""
if side is None:
side = int(floor(sqrt(X.shape[0])))
x_mesh = X[:, 0].reshape((side, side))
y_mesh = X[:, 1].reshape((side, side))
return x_mesh, y_mesh
def mesh2array(x_mesh, y_mesh):
"""Mesh to array converter.
Parameters
----------
x_mesh : array, shape = [n_samples, n_samples]
The x_axis mesh.
y_mesh : array, shape = [n_samples, n_samples]
The y_axis mesh.
Returns
-------
inputs : array, shape = [n_samples, 2]
The inputs corresponding to the mesh (x_mesh, y_mesh).
"""
return hstack((x_mesh.ravel().reshape((-1, 1)),
y_mesh.ravel().reshape((-1, 1))))
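# Note added for illustration: for a square grid with side**2 rows,
# array2mesh and mesh2array are inverses of each other, i.e.
# mesh2array(*array2mesh(X)) reproduces X.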
def toy_data_curl_free_mesh(n_samples, loc=25., space=0.5):
"""Curl-Free toy dataset.
    Generate a scalar field as a mixture of four Gaussians centred at:
    - ( space, 0)
    - (-space, 0)
    - (0,  space)
    - (0, -space)
    with scale parameter 'loc' (the two Gaussians centred on the y axis enter
    with a negative sign). Then return the gradient of the field.
    The result is a pair of meshes.
    Parameters
    ----------
    n_samples : int
        Number of samples to generate.
    loc: float, optional (default = 25.)
        Scale parameter of the Gaussians (larger values give narrower bumps).
    space: float, optional (default = .5)
        Offset of the Gaussian centres from the origin.
Returns
-------
X, Y : :rtype: (array, array), shape = [n, n]
Mesh, X, Y coordinates.
U, V : :rtype: (array, array), shape = [n, n]
Mesh, (U, V) velocity at (X, Y) coordinates
See also
--------
operalib.toy_data_curl_free_field
Generate Curl-Free field.
operalib.toy_data_div_free_field
Generate Divergence-Free field.
"""
x_grid = arange(-1, 1, 2. / sqrt(n_samples))
y_grid = arange(-1, 1, 2. / sqrt(n_samples))
x_mesh, y_mesh = meshgrid(x_grid, y_grid)
field = (_gaussian(x_mesh, y_mesh, -space, 0, loc) +
_gaussian(x_mesh, y_mesh, space, 0, loc) -
_gaussian(x_mesh, y_mesh, 0, space, loc) -
_gaussian(x_mesh, y_mesh, 0, -space, loc))
v_mesh, u_mesh = gradient(field)
return (x_mesh, y_mesh), (u_mesh, v_mesh)
def toy_data_div_free_mesh(n_samples, loc=25., space=0.5):
"""Divergence-Free toy dataset.
    Generate a scalar field as a mixture of four Gaussians centred at:
    - ( space, 0)
    - (-space, 0)
    - (0,  space)
    - (0, -space)
    with scale parameter 'loc' (the two Gaussians centred on the y axis enter
    with a negative sign). Then return the orthogonal of the gradient of the
    field. The result is a pair of meshes.
    Parameters
    ----------
    n_samples : int
        Number of samples to generate.
    loc: float, optional (default = 25.)
        Scale parameter of the Gaussians (larger values give narrower bumps).
    space: float, optional (default = .5)
        Offset of the Gaussian centres from the origin.
Returns
-------
X, Y : :rtype: (array, array), shape = [n, n]
Mesh, X, Y coordinates.
U, V : :rtype: (array, array), shape = [n, n]
Mesh, (U, V) velocity at (X, Y) coordinates
See also
--------
operalib.toy_data_curl_free_field
Generate Curl-Free field.
operalib.toy_data_div_free_field
Generate Divergence-Free field.
"""
x_grid = arange(-1, 1, 2. / sqrt(n_samples))
y_grid = arange(-1, 1, 2. / sqrt(n_samples))
x_mesh, y_mesh = meshgrid(x_grid, y_grid)
field = (_gaussian(x_mesh, y_mesh, -space, 0, loc) +
_gaussian(x_mesh, y_mesh, space, 0, loc) -
_gaussian(x_mesh, y_mesh, 0, space, loc) -
_gaussian(x_mesh, y_mesh, 0, -space, loc))
v_mesh, u_mesh = gradient(field)
return (x_mesh, y_mesh), (v_mesh, -u_mesh)
def toy_data_curl_free_field(n_samples, loc=25, space=0.5):
"""Curl-Free toy dataset.
    Generate a scalar field as a mixture of four Gaussians centred at:
    - ( space, 0)
    - (-space, 0)
    - (0,  space)
    - (0, -space)
    with scale parameter 'loc' (the two Gaussians centred on the y axis enter
    with a negative sign). Then return the gradient of the field.
    The result is a pair (inputs, targets) of arrays.
    Parameters
    ----------
    n_samples : int
        Number of samples to generate.
    loc: float, optional (default = 25.)
        Scale parameter of the Gaussians (larger values give narrower bumps).
    space: float, optional (default = .5)
        Offset of the Gaussian centres from the origin.
Returns
-------
X : array, shape = [n_samples, 2]
        Array of evenly spaced points.
y : array shape = [n_samples, 2]
Array corresponding to the velocity at the coordinates present in
inputs.
See also
--------
operalib.toy_data_curl_free_mesh
Generate Curl-Free mesh.
operalib.toy_data_div_free_mesh
Generate Divergence-Free mesh.
"""
(x_mesh, y_mesh), (u_mesh, v_mesh) = toy_data_curl_free_mesh(n_samples,
loc, space)
inputs = mesh2array(x_mesh, y_mesh)
targets = mesh2array(u_mesh, v_mesh)
return inputs, targets
def toy_data_div_free_field(n_samples, loc=25, space=0.5):
"""Divergence-Free toy dataset.
    Generate a scalar field as a mixture of four Gaussians centred at:
    - ( space, 0)
    - (-space, 0)
    - (0,  space)
    - (0, -space)
    with scale parameter 'loc' (the two Gaussians centred on the y axis enter
    with a negative sign). Then return the orthogonal of the gradient of the
    field. The result is a pair (inputs, targets) of arrays.
    Parameters
    ----------
    n_samples : int
        Number of samples to generate.
    loc: float, optional (default = 25.)
        Scale parameter of the Gaussians (larger values give narrower bumps).
    space: float, optional (default = .5)
        Offset of the Gaussian centres from the origin.
Returns
-------
X : array, shape = [n_samples, 2]
        Array of evenly spaced points.
y : array shape = [n_samples, 2]
Array corresponding to the velocity at the coordinates present in
inputs.
See also
--------
    operalib.toy_data_curl_free_mesh
        Generate Curl-Free mesh.
operalib.toy_data_div_free_mesh
Generate Divergence-Free mesh.
"""
(x_mesh, y_mesh), (u_mesh, v_mesh) = toy_data_div_free_mesh(n_samples,
loc, space)
inputs = mesh2array(x_mesh, y_mesh)
targets = mesh2array(u_mesh, v_mesh)
return inputs, targets
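# Minimal smoke test added for illustration (not part of the original module);
# it only uses the functions defined above.
if __name__ == '__main__':
    X, y = toy_data_curl_free_field(n_samples=400)
    # 400 points on a 20 x 20 grid, each with a 2-D input and a 2-D velocity.
    print(X.shape, y.shape)    # -> (400, 2) (400, 2)
    (x_mesh, y_mesh), (u_mesh, v_mesh) = toy_data_div_free_mesh(400)
    print(x_mesh.shape, u_mesh.shape)    # -> (20, 20) (20, 20)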
|
operalib/operalib
|
operalib/datasets/vectorfield.py
|
Python
|
bsd-3-clause
| 7,798
|
[
"Gaussian"
] |
e3c1b1851215d59be586cffe7bc7e293419b76505da1a9154a458be8695356a3
|
# $HeadURL$
"""
Encoding and decoding for dirac, Ids:
i -> int
I -> long
f -> float
b -> bool
s -> string
z -> datetime
n -> none
l -> list
t -> tuple
d -> dictionary
"""
__RCSID__ = "$Id$"
import types
import datetime
_dateTimeObject = datetime.datetime.utcnow()
_dateTimeType = type( _dateTimeObject )
_dateType = type( _dateTimeObject.date() )
_timeType = type( _dateTimeObject.time() )
g_dEncodeFunctions = {}
g_dDecodeFunctions = {}
#Encoding and decoding ints
def encodeInt( iValue, eList ):
eList.extend( ( "i", str( iValue ), "e" ) )
def decodeInt( data, i ):
i += 1
end = data.index( 'e', i )
value = int( data[i:end] )
return ( value, end + 1 )
g_dEncodeFunctions[ types.IntType ] = encodeInt
g_dDecodeFunctions[ "i" ] = decodeInt
#Encoding and decoding longs
def encodeLong( iValue, eList ):
# corrected by KGG eList.extend( ( "l", str( iValue ), "e" ) )
eList.extend( ( "I", str( iValue ), "e" ) )
def decodeLong( data, i ):
i += 1
end = data.index( 'e', i )
value = long( data[i:end] )
return ( value, end + 1 )
g_dEncodeFunctions[ types.LongType ] = encodeLong
g_dDecodeFunctions[ "I" ] = decodeLong
#Encoding and decoding floats
def encodeFloat( iValue, eList ):
eList.extend( ( "f", str( iValue ), "e" ) )
def decodeFloat( data, i ):
i += 1
end = data.index( 'e', i )
if end + 1 < len( data ) and data[end + 1] in ( '+', '-' ):
eI = end
end = data.index( 'e', end + 1 )
value = float( data[i:eI] ) * 10 ** int( data[eI + 1:end] )
else:
value = float( data[i:end] )
return ( value, end + 1 )
g_dEncodeFunctions[ types.FloatType ] = encodeFloat
g_dDecodeFunctions[ "f" ] = decodeFloat
#Encoding and decoding booleans
def encodeBool( bValue, eList ):
if bValue:
eList.append( "b1" )
else:
eList.append( "b0" )
def decodeBool( data, i ):
if data[ i + 1 ] == "0":
return ( False, i + 2 )
else:
return ( True, i + 2 )
g_dEncodeFunctions[ types.BooleanType ] = encodeBool
g_dDecodeFunctions[ "b" ] = decodeBool
#Encoding and decoding strings
def encodeString( sValue, eList ):
eList.extend( ( 's', str( len( sValue ) ), ':', sValue ) )
def decodeString( data, i ):
i += 1
colon = data.index( ":", i )
value = int( data[ i : colon ] )
colon += 1
end = colon + value
return ( data[ colon : end] , end )
g_dEncodeFunctions[ types.StringType ] = encodeString
g_dDecodeFunctions[ "s" ] = decodeString
#Encoding and decoding unicode strings
def encodeUnicode( sValue, eList ):
valueStr = sValue.encode( 'utf-8' )
eList.extend( ( 'u', str( len( valueStr ) ), ':', valueStr ) )
def decodeUnicode( data, i ):
i += 1
colon = data.index( ":", i )
value = int( data[ i : colon ] )
colon += 1
end = colon + value
return ( unicode( data[ colon : end], 'utf-8' ) , end )
g_dEncodeFunctions[ types.UnicodeType ] = encodeUnicode
g_dDecodeFunctions[ "u" ] = decodeUnicode
#Encoding and decoding datetime
def encodeDateTime( oValue, eList ):
if type( oValue ) == _dateTimeType:
tDateTime = ( oValue.year, oValue.month, oValue.day, \
oValue.hour, oValue.minute, oValue.second, \
oValue.microsecond, oValue.tzinfo )
eList.append( "za" )
# corrected by KGG encode( tDateTime, eList )
g_dEncodeFunctions[ type( tDateTime ) ]( tDateTime, eList )
elif type( oValue ) == _dateType:
tData = ( oValue.year, oValue.month, oValue. day )
eList.append( "zd" )
# corrected by KGG encode( tData, eList )
g_dEncodeFunctions[ type( tData ) ]( tData, eList )
elif type( oValue ) == _timeType:
tTime = ( oValue.hour, oValue.minute, oValue.second, oValue.microsecond, oValue.tzinfo )
eList.append( "zt" )
# corrected by KGG encode( tTime, eList )
g_dEncodeFunctions[ type( tTime ) ]( tTime, eList )
else:
raise Exception( "Unexpected type %s while encoding a datetime object" % str( type( oValue ) ) )
def decodeDateTime( data, i ):
i += 1
dataType = data[i]
# corrected by KGG tupleObject, i = decode( data, i + 1 )
tupleObject, i = g_dDecodeFunctions[ data[ i + 1 ] ]( data, i + 1 )
if dataType == 'a':
dtObject = datetime.datetime( *tupleObject )
elif dataType == 'd':
dtObject = datetime.date( *tupleObject )
elif dataType == 't':
dtObject = datetime.time( *tupleObject )
else:
raise Exception( "Unexpected type %s while decoding a datetime object" % dataType )
return ( dtObject, i )
g_dEncodeFunctions[ _dateTimeType ] = encodeDateTime
g_dEncodeFunctions[ _dateType ] = encodeDateTime
g_dEncodeFunctions[ _timeType ] = encodeDateTime
g_dDecodeFunctions[ 'z' ] = decodeDateTime
#Encoding and decoding None
def encodeNone( oValue, eList ):
eList.append( "n" )
def decodeNone( data, i ):
return ( None, i + 1 )
g_dEncodeFunctions[ types.NoneType ] = encodeNone
g_dDecodeFunctions[ 'n' ] = decodeNone
#Encode and decode a list
def encodeList( lValue, eList ):
eList.append( "l" )
for uObject in lValue:
g_dEncodeFunctions[ type( uObject ) ]( uObject, eList )
eList.append( "e" )
def decodeList( data, i ):
oL = []
i += 1
while data[ i ] != "e":
ob, i = g_dDecodeFunctions[ data[ i ] ]( data, i )
oL.append( ob )
return( oL, i + 1 )
g_dEncodeFunctions[ types.ListType ] = encodeList
g_dDecodeFunctions[ "l" ] = decodeList
#Encode and decode a tuple
def encodeTuple( lValue, eList ):
eList.append( "t" )
for uObject in lValue:
g_dEncodeFunctions[ type( uObject ) ]( uObject, eList )
eList.append( "e" )
def decodeTuple( data, i ):
oL, i = decodeList( data, i )
return ( tuple( oL ), i )
g_dEncodeFunctions[ types.TupleType ] = encodeTuple
g_dDecodeFunctions[ "t" ] = decodeTuple
#Encode and decode a dictionary
def encodeDict( dValue, eList ):
eList.append( "d" )
for key in sorted( dValue ):
g_dEncodeFunctions[ type( key ) ]( key, eList )
g_dEncodeFunctions[ type( dValue[key] ) ]( dValue[key], eList )
eList.append( "e" )
def decodeDict( data, i ):
oD = {}
i += 1
while data[ i ] != "e":
k, i = g_dDecodeFunctions[ data[ i ] ]( data, i )
oD[ k ], i = g_dDecodeFunctions[ data[ i ] ]( data, i )
return ( oD, i + 1 )
g_dEncodeFunctions[ types.DictType ] = encodeDict
g_dDecodeFunctions[ "d" ] = decodeDict
#Encode function
def encode( uObject ):
try:
eList = []
#print "ENCODE FUNCTION : %s" % g_dEncodeFunctions[ type( uObject ) ]
g_dEncodeFunctions[ type( uObject ) ]( uObject, eList )
return "".join( eList )
except Exception:
raise
def decode( data ):
if not data:
return data
try:
#print "DECODE FUNCTION : %s" % g_dDecodeFunctions[ sStream [ iIndex ] ]
return g_dDecodeFunctions[ data[ 0 ] ]( data, 0 )
except Exception:
raise
if __name__ == "__main__":
gObject = {2:"3", True : ( 3, None ), 2.0 * 10 ** 20 : 2.0 * 10 ** -10 }
print "Initial: %s" % gObject
gData = encode( gObject )
print "Encoded: %s" % gData
print "Decoded: %s, [%s]" % decode( gData )
|
avedaee/DIRAC
|
Core/Utilities/DEncode.py
|
Python
|
gpl-3.0
| 6,959
|
[
"DIRAC"
] |
e5074eaefe86ab15a87e23d9e081467e9725b9c63f5f0ae330017c847e8e195e
|
"""
Genetics Module of the CAB Sugarscape simulation.
Encapsulates all aspects of the agent genetics.
Credit to David Grotzky.
"""
__author__ = 'Michael Wagner'
__version__ = '1.0'
from cab.util.rng import get_RNG
# TODO: Implement proper immune system.
class Chromosome:
"""
This class handles all biological aspects of an agent.
"""
def __init__(self, dna):
"""
Standard initializer.
:return:
"""
self.genomes = dna[0:2]
self.culture = dna[2]
self.immune_system = dna[3]
my_generation = max(dna[4][0], dna[4][1]) + 1
self.generation = (dna[4][0], dna[4][1], my_generation)
self.meta_sugar = None
self.meta_spice = None
self.init_sugar = None
self.init_spice = None
self.vision = None
self.gender = None
self.fertility = None
self.dying_age = None
self.dna_color = None
# Read dictionary entries as:
# ----> {attribute: (start index, end index)}
# TODO: Shift this map into GlobalConstants and automatically generate genome lengths from the given constants.
self.att_map = {'meta_sugar': (0, 3),
'meta_spice': (3, 6),
'init_sugar': (6, 12),
'init_spice': (12, 18),
'vision': (18, 21),
'gender': (21, 22),
'fertility_1': (22, 28),
'fertility_2': (28, 34),
'dying_age': (34, 41)}
self.map_genome_to_attributes()
def map_genome_to_attributes(self):
"""
Decodes the genome and creates the attribute of the individual.
"""
# The meta and init attributes cannot become smaller than 1,
# even though that is possible by the encoding. We have to avoid that.
meta_sugar = Chromosome.choose_dominant_gene(self.get_genome_substring('meta_sugar'))
meta_spice = Chromosome.choose_dominant_gene(self.get_genome_substring('meta_spice'))
init_sugar = Chromosome.choose_dominant_gene(self.get_genome_substring('init_sugar'))
init_spice = Chromosome.choose_dominant_gene(self.get_genome_substring('init_spice'))
vision = Chromosome.choose_dominant_gene(self.get_genome_substring('vision'))
gender = get_RNG().choice(self.get_genome_substring('gender'))
f1 = Chromosome.choose_dominant_gene(self.get_genome_substring('fertility_1'))
f2 = Chromosome.choose_dominant_gene(self.get_genome_substring('fertility_2'))
dying_age = Chromosome.choose_dominant_gene(self.get_genome_substring('dying_age'))
self.meta_sugar = max(int(meta_sugar, 2), 1)
self.meta_spice = max(int(meta_spice, 2), 1)
self.init_sugar = max(int(init_sugar, 2), 1)
self.init_spice = max(int(init_spice, 2), 1)
self.vision = int(vision, 2)
self.gender = int(gender, 2)
self.dying_age = int(dying_age, 2)
self.fertility = (int(f1, 2), int(f2, 2))
dna = "".join((meta_sugar, meta_spice, init_sugar, init_spice, vision, gender, f1, f2, dying_age))
self.dna_color = Chromosome.convert_to_color(dna)
def get_genome_substring(self, key):
"""
Retrieves the partitions of both genes.
:param key: The key of the partition entries' location in the dictionary
:return: Two sub-strings of the genomes
"""
indices = self.att_map[key]
start = indices[0]
end = indices[1]
return self.genomes[0][start: end], self.genomes[1][start: end]
@staticmethod
def choose_dominant_gene(strings):
"""
Takes two gene strings and returns the dominant one,
or random if both are dominant/ recessive
:param strings: Two sub-genes of the chromosome
:return: The more dominant/ luckier string of both.
"""
# How do we determine dominance?
# For now just by looking whether there is an even number of 'ones' in it.
dominant0 = strings[0].count('1') % 2 == 0
dominant1 = strings[1].count('1') % 2 == 0
if (dominant0 and dominant1) or (not (dominant0 or dominant1)):
return get_RNG().choice([strings[0], strings[1]])
        elif dominant0:
            return strings[0]
else:
return strings[1]
def merge_with(self, mate_chromosome):
"""
Takes the chromosome from the mate, performs
all necessary crossovers and returns the resulting DNA
:param mate_chromosome:
:return: The child's chromosome.
"""
# Concept: divide genome in partitions of varying length.
# Exchange those parts between mother and father gametes?
genome1 = Chromosome.create_gamete(self.genomes)
genome2 = Chromosome.create_gamete(mate_chromosome.genomes)
culture = Chromosome.create_gamete((self.culture, mate_chromosome.culture))
immune_sys = Chromosome.create_gamete((self.immune_system, mate_chromosome.immune_system))
# Create a string out of the gene strings
genome1 = "".join(map(str, genome1))
genome2 = "".join(map(str, genome2))
# Order the generation tuple for better overview: (mom, dad)
if self.gender == 1:
generation = (self.generation[2], mate_chromosome.generation[2])
else:
generation = (mate_chromosome.generation[2], self.generation[2])
return [genome1, genome2, culture, immune_sys, generation]
@staticmethod
def create_gamete(genomes):
"""
Creates and returns a gamete that consists of parts of
both genomes in this chromosome.
:return: Gamete in form of a single bitstring.
"""
# 1) Generate a random number (gaussian distributed) of
# random indices which are then used to split the genes at the respective points.
genome_size = len(genomes[0])
num_partitions = int(get_RNG().triangular(0, genome_size / 2, genome_size))
partitions = get_RNG().sample(range(genome_size), num_partitions)
partitions.sort() # Now we have all our indices, and sorted.
partitions.append(genome_size) # Append the end of the string
start = 0
gamete = []
for p in partitions:
i = get_RNG().choice([0, 1])
gamete.extend(genomes[i][start:p])
start = p
        # 'gamete' is now a list of single gene characters; the caller joins them back into a string.
return gamete
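    # Hedged illustration (not from the original source): with parent genomes
    # ('111111', '000000') and sorted partition indices [2, 4, 6], the chunks
    # [0:2], [2:4] and [4:6] are each copied from a randomly chosen parent,
    # e.g. yielding the gamete ['1', '1', '0', '0', '1', '1'].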
def mutate(self):
"""
Has a chance of 0.5% to perform a random mutation in the dna,
and a chance of 1% to flip a few bits in the cultural dna.
:return:
"""
# Flip bit in genome
if get_RNG().random() < 0.005:
            length = len(self.genomes[0])  # number of positions in each genome string
index = get_RNG().randrange(length)
l = list(self.genomes[0])
l[index] = Chromosome.invert_bit(l[index])
g1 = "".join(l)
index = get_RNG().randrange(length)
l = list(self.genomes[1])
l[index] = Chromosome.invert_bit(l[index])
g2 = "".join(l)
self.genomes = (g1, g2)
# Flip a bit in culture
if get_RNG().random() < 0.01:
length = len(self.culture)
num_bits_changed = int(get_RNG().triangular(0, 1, length))
index = get_RNG().sample(range(length), num_bits_changed)
for i in index:
self.culture[i] = 1 - self.culture[i]
@staticmethod
def invert_bit(bit):
"""
Takes the bit as a string and inverts it.
:param bit:
:return: Inverted bit
"""
if bit == "0":
return "1"
else:
return "0"
# This method makes sense only for Lamarckian Evolution!
# def map_attributes_to_genome(self, attributes):
# return
@staticmethod
def convert_to_color(dna):
# l = len(dna)
# l1 = int(l / 3)
# l2 = 2 * l1
r_string = dna[0::3] # dna[0:l1]
g_string = dna[1::3] # dna[l1:l2]
b_string = dna[2::3] # dna[l2:]
r_num = int(r_string, 2)
g_num = int(g_string, 2)
b_num = int(b_string, 2)
r = int((r_num / (2 ** len(r_string))) * 25) * 10
g = int((g_num / (2 ** len(g_string))) * 25) * 10
b = int((b_num / (2 ** len(b_string))) * 25) * 10
return r, g, b
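# Hedged usage sketch (illustrative only; the 41-bit genome length follows the
# att_map index ranges defined above, the other dna entries are made up):
#
#   genome = '0' * 41
#   dna = [genome, genome, [0] * 10, [0] * 10, (0, 0)]
#   chromosome = Chromosome(dna)
#   print(chromosome.vision, chromosome.dying_age, chromosome.dna_color)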
|
Micutio/CAB_Simulations
|
SugarScape/abm/ss_genetics.py
|
Python
|
mit
| 8,545
|
[
"Gaussian"
] |
1cb8b74c24899bd30dfa8626ba7899ff7197edcc2c6110811f41714a538a10c5
|
# $Id$
#
# Copyright (C) 2006 Greg Landrum
#
import unittest, os, sys
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import MolCatalog
class TestCase(unittest.TestCase):
def test1(self):
cat = MolCatalog.CreateMolCatalog()
es = []
for smi in ('C1CCC1OC', 'C1CCC1', 'C'):
m = Chem.MolFromSmiles(smi)
entry = MolCatalog.MolCatalogEntry()
entry.SetMol(m)
self.assertTrue(entry.GetMol())
eSmi = Chem.MolToSmiles(entry.GetMol())
self.assertTrue(eSmi == Chem.MolToSmiles(m))
entry.SetDescription(smi)
self.assertTrue(entry.GetDescription() == smi)
es.append(entry)
v = cat.AddEntry(es[0])
self.assertTrue(v == 0)
self.assertTrue(cat.GetNumEntries() == 1)
v = cat.AddEntry(es[1])
self.assertTrue(v == 1)
self.assertTrue(cat.GetNumEntries() == 2)
v = cat.AddEntry(es[2])
self.assertTrue(v == 2)
self.assertTrue(cat.GetNumEntries() == 3)
cat.AddEdge(0, 1)
cat.AddEdge(0, 2)
cat.AddEdge(1, 2)
d = cPickle.dumps(cat)
es = None
entry = None
cat = None
cat = cPickle.loads(d)
self.assertTrue(cat.GetNumEntries() == 3)
cat = None
if __name__ == '__main__':
unittest.main()
|
jandom/rdkit
|
Code/GraphMol/MolCatalog/Wrap/rough_test.py
|
Python
|
bsd-3-clause
| 1,303
|
[
"RDKit"
] |
3a92f260b8ffd2cea2792490cfc51c99620acb769bf46c74398713b8ddf9e3d3
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database of radical stabilization energies.
| Geometries from [E. Soydas and U. Bozkaya, JCTC, 9, 1452-1460 (2013)].
| Reference radical stabilization energies from [E. Soydas and U. Bozkaya, JCTC, 9, 1452-1460 (2013)] at CCSD(T)/cc-pCVTZ level.
- **benchmark**
- ``'RSE42'`` [E. Soydas and U. Bozkaya, JCTC, 9, 1452-1460 (2013)].
- |dl| ``'RSE42'`` |dr| [E. Soydas and U. Bozkaya, JCTC, 9, 1452-1460 (2013)].
- **subset**
- ``'small'`` <members_description>
- ``'large'`` <members_description>
- ``'RSE30'`` smaller systems in RSE42
- ``'<subset>'`` <members_description>
"""
import re
import qcdb
# <<< RSE42 Database Module >>>
dbse = 'RSE42'
isOS = 'True'
# <<< Database Members >>>
HRXN = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42']
HRXN_SM = []
HRXN_LG = []
RSE30 = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30']
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
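# Reading aid (comment added for clarity, not part of the original database):
# each reaction below is a hydrogen abstraction of the form
#     R-H  +  CH3.  -->  R.  +  CH4
# so the RXNM coefficients [-1, -1, 1, 1] subtract the two reactants and add
# the two products when the radical stabilization energy is assembled.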
ACTV['%s-%s' % (dbse, '1' )] = ['%s-%s-reagent' % (dbse, 'ch3no2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2no2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '1' )] = dict(zip(ACTV['%s-%s' % (dbse, '1')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '2' )] = ['%s-%s-reagent' % (dbse, 'ch3ocho'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2ocho'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '2' )] = dict(zip(ACTV['%s-%s' % (dbse, '2')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '3' )] = ['%s-%s-reagent' % (dbse, 'ch3sch3'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2sch3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '3' )] = dict(zip(ACTV['%s-%s' % (dbse, '3')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '4' )] = ['%s-%s-reagent' % (dbse, 'cfhch2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'cfch2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '4' )] = dict(zip(ACTV['%s-%s' % (dbse, '4')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '5' )] = ['%s-%s-reagent' % (dbse, 'ch3ch2f'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2ch2f'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '5' )] = dict(zip(ACTV['%s-%s' % (dbse, '5')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '6' )] = ['%s-%s-reagent' % (dbse, 'ch3cho'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2cho'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '6' )] = dict(zip(ACTV['%s-%s' % (dbse, '6')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '7' )] = ['%s-%s-reagent' % (dbse, 'ch3cn'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2cn'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '7' )] = dict(zip(ACTV['%s-%s' % (dbse, '7')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '8' )] = ['%s-%s-reagent' % (dbse, 'ch3f'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2f'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '8' )] = dict(zip(ACTV['%s-%s' % (dbse, '8')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '9' )] = ['%s-%s-reagent' % (dbse, 'ch3nh2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2nh2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '9' )] = dict(zip(ACTV['%s-%s' % (dbse, '9')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '10' )] = ['%s-%s-reagent' % (dbse, 'ch3nh3'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2nh3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '10' )] = dict(zip(ACTV['%s-%s' % (dbse, '10')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '11' )] = ['%s-%s-reagent' % (dbse, 'ch3nhoh'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2nhoh'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '11' )] = dict(zip(ACTV['%s-%s' % (dbse, '11')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '12' )] = ['%s-%s-reagent' % (dbse, 'ch3oh'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2oh'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '12' )] = dict(zip(ACTV['%s-%s' % (dbse, '12')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '13' )] = ['%s-%s-reagent' % (dbse, 'ch3ph3'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2ph3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '13' )] = dict(zip(ACTV['%s-%s' % (dbse, '13')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '14' )] = ['%s-%s-reagent' % (dbse, 'ch3sh2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2sh2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '14' )] = dict(zip(ACTV['%s-%s' % (dbse, '14')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '15' )] = ['%s-%s-reagent' % (dbse, 'ch3sh'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2sh'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '15' )] = dict(zip(ACTV['%s-%s' % (dbse, '15')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '16' )] = ['%s-%s-reagent' % (dbse, 'ch3cch'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2cch'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '16' )] = dict(zip(ACTV['%s-%s' % (dbse, '16')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '17' )] = ['%s-%s-reagent' % (dbse, 'ch3ch3'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2ch3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '17' )] = dict(zip(ACTV['%s-%s' % (dbse, '17')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '18' )] = ['%s-%s-reagent' % (dbse, 'ch3cl'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2cl'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '18' )] = dict(zip(ACTV['%s-%s' % (dbse, '18')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '19' )] = ['%s-%s-reagent' % (dbse, 'ch3bh2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2bh2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '19' )] = dict(zip(ACTV['%s-%s' % (dbse, '19')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '20' )] = ['%s-%s-reagent' % (dbse, 'ch2o'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'cho'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '20' )] = dict(zip(ACTV['%s-%s' % (dbse, '20')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '21' )] = ['%s-%s-reagent' % (dbse, 'ch3ph2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2ph2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '21' )] = dict(zip(ACTV['%s-%s' % (dbse, '21')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '22' )] = ['%s-%s-reagent' % (dbse, 'ch2clf'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'chclf'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '22' )] = dict(zip(ACTV['%s-%s' % (dbse, '22')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '23' )] = ['%s-%s-reagent' % (dbse, 'ch2fch3'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'chfch3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '23' )] = dict(zip(ACTV['%s-%s' % (dbse, '23')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '24' )] = ['%s-%s-reagent' % (dbse, 'ch2ohoh'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'chohoh'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '24' )] = dict(zip(ACTV['%s-%s' % (dbse, '24')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '25' )] = ['%s-%s-reagent' % (dbse, 'ch2cl2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'chcl2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '25' )] = dict(zip(ACTV['%s-%s' % (dbse, '25')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '26' )] = ['%s-%s-reagent' % (dbse, 'ch2f2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'chf2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '26' )] = dict(zip(ACTV['%s-%s' % (dbse, '26')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '27' )] = ['%s-%s-reagent' % (dbse, 'ch2chcn'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2ccn'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '27' )] = dict(zip(ACTV['%s-%s' % (dbse, '27')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '28' )] = ['%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'hcc'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '28' )] = dict(zip(ACTV['%s-%s' % (dbse, '28')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '29' )] = ['%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'c2h3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '29' )] = dict(zip(ACTV['%s-%s' % (dbse, '29')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '30' )] = ['%s-%s-reagent' % (dbse, 'ch3chch2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2chch2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '30' )] = dict(zip(ACTV['%s-%s' % (dbse, '30')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '31' )] = ['%s-%s-reagent' % (dbse, 'cyclopropane'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'cyclopropyl'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '31' )] = dict(zip(ACTV['%s-%s' % (dbse, '31')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '32' )] = ['%s-%s-reagent' % (dbse, 'ch3ch2cl'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2ch2cl'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '32' )] = dict(zip(ACTV['%s-%s' % (dbse, '32')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '33' )] = ['%s-%s-reagent' % (dbse, 'ch3ch2oh'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2ch2oh'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '33' )] = dict(zip(ACTV['%s-%s' % (dbse, '33')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '34' )] = ['%s-%s-reagent' % (dbse, 'methylcyclopropane'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'cyclopropylmethyl'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '34' )] = dict(zip(ACTV['%s-%s' % (dbse, '34')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '35' )] = ['%s-%s-reagent' % (dbse, 'ch3coch3'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2coch3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '35' )] = dict(zip(ACTV['%s-%s' % (dbse, '35')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '36' )] = ['%s-%s-reagent' % (dbse, 'ch3conh2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2conh2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '36' )] = dict(zip(ACTV['%s-%s' % (dbse, '36')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '37' )] = ['%s-%s-reagent' % (dbse, 'ch3cooh'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2cooh'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '37' )] = dict(zip(ACTV['%s-%s' % (dbse, '37')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '38' )] = ['%s-%s-reagent' % (dbse, 'ch3nhch3'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2nhch3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '38' )] = dict(zip(ACTV['%s-%s' % (dbse, '38')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '39' )] = ['%s-%s-reagent' % (dbse, 'ch3nhcho'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2nhcho'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '39' )] = dict(zip(ACTV['%s-%s' % (dbse, '39')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '40' )] = ['%s-%s-reagent' % (dbse, 'ch3och3'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch2och3'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '40' )] = dict(zip(ACTV['%s-%s' % (dbse, '40')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '41' )] = ['%s-%s-reagent' % (dbse, 'nh2ch2cn'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'nh2chcn'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '41' )] = dict(zip(ACTV['%s-%s' % (dbse, '41')], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, '42' )] = ['%s-%s-reagent' % (dbse, 'ch3chch2'),
'%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'ch3cch2'),
'%s-%s-reagent' % (dbse, 'ch4') ]
RXNM['%s-%s' % (dbse, '42' )] = dict(zip(ACTV['%s-%s' % (dbse, '42')], [-1,-1,1,1]))
# <<< Reference Values [kcal/mol] >>>
BIND = {}
BIND['%s-%s' % (dbse, '1' )] = -3.18
BIND['%s-%s' % (dbse, '2' )] = -4.74
BIND['%s-%s' % (dbse, '3' )] = -10.86
BIND['%s-%s' % (dbse, '4' )] = 6.72
BIND['%s-%s' % (dbse, '5' )] = -1.49
BIND['%s-%s' % (dbse, '6' )] = -9.65
BIND['%s-%s' % (dbse, '7' )] = -8.20
BIND['%s-%s' % (dbse, '8' )] = -4.17
BIND['%s-%s' % (dbse, '9' )] = -12.00
BIND['%s-%s' % (dbse, '10' )] = 4.59
BIND['%s-%s' % (dbse, '11' )] = -8.67
BIND['%s-%s' % (dbse, '12' )] = -9.21
BIND['%s-%s' % (dbse, '13' )] = 0.55
BIND['%s-%s' % (dbse, '14' )] = 2.35
BIND['%s-%s' % (dbse, '15' )] = -9.56
BIND['%s-%s' % (dbse, '16' )] = -12.66
BIND['%s-%s' % (dbse, '17' )] = -3.32
BIND['%s-%s' % (dbse, '18' )] = -5.58
BIND['%s-%s' % (dbse, '19' )] = -11.61
BIND['%s-%s' % (dbse, '20' )] = -17.41
BIND['%s-%s' % (dbse, '21' )] = -6.39
BIND['%s-%s' % (dbse, '22' )] = -6.48
BIND['%s-%s' % (dbse, '23' )] = -5.79
BIND['%s-%s' % (dbse, '24' )] = -6.59
BIND['%s-%s' % (dbse, '25' )] = -9.41
BIND['%s-%s' % (dbse, '26' )] = -3.97
BIND['%s-%s' % (dbse, '27' )] = 3.78
BIND['%s-%s' % (dbse, '28' )] = 27.08
BIND['%s-%s' % (dbse, '29' )] = 5.87
BIND['%s-%s' % (dbse, '30' )] = -17.05
BIND['%s-%s' % (dbse, '31' )] = 0.000
BIND['%s-%s' % (dbse, '32' )] = 0.000
BIND['%s-%s' % (dbse, '33' )] = 0.000
BIND['%s-%s' % (dbse, '34' )] = 0.000
BIND['%s-%s' % (dbse, '35' )] = 0.000
BIND['%s-%s' % (dbse, '36' )] = 0.000
BIND['%s-%s' % (dbse, '37' )] = 0.000
BIND['%s-%s' % (dbse, '38' )] = 0.000
BIND['%s-%s' % (dbse, '39' )] = 0.000
BIND['%s-%s' % (dbse, '40' )] = 0.000
BIND['%s-%s' % (dbse, '41' )] = 0.000
BIND['%s-%s' % (dbse, '42' )] = 0.000
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, '1' )] = """Reaction 1 """
TAGL['%s-%s' % (dbse, '2' )] = """Reaction 2 """
TAGL['%s-%s' % (dbse, '3' )] = """Reaction 3 """
TAGL['%s-%s' % (dbse, '4' )] = """Reaction 4 """
TAGL['%s-%s' % (dbse, '5' )] = """Reaction 5 """
TAGL['%s-%s' % (dbse, '6' )] = """Reaction 6 """
TAGL['%s-%s' % (dbse, '7' )] = """Reaction 7 """
TAGL['%s-%s' % (dbse, '8' )] = """Reaction 8 """
TAGL['%s-%s' % (dbse, '9' )] = """Reaction 9 """
TAGL['%s-%s' % (dbse, '10' )] = """Reaction 10 """
TAGL['%s-%s' % (dbse, '11' )] = """Reaction 11 """
TAGL['%s-%s' % (dbse, '12' )] = """Reaction 12 """
TAGL['%s-%s' % (dbse, '13' )] = """Reaction 13 """
TAGL['%s-%s' % (dbse, '14' )] = """Reaction 14 """
TAGL['%s-%s' % (dbse, '15' )] = """Reaction 15 """
TAGL['%s-%s' % (dbse, '16' )] = """Reaction 16 """
TAGL['%s-%s' % (dbse, '17' )] = """Reaction 17 """
TAGL['%s-%s' % (dbse, '18' )] = """Reaction 18 """
TAGL['%s-%s' % (dbse, '19' )] = """Reaction 19 """
TAGL['%s-%s' % (dbse, '20' )] = """Reaction 20 """
TAGL['%s-%s' % (dbse, '21' )] = """Reaction 21 """
TAGL['%s-%s' % (dbse, '22' )] = """Reaction 22 """
TAGL['%s-%s' % (dbse, '23' )] = """Reaction 23 """
TAGL['%s-%s' % (dbse, '24' )] = """Reaction 24 """
TAGL['%s-%s' % (dbse, '25' )] = """Reaction 25 """
TAGL['%s-%s' % (dbse, '26' )] = """Reaction 26 """
TAGL['%s-%s' % (dbse, '27' )] = """Reaction 27 """
TAGL['%s-%s' % (dbse, '28' )] = """Reaction 28 """
TAGL['%s-%s' % (dbse, '29' )] = """Reaction 29 """
TAGL['%s-%s' % (dbse, '30' )] = """Reaction 30 """
TAGL['%s-%s' % (dbse, '31' )] = """Reaction 31 """
TAGL['%s-%s' % (dbse, '32' )] = """Reaction 32 """
TAGL['%s-%s' % (dbse, '33' )] = """Reaction 33 """
TAGL['%s-%s' % (dbse, '34' )] = """Reaction 34 """
TAGL['%s-%s' % (dbse, '35' )] = """Reaction 35 """
TAGL['%s-%s' % (dbse, '36' )] = """Reaction 36 """
TAGL['%s-%s' % (dbse, '37' )] = """Reaction 37 """
TAGL['%s-%s' % (dbse, '38' )] = """Reaction 38 """
TAGL['%s-%s' % (dbse, '39' )] = """Reaction 39 """
TAGL['%s-%s' % (dbse, '40' )] = """Reaction 40 """
TAGL['%s-%s' % (dbse, '41' )] = """Reaction 41 """
TAGL['%s-%s' % (dbse, '42' )] = """Reaction 42 """
TAGL['%s-%s-reagent' % (dbse, 'ch2clf' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2fch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2chch2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3f' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'nh2ch2cn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2coch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cl' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ch2cl' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3ch2cl' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'cyclopropylmethyl' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'chohoh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3nhch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cch' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'cfch2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3bh2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cl' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'cyclopropane' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3ocho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2f' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ccn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2nhoh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'methylcyclopropane' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3ch2f' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2chcn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3ph2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'chfch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c2h2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cch2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2nhch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2bh2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3nh2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3nhcho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ph2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2conh2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ocho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3conh2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c2h3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ch2oh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3sh2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'hcc' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ohoh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2o' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cl2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3sch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cooh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ch2f' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2f2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'chclf' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch4' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2nh2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'chf2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3nhoh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3chch2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3coch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'cfhch2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3oh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2sh2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ph3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3sh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3och3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3ch2oh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cch' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c2h4' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2sh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'cho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cooh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2nhcho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2oh' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2nh3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3ph3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2och3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'nh2chcn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2sch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'chcl2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3nh3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3ch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3no2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2no2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'cyclopropyl' )] = """ """
# <<< Geometry Specification Strings >>>
GEOS = {}
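# Note on the entries below: each geometry is a qcdb.Molecule text block whose
# first line gives the molecular charge and spin multiplicity, followed by one
# Cartesian "element x y z" line per atom, with the units stated explicitly
# (angstrom).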
GEOS['%s-%s-%s' % (dbse, 'ch2clf', 'reagent')] = qcdb.Molecule("""
0 1
C 0.58067800 0.57509900 0.00000000
H 0.68300400 1.16697100 0.90686900
H 0.68300300 1.16698200 -0.90686000
F 1.50669700 -0.42314600 0.00000000
CL -1.08296100 -0.11624900 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2fch3', 'reagent')] = qcdb.Molecule("""
0 1
C -0.10731600 0.55006900 -0.00001800
C 1.19344700 -0.22296700 -0.00006600
H -0.20208100 1.17829900 0.89066500
H -0.20250500 1.17945300 -0.88990400
H 1.26426900 -0.85757700 -0.88617900
H 2.04390700 0.46538200 0.00014000
H 1.26409900 -0.85777600 0.88602700
F -1.18716300 -0.34115500 -0.00002700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2chch2', 'reagent')] = qcdb.Molecule("""
0 2
H 1.29620400 1.27814700 0.00031500
C 1.22747600 0.19569600 0.00007500
C 0.00002000 -0.44151000 -0.00011900
C -1.22747000 0.19572600 -0.00006200
H -2.15453000 -0.36306700 0.00092200
H 2.15459900 -0.36294700 -0.00001300
H -0.00012600 -1.52977700 -0.00012200
H -1.29630000 1.27817500 -0.00046200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3f', 'reagent')] = qcdb.Molecule("""
0 1
C -0.63474800 0.00000000 -0.00000500
H -0.99345300 0.93604800 -0.43577500
H -0.99347500 -0.09061900 1.02851700
H -0.99345500 -0.84542400 -0.59273200
F 0.75431900 0.00000000 0.00000200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'nh2ch2cn', 'reagent')] = qcdb.Molecule("""
0 1
H -2.42292000 -0.10756500 -0.17398600
N -1.48513000 -0.48905400 -0.11388600
C -0.52267600 0.60284100 0.04818300
C 0.85402700 0.09830600 -0.00779600
N 1.93691300 -0.29425300 -0.02269800
H -1.45838500 -1.10667700 0.69080300
H -0.64927500 1.30540400 -0.78074200
H -0.62000100 1.18510500 0.97768400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2coch3', 'reagent')] = qcdb.Molecule("""
0 2
C -1.12950700 -0.87211300 -0.00005500
C -0.08591400 0.11935900 -0.00029700
C 1.35884600 -0.35579400 -0.00012600
H -2.15866300 -0.53573300 0.00114600
H -0.91846000 -1.93531800 -0.00052700
O -0.36674800 1.31947500 -0.00001600
H 2.02124300 0.50840600 -0.01441200
H 1.56766900 -0.95785600 0.88952900
H 1.56164700 -0.98401000 -0.87274100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cl', 'reagent')] = qcdb.Molecule("""
0 1
C 1.14172600 0.00001000 -0.00001100
CL -0.66455600 -0.00000200 0.00000300
H 1.48242800 -0.15709100 1.02027600
H 1.48235400 -0.80512100 -0.64615700
H 1.48231600 0.96218500 -0.37410500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ch2cl', 'reagent')] = qcdb.Molecule("""
0 2
C 1.65215500 -0.36546200 -0.00002000
C 0.61456300 0.65491000 0.00017500
CL -1.10301600 -0.15556000 -0.00007700
H 1.97726100 -0.81625800 0.92878800
H 1.97884400 -0.81446300 -0.92913600
H 0.59733200 1.26901200 0.89634600
H 0.59752100 1.26954500 -0.89561800
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3ch2cl', 'reagent')] = qcdb.Molecule("""
0 1
C -1.61792700 -0.35828400 0.00000300
C -0.49398800 0.65857000 0.00000500
CL 1.14228200 -0.15032300 -0.00000200
H -2.57929200 0.16589400 -0.00142700
H -1.57402700 -0.99560000 -0.88502900
H -1.57568300 -0.99386300 0.88636700
H -0.50916200 1.28871900 -0.88740600
H -0.50913600 1.28862100 0.88748100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'cyclopropylmethyl', 'reagent')] = qcdb.Molecule("""
0 2
C 1.57690500 0.00025700 -0.16395700
C 0.27580500 -0.00015600 0.48454100
C -0.91190300 0.74706100 -0.13393400
C -0.91183900 -0.74701500 -0.13432800
H 1.65509200 0.00064000 -1.24484900
H 2.49026900 -0.00112700 0.41491300
H 0.29545600 -0.00050400 1.56912100
H -1.57797300 1.26783500 0.54415100
H -0.72934000 1.26136600 -1.07000800
H -1.57793900 -1.26822500 0.54341400
H -0.72937600 -1.26087000 -1.07067900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'chohoh', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00688400 0.51186100 -0.14492200
O -1.14672000 -0.22110200 -0.07812600
O 1.17471800 -0.16554500 0.08128400
H 0.02457000 1.50148200 0.30721800
H 1.07671900 -1.04466600 -0.30624400
H -1.36657400 -0.43480400 0.84328900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3nhch3', 'reagent')] = qcdb.Molecule("""
0 1
H 2.08967400 0.42785800 -0.06224700
C 1.21538900 -0.22241300 0.02028200
N 0.00000700 0.56360400 -0.14844600
C -1.21542600 -0.22239500 0.02030000
H -2.08960200 0.42801100 -0.06218200
H 1.28226700 -0.96647500 -0.77983000
H 1.27608700 -0.76437700 0.98168500
H 0.00021800 1.32940400 0.51640300
H -1.28238800 -0.96637700 -0.77987600
H -1.27608400 -0.76442700 0.98167300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cch', 'reagent')] = qcdb.Molecule("""
0 1
H 1.62986900 -0.22418700 0.99608700
C 1.23811600 -0.00000200 -0.00001400
C -0.21921700 -0.00003100 0.00000300
C -1.42012300 0.00005600 0.00006700
H -2.48216700 -0.00018300 -0.00029300
H 1.62978000 0.97477500 -0.30390300
H 1.62985800 -0.75053900 -0.69223100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'cfch2', 'reagent')] = qcdb.Molecule("""
0 2
F 1.18236000 0.12888300 0.00008300
C -0.00743100 -0.42937300 -0.00017000
C -1.20008000 0.11937700 -0.00003700
H -2.08422800 -0.50156000 0.00065000
H -1.31193900 1.20158500 -0.00015200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3bh2', 'reagent')] = qcdb.Molecule("""
0 1
C -0.68214300 -0.00001300 -0.01636900
B 0.87205600 -0.00003500 -0.02090300
H -1.14585100 -0.89605500 -0.43613100
H -0.94870400 0.00006100 1.05694600
H -1.14545300 0.89608700 -0.43641500
H 1.48609900 1.02439000 0.00926200
H 1.48648600 -1.02423100 0.00906800
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cl', 'reagent')] = qcdb.Molecule("""
0 2
C 1.12668100 0.00000000 -0.00045200
CL -0.58859200 0.00000000 0.00003600
H 1.62298700 -0.95630200 0.00105100
H 1.62298700 0.95630300 0.00105100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'cyclopropane', 'reagent')] = qcdb.Molecule("""
0 1
C 0.26222500 0.82955100 0.00000100
C 0.58730900 -0.64189800 0.00000100
C -0.84955700 -0.18773900 -0.00000300
H 0.43991900 1.39156800 -0.90914700
H 0.43990100 1.39158200 0.90914400
H -1.42517500 -0.31472400 -0.90913600
H -1.42515800 -0.31472000 0.90914200
H 0.98531900 -1.07660600 0.90916100
H 0.98533000 -1.07658600 -0.90916300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3ocho', 'reagent')] = qcdb.Molecule("""
0 1
C 1.36739400 0.41209300 0.00000000
O 0.00000000 0.87018200 0.00000000
C -0.93310600 -0.09275600 0.00000000
H -1.92702500 0.37721300 0.00000000
H 1.97602500 1.31296200 0.00000000
H 1.56764700 -0.18831200 0.88859200
H 1.56764700 -0.18831200 -0.88859200
O -0.72375300 -1.27387900 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2f', 'reagent')] = qcdb.Molecule("""
0 2
C 0.65517400 0.00000000 -0.05820600
H 1.12122500 -0.95818100 0.12958600
H 1.12122500 0.95818100 0.12958600
F -0.68594400 0.00000000 0.01000700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ccn', 'reagent')] = qcdb.Molecule("""
0 2
C -1.80740300 0.05630000 0.00026900
C -0.52077400 -0.14829300 -0.00056500
C 0.80722000 -0.01016700 0.00027900
N 1.98157900 0.04542000 0.00000400
H -2.22720000 1.06313700 -0.00028800
H -2.51811100 -0.76812000 0.00035400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2nhoh', 'reagent')] = qcdb.Molecule("""
0 2
H -2.10110400 0.31208400 -0.21944500
C -1.19615900 -0.26035100 -0.06713800
N -0.05044800 0.47182000 0.18732900
O 1.15271000 -0.24272900 -0.16818500
H 1.66921300 -0.19874400 0.64455800
H -1.23356900 -1.26401500 0.33245000
H -0.02612900 1.35187600 -0.32055700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'methylcyclopropane', 'reagent')] = qcdb.Molecule("""
0 1
H 2.12855200 0.88463900 0.15688700
C 1.55441500 0.00000200 -0.13663300
C 0.18108800 -0.00019200 0.49815600
C -0.95723500 0.75588200 -0.13990700
C -0.95721600 -0.75573000 -0.14010200
H 1.48094300 0.00056200 -1.22847700
H 2.12860600 -0.88478000 0.15624200
H 0.18845400 -0.00024300 1.58445700
H -1.66438600 1.26919000 0.50089000
H -0.76177200 1.26046600 -1.07978800
H -1.66416000 -1.26924700 0.50085800
H -0.76255500 -1.26035500 -1.08014900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3ch2f', 'reagent')] = qcdb.Molecule("""
0 1
H 2.04393800 0.46531400 -0.00025500
C 1.19346600 -0.22298200 0.00000300
C -0.10736700 0.55014500 0.00003600
F -1.18710800 -0.34119200 -0.00002700
H 1.26381800 -0.85762100 -0.88615200
H 1.26425400 -0.85768800 0.88610200
H -0.20233100 1.17862500 0.89046700
H -0.20230000 1.17911900 -0.89014800
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2chcn', 'reagent')] = qcdb.Molecule("""
0 1
H -2.62698000 0.00343300 0.00127500
C -1.60537600 -0.35564600 0.00019100
C -0.58353400 0.50215200 -0.00011700
C 0.78295500 0.08952700 -0.00090900
N 1.89484100 -0.22375700 0.00060300
H -1.45041800 -1.42770800 -0.00098200
H -0.75075800 1.57437800 0.00049800
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3ph2', 'reagent')] = qcdb.Molecule("""
0 1
C -1.19705600 -0.00003100 0.02529600
P 0.66982000 0.00000200 -0.12443800
H -1.58957300 -0.88074500 -0.48670100
H -1.55478100 -0.00028500 1.05489100
H -1.58939300 0.88107700 -0.48619200
H 0.93418900 1.03642300 0.81636300
H 0.93459700 -1.03630900 0.81643200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'chfch3', 'reagent')] = qcdb.Molecule("""
0 2
C 0.11105500 0.51607700 -0.10093000
C -1.19373800 -0.17214000 0.01228500
H 0.29639400 1.52732100 0.24560500
H -1.35692900 -0.58834000 1.01911100
H -2.00676900 0.52649400 -0.19767600
H -1.25878800 -1.00450200 -0.69563600
F 1.20246600 -0.28051100 0.01783000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c2h2', 'reagent')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 0.60499861
C 0.00000000 0.00000000 -0.60499861
H 0.00000000 -0.00000000 1.66488377
H 0.00000000 0.00000000 -1.66488377
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cho', 'reagent')] = qcdb.Molecule("""
0 1
H 1.15548300 -1.23749400 -0.00007400
C 1.16884900 -0.14775500 -0.00000800
C -0.23563200 0.39720900 -0.00001200
H -0.30508600 1.50871600 0.00003100
H 1.70764100 0.22227400 0.87912400
H 1.70778300 0.22244500 -0.87896500
O -1.23314000 -0.27658300 0.00000100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cch2', 'reagent')] = qcdb.Molecule("""
0 2
H -1.80737800 -0.28751200 -0.88203600
C -1.27092200 0.07589700 0.00003200
C 0.12553700 -0.37933600 -0.00002800
C 1.34313900 0.09759700 -0.00018800
H 2.22165600 -0.54200900 0.00065800
H -1.32823400 1.17576200 -0.00010500
H -1.80727200 -0.28728400 0.88226200
H 1.53470900 1.17609600 0.00032000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cho', 'reagent')] = qcdb.Molecule("""
0 2
H 1.26423000 -1.25074500 -0.00022300
C 1.16786100 -0.17141500 -0.00003900
C -0.13387000 0.40646800 -0.00006100
H -0.18501200 1.51209700 -0.00017600
H 2.05914100 0.44514300 0.00050900
O -1.16778800 -0.26460200 0.00006200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2nhch3', 'reagent')] = qcdb.Molecule("""
0 2
C 1.25665700 -0.25296800 0.09101800
N 0.09359200 0.47138600 -0.13643100
C -1.19132800 -0.18644800 0.03339200
H -1.99251100 0.48192100 -0.28866900
H 1.25395100 -1.28237900 -0.24805200
H 2.18576000 0.30121100 0.04601900
H 0.11541700 1.41397000 0.22790100
H -1.22447800 -1.07807600 -0.59874400
H -1.38526200 -0.49985500 1.07010400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2bh2', 'reagent')] = qcdb.Molecule("""
0 2
C 0.71330600 0.00000000 0.00000200
B -0.81296800 -0.00000200 -0.00002100
H 1.30746700 -0.90739700 -0.08361900
H 1.30746400 0.90739800 0.08363400
H -1.41496300 1.02725300 -0.07382900
H -1.41496300 -1.02724800 0.07390900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3nh2', 'reagent')] = qcdb.Molecule("""
0 1
H -1.11810600 -0.87790900 -0.48746900
C -0.70669300 -0.00000300 0.01773400
N 0.75219200 -0.00000100 -0.12374800
H -1.11797400 0.87906800 -0.48553000
H -1.08372700 -0.00111200 1.05295300
H 1.14730200 0.81201200 0.33996300
H 1.14731900 -0.81203100 0.33991600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cn', 'reagent')] = qcdb.Molecule("""
0 1
H 1.55303200 -0.19362100 1.00615000
C 1.17602200 0.00000000 -0.00000200
C -0.28095500 0.00000200 -0.00001400
N -1.43280200 -0.00000200 0.00000400
H 1.55307200 0.96815300 -0.33536300
H 1.55311000 -0.77452900 -0.67072400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3nhcho', 'reagent')] = qcdb.Molecule("""
0 1
C 1.45060800 -0.43388200 0.00000600
N 0.47537300 0.64484300 -0.00000900
C -0.86810900 0.42639900 0.00000000
H 2.08490300 -0.39459000 -0.89019800
H 2.08477400 -0.39465900 0.89029400
H 0.89750900 -1.37158600 -0.00008900
H 0.79723800 1.59973400 0.00000200
H -1.43727300 1.37613100 0.00002100
O -1.40621900 -0.66050400 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ph2', 'reagent')] = qcdb.Molecule("""
0 2
C 1.18935200 0.00005700 0.07043000
P -0.58298700 -0.00011500 -0.12323900
H 1.74314200 0.92389300 -0.04461000
H 1.74250200 -0.92404900 -0.04565700
H -0.93834000 -1.05920700 0.75926200
H -0.93860700 1.06075100 0.75701500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2conh2', 'reagent')] = qcdb.Molecule("""
0 2
H 2.18625900 -0.02366000 0.02261200
C 1.27446100 -0.60287200 0.00168800
C 0.01415400 0.12760900 -0.00043300
N -1.13042200 -0.64022200 -0.03845600
H -2.00633300 -0.15662700 0.08092200
H 1.32929100 -1.68477600 -0.02718100
H -1.10934400 -1.62800200 0.15069600
O -0.02732600 1.35327500 0.00432700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ocho', 'reagent')] = qcdb.Molecule("""
0 2
C 1.39696200 -0.44693100 -0.00027000
O 0.57750500 0.64559700 0.00004800
C -0.77182900 0.44666800 0.00003400
H -1.26113100 1.42735100 -0.00011500
H 0.95655300 -1.43028600 0.00054600
H 2.44597000 -0.20760900 0.00061800
O -1.31402900 -0.61908100 -0.00000200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00000000 0.00000000 0.00021400
H 0.00000000 1.08040900 -0.00042800
H 0.93566200 -0.54020500 -0.00042800
H -0.93566200 -0.54020500 -0.00042800
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3conh2', 'reagent')] = qcdb.Molecule("""
0 1
H 1.90246300 0.16251900 -0.79962900
C 1.36161600 -0.34591400 -0.00025200
C -0.07735100 0.14833800 -0.00371100
N -1.03441600 -0.82787400 -0.00411700
H -2.00130700 -0.54487700 0.01827000
H 1.46157800 -1.42571100 -0.12782200
H 1.82569300 -0.05631300 0.94525000
H -0.81281100 -1.80771000 0.01025300
O -0.35503700 1.33158200 0.00078500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c2h3', 'reagent')] = qcdb.Molecule("""
0 2
C 0.04825900 -0.58537400 0.00000000
C 0.04825900 0.71905900 0.00000000
H -0.88001800 -1.16408100 0.00000000
H 0.96858300 -1.16591900 0.00000000
H -0.66767700 1.52789000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ch2oh', 'reagent')] = qcdb.Molecule("""
0 2
H -2.18970300 0.23835000 -0.24005400
C -1.25191200 -0.25471300 -0.01994900
C 0.00851200 0.51937100 0.03731500
O 1.11275700 -0.37712300 -0.06380000
H 1.91487400 0.11923700 0.12180500
H -1.25306000 -1.28679500 0.30475400
H 0.02620700 1.27214500 -0.76868100
H 0.06002400 1.08609600 0.98837600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3sh2', 'reagent')] = qcdb.Molecule("""
1 1
H -1.51535400 -0.00004800 1.07239600
C -1.21297000 -0.00000100 0.02856200
S 0.62777800 0.00000100 -0.11140700
H -1.55326900 -0.89268700 -0.49425800
H -1.55326500 0.89273500 -0.49417700
H 0.92762600 0.99336200 0.76359300
H 0.92763100 -0.99337500 0.76357900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'hcc', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00000000 0.00000000 0.00000000
C 1.21283562 0.00000000 0.00000000
H -1.05818189 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ohoh', 'reagent')] = qcdb.Molecule("""
0 1
C -0.00005100 0.53181800 0.00000000
O 1.16882200 -0.24677600 0.09147700
O -1.16881300 -0.24681500 -0.09148400
H 0.00848900 1.16260500 0.89514600
H -0.00857300 1.16266000 -0.89507300
H -1.21965900 -0.78378900 0.70749200
H 1.21998200 -0.78365400 -0.70751500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2o', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000100 -0.52592000 0.00000000
O 0.00000100 0.67402700 0.00000000
H 0.93860800 -1.11835000 0.00000000
H -0.93861700 -1.11835000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cl2', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000000 0.76838400 0.00000000
CL 1.49583700 -0.21639700 0.00000000
CL -1.49583600 -0.21641300 0.00000000
H -0.00000700 1.37372600 0.89921100
H -0.00000700 1.37372600 -0.89921100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3sch3', 'reagent')] = qcdb.Molecule("""
0 1
C 1.39450900 -0.51481700 0.00000000
S 0.00000000 0.66160700 0.00000000
C -1.39450800 -0.51481700 0.00000000
H 2.30921400 0.07884900 0.00000000
H 1.38175000 -1.14140200 0.89387100
H 1.38175000 -1.14140200 -0.89387100
H -2.30921400 0.07885100 0.00000000
H -1.38175200 -1.14140000 -0.89387100
H -1.38175200 -1.14140000 0.89387100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cooh', 'reagent')] = qcdb.Molecule("""
0 2
C 1.37652700 -0.27885400 0.00007400
C -0.01480300 0.11096500 0.00004500
O -0.86030200 -0.95687700 -0.00004800
H -1.75682400 -0.59202100 0.00082400
H 1.66483000 -1.32063100 -0.00085900
H 2.12298100 0.50163400 0.00071600
O -0.41486400 1.25917100 -0.00012700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ch2f', 'reagent')] = qcdb.Molecule("""
0 2
C -1.22285000 -0.25059600 -0.02923000
C 0.03294900 0.52095200 0.06492100
F 1.14514700 -0.32233900 -0.06018000
H -1.26498700 -1.24902400 0.38614400
H -2.13334000 0.21667000 -0.38082800
H 0.09765700 1.28601300 -0.71691200
H 0.13375700 1.02525400 1.03907300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2f2', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000400 0.50314900 0.00000000
F -1.10821200 -0.29035800 -0.00000100
H 0.00000100 1.10379200 -0.91279600
H 0.00000100 1.10377600 0.91280700
F 1.10820900 -0.29036000 -0.00000100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'chclf', 'reagent')] = qcdb.Molecule("""
0 2
C -0.55167800 0.55013700 -0.13353900
H -0.72586900 1.49900600 0.36337900
F -1.52740700 -0.34238000 0.02747200
CL 1.04603500 -0.10108300 0.01121200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch4', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.00000000
H 0.62958700 0.62958700 0.62958700
H -0.62958700 -0.62958700 0.62958700
H -0.62958700 0.62958700 -0.62958700
H 0.62958700 -0.62958700 -0.62958700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2nh2', 'reagent')] = qcdb.Molecule("""
0 2
H 1.24137600 0.92884200 -0.13169100
C 0.72878100 -0.00007100 0.08564100
N -0.65633800 -0.00005500 -0.09925400
H 1.24163700 -0.92867500 -0.13216700
H -1.13115900 -0.83256900 0.22273500
H -1.13017600 0.83321100 0.22205700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'chf2', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00000000 0.48713600 -0.15110400
F -1.10027700 -0.24185600 0.02841400
H 0.00000000 1.43059400 0.39518300
F 1.10027700 -0.24185600 0.02841400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3nhoh', 'reagent')] = qcdb.Molecule("""
0 1
H 2.02509400 0.44185800 -0.06388800
C 1.16605500 -0.23232900 0.00456500
N -0.04441600 0.57005800 -0.15285600
O -1.17243400 -0.29041100 0.12920900
H -1.68759600 -0.22935400 -0.68128200
H 1.22725100 -0.95421100 -0.81193400
H 1.20629000 -0.77610500 0.95778200
H -0.07697800 1.24466600 0.60825200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3chch2', 'reagent')] = qcdb.Molecule("""
0 1
H -1.80765000 -0.15368300 -0.87813100
C -1.23352400 0.16233900 0.00002500
C 0.13465000 -0.45361900 -0.00010100
C 1.28048100 0.22043300 -0.00006200
H 2.23887900 -0.28641300 0.00024900
H -1.18210100 1.25365400 -0.00052600
H -1.80710900 -0.15289100 0.87880500
H 0.16667600 -1.54200800 0.00022700
H 1.30166500 1.30642200 0.00020600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3coch3', 'reagent')] = qcdb.Molecule("""
0 1
C -1.29144800 -0.61344900 0.00162800
C -0.00000100 0.18639500 -0.00001300
C 1.29144900 -0.61344500 -0.00163200
H -2.14125300 0.06176600 0.09074300
H -1.37741200 -1.18620100 -0.92773100
H -1.30085900 -1.33611700 0.82327900
O -0.00000300 1.39550900 0.00000200
H 2.14124800 0.06175100 -0.09095500
H 1.37750300 -1.18597500 0.92785600
H 1.30079800 -1.33630500 -0.82311200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'cfhch2', 'reagent')] = qcdb.Molecule("""
0 1
F 1.15775800 -0.22307000 0.00000600
C -0.02075700 0.43302300 -0.00002200
C -1.18005100 -0.19842800 0.00000300
H 0.11660400 1.50809800 0.00003400
H -2.09895700 0.37140500 0.00003100
H -1.23261900 -1.27943700 -0.00000900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3oh', 'reagent')] = qcdb.Molecule("""
0 1
H 1.08276700 0.98760000 -0.00028200
C 0.66362300 -0.01954600 0.00000100
O -0.75009300 0.12169100 0.00000000
H -1.13398500 -0.75944800 0.00001200
H 1.03509500 -0.54244500 -0.89192500
H 1.03513400 -0.54195800 0.89219400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2sh2', 'reagent')] = qcdb.Molecule("""
1 2
C -1.20662600 0.02170400 0.02219700
H -1.68207400 0.97499900 0.20542400
H -1.73384700 -0.90995700 -0.12920000
S 0.55425100 -0.07913500 -0.07976000
H 0.86827600 1.23974300 -0.17152600
H 0.91939200 -0.16885000 1.23828900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ph3', 'reagent')] = qcdb.Molecule("""
1 2
C -1.24118800 -0.00000500 -0.00420900
P 0.52397100 0.00000300 -0.00112200
H -1.77980500 -0.93960200 0.02212500
H -1.77982900 0.93957700 0.02212200
H 1.03398600 1.13558400 -0.64768800
H 1.07918000 -0.00072100 1.29444700
H 1.03403900 -1.13485700 -0.64892800
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3sh', 'reagent')] = qcdb.Molecule("""
0 1
H -1.52774200 -1.00777000 -0.00169900
C -1.16516100 0.01984900 0.00004300
S 0.66721700 -0.08708800 0.00001100
H 0.90833000 1.23982400 -0.00009200
H -1.53240700 0.52241100 -0.89383000
H -1.53269700 0.51984700 0.89518300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3och3', 'reagent')] = qcdb.Molecule("""
0 1
H 2.02193500 -0.48923200 -0.00071000
C 1.17294200 0.19529900 0.00000000
O 0.00000100 -0.58914400 0.00001900
C -1.17294400 0.19529800 0.00000000
H -2.02193900 -0.48923000 -0.00106500
H 1.23374500 0.83653400 0.89241600
H 1.23300600 0.83748100 -0.89177200
H -1.23391300 0.83628100 0.89258300
H -1.23283200 0.83773500 -0.89160200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3ch2oh', 'reagent')] = qcdb.Molecule("""
0 1
H 2.07592400 0.45870400 -0.00139100
C 1.22080500 -0.22279500 0.00033800
C -0.08478500 0.55050000 -0.00015000
O -1.15070400 -0.39819000 -0.00039200
H -1.98159000 0.08549700 0.00300000
H 1.28351300 -0.86183700 -0.88367400
H 1.28455500 -0.85892900 0.88610000
H -0.13737200 1.19906600 0.88662100
H -0.13551500 1.19679300 -0.88864500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cch', 'reagent')] = qcdb.Molecule("""
0 2
C 0.11561300 -0.00003000 -0.00001300
C 1.33791000 0.00002600 -0.00000900
H 2.40011400 -0.00004900 0.00007800
C -1.25132200 0.00000500 -0.00000500
H -1.80663200 0.93004300 0.00004100
H -1.80668700 -0.92999900 0.00004100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c2h4', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000000 0.66349000 0.00000000
C 0.00000000 -0.66349000 0.00000000
H 0.00000000 1.23461800 0.92256200
H 0.00000000 1.23461800 -0.92256200
H 0.00000000 -1.23461800 -0.92256200
H 0.00000000 -1.23461800 0.92256200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2sh', 'reagent')] = qcdb.Molecule("""
0 2
H 1.68655500 -0.90997600 0.00055800
C 1.14729300 0.02417600 -0.03176100
S -0.58501000 -0.08911600 0.00912900
H -0.86056300 1.22722500 -0.09184000
H 1.65041300 0.96355500 0.13578900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'cho', 'reagent')] = qcdb.Molecule("""
0 2
C 0.06228700 0.58421000 0.00000000
O 0.06228700 -0.59005800 0.00000000
H -0.87201400 1.21520500 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cooh', 'reagent')] = qcdb.Molecule("""
0 1
H -1.91951200 0.83381100 -0.00001900
C -1.39368200 -0.11802900 -0.00000300
C 0.09176300 0.12824400 -0.00001100
O 0.78506300 -1.03886100 0.00000700
H 1.72359800 -0.79889600 0.00001600
H -1.67259500 -0.70149700 0.88023200
H -1.67259900 -0.70153300 -0.88021200
O 0.63401500 1.20221400 0.00000100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cn', 'reagent')] = qcdb.Molecule("""
0 2
C 0.18727900 -0.00000100 -0.00000500
N 1.35587400 0.00000100 0.00000300
C -1.19102700 0.00000000 0.00000000
H -1.73431100 0.93521500 0.00000700
H -1.73431500 -0.93521200 0.00000700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ch3', 'reagent')] = qcdb.Molecule("""
0 2
C 0.79404000 -0.00001000 -0.01898900
C -0.69354400 0.00011200 -0.00171900
H 1.35165100 -0.92648600 0.04178700
H 1.35211600 0.92615600 0.04184900
H -1.10700000 0.89016100 -0.48573900
H -1.10738100 -0.88205600 -0.49991300
H -1.09236600 -0.00838900 1.02626600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2nhcho', 'reagent')] = qcdb.Molecule("""
0 2
C -1.44716800 -0.49182200 -0.00025100
N -0.57509300 0.57056600 -0.00005000
C 0.79897400 0.44788700 0.00008900
H -2.50479900 -0.29017100 0.00146300
H -1.02384400 -1.48129200 0.00008900
H -0.95098500 1.50778300 -0.00055400
H 1.30076800 1.43105100 0.00039600
O 1.38670900 -0.61221600 -0.00000900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2oh', 'reagent')] = qcdb.Molecule("""
0 2
H -1.23081700 -0.88927600 0.10207100
C -0.68449600 0.02764800 -0.06653200
O 0.67116100 -0.12516300 0.02350300
H 1.09168100 0.73511600 -0.06829800
H -1.12317500 0.98957200 0.17739600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2nh3', 'reagent')] = qcdb.Molecule("""
1 2
H 1.31094400 0.95958500 0.06254700
C 0.82661100 -0.00000600 -0.02780600
N -0.64330400 0.00002700 0.00195700
H 1.31073200 -0.95972200 0.06250000
H -1.01795500 -0.00400200 0.96403900
H -1.03003400 0.82965200 -0.46454100
H -1.03022800 -0.82566500 -0.47141100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3ph3', 'reagent')] = qcdb.Molecule("""
1 1
H 1.58306100 0.47144600 -0.91637400
C 1.22355800 -0.00000600 0.00000000
P -0.58438900 0.00000100 0.00000200
H 1.58314400 -1.02930900 0.04987600
H 1.58312400 0.55787600 0.86644700
H -1.10827300 -0.59602900 1.15572600
H -1.10823400 1.29890900 -0.06168600
H -1.10833400 -0.70287400 -1.09402100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2och3', 'reagent')] = qcdb.Molecule("""
0 2
C 1.20368100 0.22731000 0.07010300
O 0.09172500 -0.54123500 -0.03988900
C -1.13925700 0.16821300 0.01374700
H -1.93367000 -0.56449600 -0.12107400
H 2.12393300 -0.33163800 -0.02661000
H 1.14335000 1.27002200 -0.23072900
H -1.25993800 0.66692800 0.98160700
H -1.19401900 0.91592200 -0.78718500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'nh2chcn', 'reagent')] = qcdb.Molecule("""
0 2
H -2.45610400 -0.01444300 0.23937200
N -1.55523600 -0.34528300 -0.07015500
C -0.49903400 0.53856300 0.01366400
C 0.81999200 0.11101400 0.00107900
N 1.93043500 -0.24950800 0.00385500
H -1.37038800 -1.31387700 0.14493400
H -0.72565300 1.59439500 -0.00866900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2sch3', 'reagent')] = qcdb.Molecule("""
0 2
C -1.38974300 0.43014600 0.02091500
S 0.11017200 -0.60914300 -0.01904600
C 1.35489400 0.58185000 0.04179000
H -2.24570300 -0.24284500 -0.02480000
H -1.42650300 1.00472000 0.94638700
H -1.41259000 1.09892200 -0.84044700
H 1.16313900 1.59665400 -0.27952900
H 2.36799900 0.21686200 0.12689900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'chcl2', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00000000 0.68873000 -0.09737200
CL -1.48435900 -0.17167400 0.00930300
CL 1.48435900 -0.17167400 0.00930300
H 0.00000000 1.70452500 0.26792100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3nh3', 'reagent')] = qcdb.Molecule("""
1 1
H 1.14365300 -0.16818600 1.01921200
C 0.80331800 -0.00000100 -0.00000600
N -0.71232800 0.00000000 0.00000700
H 1.14367100 0.96675700 -0.36395500
H 1.14364200 -0.79859600 -0.65523500
H -1.08821900 -0.89243500 0.33598100
H -1.08817300 0.73722000 0.60485500
H -1.08818700 0.15524400 -0.94087200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3ch3', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.76522200 0.00000000
C 0.00000000 0.76522200 0.00000000
H 1.01839600 -1.16365400 0.00000000
H -0.50924700 -1.16358500 0.88196300
H -0.50924700 -1.16358500 -0.88196300
H -1.01839600 1.16365300 0.00000000
H 0.50924900 1.16358400 0.88196200
H 0.50924900 1.16358400 -0.88196200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3no2', 'reagent')] = qcdb.Molecule("""
0 1
C -0.03891200 -1.32592100 0.00000000
H 0.49240200 -1.65949700 0.88961400
H 0.49240200 -1.65949700 -0.88961400
H -1.07696300 -1.64238400 0.00000000
O -1.06579500 0.76938100 0.00000000
O 1.10649900 0.69073400 0.00000000
N 0.00000000 0.17657000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2no2', 'reagent')] = qcdb.Molecule("""
0 2
C -1.30974800 -0.00015200 -0.00012900
H -1.79634400 0.96237700 0.00044400
H -1.79649800 -0.96262600 0.00011800
O 0.66955200 -1.09726400 0.00001500
O 0.66919400 1.09753900 -0.00002000
N 0.10590800 -0.00014800 0.00003600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'cyclopropyl', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00000 0.87112 -0.16751
C 0.76811 -0.36353 0.0305
C -0.76811 -0.36353 0.0305
H 0.00000 1.81695 0.35474
H -1.25447 -0.82499 -0.82552
H -1.29862 -0.51568 0.9677
H 1.29862 -0.51568 0.9677
H 1.25447 -0.82499 -0.82552
units angstrom
""")
|
ashutoshvt/psi4
|
psi4/share/psi4/databases/RSE42.py
|
Python
|
lgpl-3.0
| 77,097
|
[
"Psi4"
] |
2b347ea756ece66b5421e650d56666925dace04e258a4c9e88baaefba7895373
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, Matthew Harrigan, Carlos Xavier Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import pickle
import tempfile
import mdtraj as md
import numpy as np
import pytest
from mdtraj.testing import eq
try:
from simtk.openmm import app
import simtk.unit as u
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
needs_openmm = pytest.mark.skipif(not HAVE_OPENMM, reason='needs OpenMM')
@needs_openmm
def test_topology_openmm(get_fn):
topology = md.load(get_fn('1bpi.pdb')).topology
topology_with_bond_order = md.load(get_fn('imatinib.mol2')).topology
    # The OpenMM topology doesn't distinguish between resSeq and index,
    # so if they disagree on the MDTraj side, that distinction can't be
    # preserved through the round trip.
for top in [topology, topology_with_bond_order]:
for residue in top.residues:
residue.resSeq = residue.index
mm = top.to_openmm()
assert isinstance(mm, app.Topology)
topology2 = md.Topology.from_openmm(mm)
eq(top, topology2)
@needs_openmm
def test_topology_openmm_boxes(get_fn):
traj = md.load(get_fn('1vii_sustiva_water.pdb'))
mmtop = traj.topology.to_openmm(traj=traj)
box = mmtop.getUnitCellDimensions() / u.nanometer
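    # getUnitCellDimensions() returns None when no box information was set, so
    # the conversion above doubles as a check that to_openmm(traj=traj) carried
    # the periodic box over to the OpenMM topology.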
def test_topology_pandas(get_fn):
topology = md.load(get_fn('native.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
# Make sure default argument of None works, see issue #774
topology3 = md.Topology.from_dataframe(atoms)
def test_topology_pandas_TIP4PEW(get_fn):
topology = md.load(get_fn('GG-tip4pew.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
def test_topology_numbers(get_fn):
topology = md.load(get_fn('1bpi.pdb')).topology
assert len(list(topology.atoms)) == topology.n_atoms
assert len(list(topology.residues)) == topology.n_residues
assert all([topology.atom(i).index == i for i in range(topology.n_atoms)])
def test_topology_unique_elements_bpti(get_fn):
traj = md.load(get_fn('bpti.pdb'))
top, bonds = traj.top.to_dataframe()
atoms = np.unique(["C", "O", "N", "H", "S"])
eq(atoms, np.unique(top.element.values))
def test_chain(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
chain = top.chain(0)
assert chain.n_residues == len(list(chain.residues))
atoms = list(chain.atoms)
assert chain.n_atoms == len(atoms)
for i in range(chain.n_atoms):
assert atoms[i] == chain.atom(i)
def test_residue(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
residue = top.residue(0)
assert len(list(residue.atoms)) == residue.n_atoms
atoms = list(residue.atoms)
for i in range(residue.n_atoms):
assert residue.atom(i) == atoms[i]
def test_segment_id(get_fn):
top = md.load(get_fn('ala_ala_ala.pdb')).topology
assert next(top.residues).segment_id == "AAL", "Segment id is not being assigned correctly for ala_ala_ala.psf"
df = top.to_dataframe()[0]
assert len(df["segmentID"] == "AAL") == len(
df), "Segment id is not being assigned correctly to topology data frame ala_ala_ala.psf"
def test_nonconsecutive_resSeq(get_fn):
t = md.load(get_fn('nonconsecutive_resSeq.pdb'))
assert eq(np.array([r.resSeq for r in t.top.residues]), np.array([1, 3, 5]))
df1 = t.top.to_dataframe()
df2 = md.Topology.from_dataframe(*df1).to_dataframe()
assert eq(df1[0], df2[0])
# round-trip through a PDB load/save loop
fd, fname = tempfile.mkstemp(suffix='.pdb')
os.close(fd)
t.save(fname)
t2 = md.load(fname)
assert eq(df1[0], t2.top.to_dataframe()[0])
os.unlink(fname)
def test_pickle(get_fn):
# test pickling of topology (bug #391)
topology_without_bond_order = md.load(get_fn('bpti.pdb')).topology
topology_with_bond_order = md.load(get_fn('imatinib.mol2')).topology
for top in [topology_with_bond_order, topology_without_bond_order]:
loaded_top = pickle.loads(pickle.dumps(top))
assert loaded_top == top
def test_atoms_by_name(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
atoms = list(top.atoms)
for atom1, atom2 in zip(top.atoms_by_name('CA'), top.chain(0).atoms_by_name('CA')):
assert atom1 == atom2
assert atom1 in atoms
assert atom1.name == 'CA'
assert len(list(top.atoms_by_name('CA'))) == sum(1 for _ in atoms if _.name == 'CA')
assert top.residue(15).atom('CA') == [a for a in top.residue(15).atoms if a.name == 'CA'][0]
with pytest.raises(KeyError):
top.residue(15).atom('sdfsdf')
def test_select_atom_indices(get_fn):
top = md.load(get_fn('native.pdb')).topology
assert eq(top.select_atom_indices('alpha'), np.array([8]))
assert eq(top.select_atom_indices('minimal'),
np.array([4, 5, 6, 8, 10, 14, 15, 16, 18]))
with pytest.raises(ValueError):
top.select_atom_indices('sdfsdf')
@needs_openmm
def test_top_dataframe_openmm_roundtrip(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
top, bonds = t.top.to_dataframe()
t.topology = md.Topology.from_dataframe(top, bonds)
omm_top = t.top.to_openmm()
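    # smoke test only: the point is simply that the dataframe -> Topology ->
    # OpenMM conversion chain runs without raising.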
def test_n_bonds(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
for atom in t.top.atoms:
if atom.element.symbol == 'H':
assert atom.n_bonds == 1
elif atom.element.symbol == 'C':
assert atom.n_bonds in [3, 4]
elif atom.element.symbol == 'O':
assert atom.n_bonds in [1, 2]
def test_load_unknown_topology(get_fn):
try:
md.load(get_fn('frame0.dcd'), top=get_fn('frame0.dcd'))
except IOError as e:
        # we want to make sure there's a nice error message that includes
        # a list of the supported topology formats.
assert all(s in str(e) for s in ('.pdb', '.psf', '.prmtop'))
else:
assert False # fail
def test_unique_pairs():
n = 10
a = np.arange(n)
b = np.arange(n, n + n)
eq(md.Topology._unique_pairs(a, a).sort(), md.Topology._unique_pairs_equal(a).sort())
eq(md.Topology._unique_pairs(a, b).sort(), md.Topology._unique_pairs_mutually_exclusive(a, b).sort())
def test_select_pairs(get_fn):
traj = md.load(get_fn('tip3p_300K_1ATM.pdb'))
select_pairs = traj.top.select_pairs
assert len(select_pairs(selection1='name O', selection2='name O')) == 258 * (258 - 1) // 2
assert len(select_pairs(selection1='name H1', selection2='name O')) == 258 * 258
selections = iter([
# Equal
("(name O) or (name =~ 'H.*')", "(name O) or (name =~ 'H.*')"),
('all', 'all'),
# Exclusive
('name O', 'name H1'),
('name H1', 'name O'),
# Overlap
(range(traj.n_atoms), 'name O'),
('all', 'name O')])
for select1, select2 in selections:
select3, select4 = next(selections)
assert eq(select_pairs(selection1=select1, selection2=select2).sort(),
select_pairs(selection1=select3, selection2=select4).sort())
def test_to_fasta(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
assert t.topology.to_fasta(0) == "ENFSGGCVAGYMRTPDGRCKPTFYQLIT"
def test_subset(get_fn):
t1 = md.load(get_fn('2EQQ.pdb')).top
t2 = t1.subset([1, 2, 3])
assert t2.n_residues == 1
def test_molecules(get_fn):
top = md.load(get_fn('4OH9.pdb')).topology
molecules = top.find_molecules()
assert sum(len(mol) for mol in molecules) == top.n_atoms
assert sum(1 for mol in molecules if len(mol) > 1) == 2 # All but two molecules are water
def test_copy_and_hash(get_fn):
t = md.load(get_fn('traj.h5'))
t1 = t.topology
t2 = t.topology.copy()
assert t1 == t2
assert hash(tuple(t1._chains)) == hash(tuple(t2._chains))
assert hash(tuple(t1._atoms)) == hash(tuple(t2._atoms))
assert hash(tuple(t1._bonds)) == hash(tuple(t2._bonds))
assert hash(tuple(t1._residues)) == hash(tuple(t2._residues))
assert hash(t1) == hash(t2)
|
leeping/mdtraj
|
tests/test_topology.py
|
Python
|
lgpl-2.1
| 9,078
|
[
"MDTraj",
"OpenMM"
] |
622d95724aaff0489b1de32c256dc18ff379a6a2b120576a1e3cf9642b3f23cc
|
"""Django forms for hs_core module."""
import copy
from models import Party, Creator, Contributor, validate_user_url, Relation, Source, Identifier, \
FundingAgency, Description
from django.forms import ModelForm, BaseFormSet
from django.contrib.admin.widgets import forms
from django.utils.safestring import mark_safe
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, HTML
from crispy_forms.bootstrap import Field
from hydroshare import utils
class HorizontalRadioRenderer(forms.RadioSelect.renderer):
"""Return a horizontal list of radio buttons."""
def render(self):
"""Return a newline separated list of radio button elements."""
return mark_safe(u'\n'.join([u'%s\n' % w for w in self]))
class Helper(object):
"""Render resusable elements to use in Django forms."""
@classmethod
def get_element_add_modal_form(cls, element_name, modal_form_context_name):
"""Apply a modal UI element to a given form.
Used in netCDF and modflow_modelinstance apps
"""
modal_title = "Add %s" % element_name.title()
layout = Layout(
HTML('<div class="modal fade" id="add-element-dialog" tabindex="-1" '
'role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">'
'<div class="modal-dialog">'
'<div class="modal-content">'),
HTML('<form action="{{ form.action }}" '
'method="POST" enctype="multipart/form-data"> '),
HTML('{% csrf_token %} '
'<input name="resource-mode" type="hidden" value="edit"/>'
'<div class="modal-header">'
'<button type="button" class="close" '
'data-dismiss="modal" aria-hidden="true">×'
'</button>'),
HTML('<h4 class="modal-title" id="myModalLabel"> Add Element </h4>'),
HTML('</div>'
'<div class="modal-body">'
'{% csrf_token %}'
'<div class="form-group">'),
HTML('{% load crispy_forms_tags %} {% crispy add_creator_modal_form %} '),
HTML('</div>'
'</div>'
'<div class="modal-footer">'
'<button type="button" class="btn btn-default" '
'data-dismiss="modal">Close</button>'
'<button type="submit" class="btn btn-primary">'
'Save changes</button>'
'</div>'
'</form>'
'</div>'
'</div>'
'</div>')
)
layout[0] = HTML('<div class="modal fade" id="add-%s-dialog" tabindex="-1" role="dialog" '
'aria-labelledby="myModalLabel" aria-hidden="true">'
'<div class="modal-dialog">'
'<div class="modal-content">' % element_name.lower())
layout[1] = HTML('<form action="{{ %s.action }}" method="POST" '
'enctype="multipart/form-data"> ' % modal_form_context_name)
layout[3] = HTML('<h4 class="modal-title" id="myModalLabel"> {title} '
'</h4>'.format(title=modal_title),)
html_str = '{% load crispy_forms_tags %} {% crispy' + ' add_{element}_modal_form'.format(
element=element_name.lower()) + ' %}'
layout[5] = HTML(html_str)
return layout
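    # Illustrative usage (names here are hypothetical, not taken from this
    # module); an app's form code might do something like:
    #   layout = Helper.get_element_add_modal_form('creator', 'add_creator_modal_form')
    #   some_form.helper.layout = layout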
# the 1st and the 3rd HTML layout objects get replaced in MetaDataElementDeleteForm class
def _get_modal_confirm_delete_matadata_element():
layout = Layout(
HTML('<div class="modal fade" id="delete-metadata-element-dialog" '
'tabindex="-1" role="dialog" aria-labelledby="myModalLabel" '
'aria-hidden="true">'),
HTML('<div class="modal-dialog">'
'<div class="modal-content">'
'<div class="modal-header">'
'<button type="button" class="close" data-dismiss="modal" '
'aria-hidden="true">×</button>'
'<h4 class="modal-title" id="myModalLabel">'
'Delete metadata element</h4>'
'</div>'
'<div class="modal-body">'
'<strong>Are you sure you want to delete this metadata '
'element?</strong>'
'</div>'
'<div class="modal-footer">'
'<button type="button" class="btn btn-default" '
'data-dismiss="modal">Cancel</button>'),
HTML('<a type="button" class="btn btn-danger" href="">Delete</a>'),
HTML('</div>'
'</div>'
'</div>'
'</div>'),
)
return layout
class MetaDataElementDeleteForm(forms.Form):
"""Render a modal that confirms element deletion."""
def __init__(self, res_short_id, element_name, element_id, *args, **kwargs):
"""Render a modal that confirms element deletion.
uses _get_modal_confirm_delete_matadata_element
"""
super(MetaDataElementDeleteForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.delete_element_action = '"/hydroshare/hsapi/_internal/%s/%s/%s/delete-metadata/"' % \
(res_short_id, element_name, element_id)
self.helper.layout = _get_modal_confirm_delete_matadata_element()
self.helper.layout[0] = HTML('<div class="modal fade" id="delete-%s-element-dialog_%s" '
'tabindex="-1" role="dialog" aria-labelledby="myModalLabel" '
'aria-hidden="true">' % (element_name, element_id))
self.helper.layout[2] = HTML('<a type="button" class="btn btn-danger" '
'href=%s>Delete</a>' % self.delete_element_action)
self.helper.form_tag = False
class ExtendedMetadataForm(forms.Form):
"""Render an extensible metadata form via the extended_metadata_layout kwarg."""
def __init__(self, resource_mode='edit', extended_metadata_layout=None, *args, **kwargs):
"""Render an extensible metadata form via the extended_metadata_layout kwarg."""
super(ExtendedMetadataForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = extended_metadata_layout
class CreatorFormSetHelper(FormHelper):
"""Render a creator form with custom HTML5 validation and error display."""
def __init__(self, *args, **kwargs):
"""Render a creator form with custom HTML5 validation and error display."""
super(CreatorFormSetHelper, self).__init__(*args, **kwargs)
# the order in which the model fields are listed for the FieldSet is the order
# these fields will be displayed
field_width = 'form-control input-sm'
self.form_tag = False
self.form_show_errors = True
self.error_text_inline = True
self.html5_required = True
self.layout = Layout(
Fieldset('Creator',
Field('name', css_class=field_width),
Field('description', css_class=field_width),
Field('organization', css_class=field_width),
Field('email', css_class=field_width),
Field('address', css_class=field_width),
Field('phone', css_class=field_width),
Field('homepage', css_class=field_width),
Field('order', css_class=field_width),
),
)
class PartyForm(ModelForm):
"""Render form for creating and editing Party models, aka people."""
def __init__(self, *args, **kwargs):
"""Render form for creating and editing Party models, aka people.
Removes profile link formset and renders proper description URL
"""
if 'initial' in kwargs:
if 'description' in kwargs['initial']:
if kwargs['initial']['description']:
kwargs['initial']['description'] = utils.current_site_url() + \
kwargs['initial']['description']
super(PartyForm, self).__init__(*args, **kwargs)
self.profile_link_formset = None
self.number = 0
class Meta:
"""Describe meta properties of PartyForm.
Fields that will be displayed are specified here - but not necessarily in the same order
"""
model = Party
fields = ['name', 'description', 'organization', 'email', 'address', 'phone', 'homepage']
        # TODO: field labels and widget types to be specified
labels = {'description': 'MyHPOM User Identifier (URL)'}
class CreatorForm(PartyForm):
"""Render form for creating and editing Creator models, as in creators of resources."""
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
"""Render form for creating and editing Creator models, as in creators of resources."""
super(CreatorForm, self).__init__(*args, **kwargs)
self.helper = CreatorFormSetHelper()
self.delete_modal_form = None
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/creator/add-metadata/" % res_short_id
else:
self.action = ""
if not allow_edit:
for fld_name in self.Meta.fields:
self.fields[fld_name].widget.attrs['readonly'] = True
self.fields[fld_name].widget.attrs['style'] = "background-color:white;"
self.fields['order'].widget.attrs['readonly'] = True
self.fields['order'].widget.attrs['style'] = "background-color:white;"
else:
if 'add-metadata' in self.action:
del self.fields['order']
@property
def form_id(self):
"""Render proper form id by prepending 'id_creator_'."""
form_id = 'id_creator_%s' % self.number
return form_id
@property
def form_id_button(self):
"""Render proper form id with quotes around it."""
form_id = 'id_creator_%s' % self.number
return "'" + form_id + "'"
class Meta:
"""Describe meta properties of PartyForm."""
model = Creator
fields = PartyForm.Meta.fields
fields.append("order")
labels = PartyForm.Meta.labels
class PartyValidationForm(forms.Form):
"""Validate form for Party models."""
description = forms.URLField(required=False, validators=[validate_user_url])
name = forms.CharField(required=False, max_length=100)
organization = forms.CharField(max_length=200, required=False)
email = forms.EmailField(required=False)
address = forms.CharField(max_length=250, required=False)
phone = forms.CharField(max_length=25, required=False)
homepage = forms.URLField(required=False)
def clean_description(self):
"""Create absolute URL for Party.description field."""
user_absolute_url = self.cleaned_data['description']
if user_absolute_url:
url_parts = user_absolute_url.split('/')
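            # assumes the submitted URL has the shape
            # <scheme>://<host>/hydroshare/user/<user_id>/..., so that the
            # sixth '/'-separated piece (index 5) is the user id rebuilt below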
return '/hydroshare/user/{user_id}/'.format(user_id=url_parts[5])
return user_absolute_url
def clean(self):
"""Validate that name and/or organization are present in form data."""
cleaned_data = super(PartyValidationForm, self).clean()
name = cleaned_data.get('name', None)
org = cleaned_data.get('organization', None)
if not org:
if not name or len(name.strip()) == 0:
self._errors['name'] = ["A value for name or organization is required but both "
"are missing"]
return self.cleaned_data
class CreatorValidationForm(PartyValidationForm):
"""Validate form for Creator models. Extends PartyValidationForm."""
order = forms.IntegerField(required=False)
class ContributorValidationForm(PartyValidationForm):
"""Validate form for Contributor models. Extends PartyValidationForm."""
pass
class BaseCreatorFormSet(BaseFormSet):
"""Render BaseFormSet for working with Creator models."""
def add_fields(self, form, index):
"""Pass through add_fields function to super."""
super(BaseCreatorFormSet, self).add_fields(form, index)
def get_metadata(self):
"""Collect and append creator data to form fields."""
creators_data = []
for form in self.forms:
creator_data = {k: v for k, v in form.cleaned_data.iteritems()}
if creator_data:
creators_data.append({'creator': creator_data})
return creators_data
class ContributorFormSetHelper(FormHelper):
"""Render layout for Contributor model form and activate required fields."""
def __init__(self, *args, **kwargs):
"""Render layout for Contributor model form and activate required fields."""
super(ContributorFormSetHelper, self).__init__(*args, **kwargs)
# the order in which the model fields are listed for the FieldSet is the order
# these fields will be displayed
field_width = 'form-control input-sm'
self.form_tag = False
self.layout = Layout(
Fieldset('Contributor',
Field('name', css_class=field_width),
Field('description', css_class=field_width),
Field('organization', css_class=field_width),
Field('email', css_class=field_width),
Field('address', css_class=field_width),
Field('phone', css_class=field_width),
Field('homepage', css_class=field_width),
),
)
        self.render_required_fields = True
class ContributorForm(PartyForm):
"""Render Contributor model form with appropriate attributes."""
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
"""Render Contributor model form with appropriate attributes."""
super(ContributorForm, self).__init__(*args, **kwargs)
self.helper = ContributorFormSetHelper()
self.delete_modal_form = None
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/contributor/add-metadata/" % res_short_id
else:
self.action = ""
if not allow_edit:
for fld_name in self.Meta.fields:
self.fields[fld_name].widget.attrs['readonly'] = True
self.fields[fld_name].widget.attrs['style'] = "background-color:white;"
@property
def form_id(self):
"""Render proper form id by prepending 'id_contributor_'."""
form_id = 'id_contributor_%s' % self.number
return form_id
@property
def form_id_button(self):
"""Render proper form id with quotes around it."""
form_id = 'id_contributor_%s' % self.number
return "'" + form_id + "'"
class Meta:
"""Describe meta properties of ContributorForm, removing 'order' field."""
model = Contributor
        # build an independent field list (without 'order') instead of mutating
        # the list shared via PartyForm.Meta.fields
        fields = [f for f in PartyForm.Meta.fields if f != 'order']
        labels = PartyForm.Meta.labels
class BaseContributorFormSet(BaseFormSet):
"""Render BaseFormSet for working with Contributor models."""
def add_fields(self, form, index):
"""Pass through add_fields function to super."""
super(BaseContributorFormSet, self).add_fields(form, index)
def get_metadata(self):
"""Collect and append contributor data to form fields."""
contributors_data = []
for form in self.forms:
contributor_data = {k: v for k, v in form.cleaned_data.iteritems()}
if contributor_data:
contributors_data.append({'contributor': contributor_data})
return contributors_data
class RelationFormSetHelper(FormHelper):
"""Render layout for Relation form including HTML5 valdiation and errors."""
def __init__(self, *args, **kwargs):
"""Render layout for Relation form including HTML5 valdiation and errors."""
super(RelationFormSetHelper, self).__init__(*args, **kwargs)
# the order in which the model fields are listed for the FieldSet is the order
# these fields will be displayed
field_width = 'form-control input-sm'
self.form_tag = False
self.form_show_errors = True
self.error_text_inline = True
self.html5_required = False
self.layout = Layout(
Fieldset('Relation',
Field('type', css_class=field_width),
Field('value', css_class=field_width),
),
)
class RelationForm(ModelForm):
"""Render Relation model form with appropriate attributes."""
def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
"""Render Relation model form with appropriate attributes."""
super(RelationForm, self).__init__(*args, **kwargs)
self.helper = RelationFormSetHelper()
self.number = 0
self.delete_modal_form = None
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/relation/add-metadata/" % res_short_id
else:
self.action = ""
if not allow_edit:
for fld_name in self.Meta.fields:
self.fields[fld_name].widget.attrs['readonly'] = True
self.fields[fld_name].widget.attrs['style'] = "background-color:white;"
@property
def form_id(self):
"""Render proper form id by prepending 'id_relation_'."""
form_id = 'id_relation_%s' % self.number
return form_id
@property
def form_id_button(self):
"""Render form_id with quotes around it."""
form_id = 'id_relation_%s' % self.number
return "'" + form_id + "'"
class Meta:
"""Describe meta properties of RelationForm."""
model = Relation
# fields that will be displayed are specified here - but not necessarily in the same order
fields = ['type', 'value']
labels = {'type': 'Relation type', 'value': 'Related to'}
class RelationValidationForm(forms.Form):
"""Validate RelationForm 'type' and 'value' CharFields."""
type = forms.CharField(max_length=100)
value = forms.CharField(max_length=500)
class SourceFormSetHelper(FormHelper):
"""Render layout for Source form including HTML5 valdiation and errors."""
def __init__(self, *args, **kwargs):
"""Render layout for Source form including HTML5 valdiation and errors."""
super(SourceFormSetHelper, self).__init__(*args, **kwargs)
# the order in which the model fields are listed for the FieldSet is the order these
# fields will be displayed
field_width = 'form-control input-sm'
self.form_tag = False
self.form_show_errors = True
self.error_text_inline = True
self.html5_required = False
self.layout = Layout(
Fieldset('Source',
Field('derived_from', css_class=field_width),
),
)
class SourceForm(ModelForm):
"""Render Source model form with appropriate attributes."""
def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
"""Render Source model form with appropriate attributes."""
super(SourceForm, self).__init__(*args, **kwargs)
self.helper = SourceFormSetHelper()
self.number = 0
self.delete_modal_form = None
self.allow_edit = allow_edit
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/source/add-metadata/" % res_short_id
else:
self.action = ""
if not allow_edit:
self.fields['derived_from'].widget.attrs['readonly'] = True
self.fields['derived_from'].widget.attrs['style'] = "background-color:white;"
@property
def form_id(self):
"""Render proper form id by prepending 'id_source_'."""
form_id = 'id_source_%s' % self.number
return form_id
@property
def form_id_button(self):
"""Render proper form id with quotes."""
form_id = 'id_source_%s' % self.number
return "'" + form_id + "'"
class Meta:
"""Define meta properties for SourceForm."""
model = Source
# fields that will be displayed are specified here - but not necessarily in the same order
fields = ['derived_from']
class SourceValidationForm(forms.Form):
"""Validate derived_from field from SourceForm."""
derived_from = forms.CharField(max_length=300)
class IdentifierFormSetHelper(FormHelper):
"""Render layout for Identifier form including HTML5 valdiation and errors."""
def __init__(self, *args, **kwargs):
"""Render layout for Identifier form including HTML5 valdiation and errors."""
super(IdentifierFormSetHelper, self).__init__(*args, **kwargs)
# the order in which the model fields are listed for the FieldSet is the order these
# fields will be displayed
field_width = 'form-control input-sm'
self.form_tag = False
self.form_show_errors = True
self.error_text_inline = True
self.html5_required = True
self.layout = Layout(
Fieldset('Identifier',
Field('name', css_class=field_width),
Field('url', css_class=field_width),
),
)
class IdentifierForm(ModelForm):
"""Render Identifier model form with appropriate attributes."""
def __init__(self, res_short_id=None, *args, **kwargs):
"""Render Identifier model form with appropriate attributes."""
super(IdentifierForm, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs['readonly'] = True
self.fields['name'].widget.attrs['style'] = "background-color:white;"
self.fields['url'].widget.attrs['readonly'] = True
self.fields['url'].widget.attrs['style'] = "background-color:white;"
self.helper = IdentifierFormSetHelper()
self.number = 0
self.delete_modal_form = None
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/identifier/add-metadata/" % res_short_id
else:
self.action = ""
class Meta:
"""Define meta properties for IdentifierForm class."""
model = Identifier
# fields that will be displayed are specified here - but not necessarily in the same order
fields = ['name', 'url']
def clean(self):
"""Ensure that identifier name attribute is not blank."""
data = self.cleaned_data
if data['name'].lower() == 'hydroshareidentifier':
raise forms.ValidationError("Identifier name attribute can't have a value "
"of '{}'.".format(data['name']))
return data
class FundingAgencyFormSetHelper(FormHelper):
"""Render layout for FundingAgency form."""
def __init__(self, *args, **kwargs):
"""Render layout for FundingAgency form."""
super(FundingAgencyFormSetHelper, self).__init__(*args, **kwargs)
# the order in which the model fields are listed for the FieldSet is the order these
# fields will be displayed
field_width = 'form-control input-sm'
self.form_tag = False
self.form_show_errors = True
self.error_text_inline = True
self.html5_required = False
self.layout = Layout(
Fieldset('Funding Agency',
Field('agency_name', css_class=field_width),
Field('award_title', css_class=field_width),
Field('award_number', css_class=field_width),
Field('agency_url', css_class=field_width),
),
)
class FundingAgencyForm(ModelForm):
"""Render FundingAgency model form with appropriate attributes."""
def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
"""Render FundingAgency model form with appropriate attributes."""
super(FundingAgencyForm, self).__init__(*args, **kwargs)
self.helper = FundingAgencyFormSetHelper()
self.number = 0
self.delete_modal_form = None
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/fundingagency/add-metadata/" % res_short_id
else:
self.action = ""
if not allow_edit:
for fld_name in self.Meta.fields:
self.fields[fld_name].widget.attrs['readonly'] = True
self.fields[fld_name].widget.attrs['style'] = "background-color:white;"
@property
def form_id(self):
"""Render proper form id by prepending 'id_fundingagency_'."""
form_id = 'id_fundingagency_%s' % self.number
return form_id
@property
def form_id_button(self):
"""Render proper form id with quotes."""
form_id = 'id_fundingagency_%s' % self.number
return "'" + form_id + "'"
class Meta:
"""Define meta properties of FundingAgencyForm class."""
model = FundingAgency
# fields that will be displayed are specified here - but not necessarily in the same order
fields = ['agency_name', 'award_title', 'award_number', 'agency_url']
labels = {'agency_name': 'Funding agency name', 'award_title': 'Title of the award',
'award_number': 'Award number', 'agency_url': 'Agency website'}
class FundingAgencyValidationForm(forms.Form):
"""Validate FundingAgencyForm with agency_name, award_title, award_number and agency_url."""
agency_name = forms.CharField(required=True)
award_title = forms.CharField(required=False)
award_number = forms.CharField(required=False)
agency_url = forms.URLField(required=False)
class BaseFormHelper(FormHelper):
"""Render non-repeatable element related forms."""
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
element_layout=None, *args, **kwargs):
"""Render non-repeatable element related forms."""
coverage_type = kwargs.pop('coverage', None)
element_name_label = kwargs.pop('element_name_label', None)
super(BaseFormHelper, self).__init__(*args, **kwargs)
if res_short_id:
self.form_method = 'post'
self.form_tag = True
if element_name.lower() == 'coverage':
if coverage_type:
self.form_id = 'id-%s-%s' % (element_name.lower(), coverage_type)
else:
self.form_id = 'id-%s' % element_name.lower()
else:
self.form_id = 'id-%s' % element_name.lower()
if element_id:
self.form_action = "/hydroshare/hsapi/_internal/%s/%s/%s/update-metadata/" % \
(res_short_id, element_name.lower(), element_id)
else:
self.form_action = "/hydroshare/hsapi/_internal/%s/%s/add-metadata/" % (res_short_id,
element_name)
else:
self.form_tag = False
# change the first character to uppercase of the element name
element_name = element_name.title()
if element_name_label:
element_name = element_name_label
if element_name == "Subject":
element_name = "Keywords"
elif element_name == "Description":
element_name = "Abstract"
if res_short_id and allow_edit:
self.layout = Layout(
Fieldset(element_name,
element_layout,
HTML('<div style="margin-top:10px">'),
                         HTML('<button type="button" '
                              'class="btn btn-primary pull-right btn-form-submit">'
                              'Save changes</button>'),
HTML('</div>')
),
) # TODO: TESTING
else:
self.form_tag = False
self.layout = Layout(
Fieldset(element_name,
element_layout,
),
)
class TitleValidationForm(forms.Form):
"""Validate Title form with value."""
value = forms.CharField(max_length=300)
class SubjectsFormHelper(BaseFormHelper):
"""Render Subject form.
This form handles multiple subject elements - this was not implemented as formset
since we are providing one input field to enter multiple keywords (subjects) as comma
separated values
"""
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
*args, **kwargs):
"""Render subject form.
The order in which the model fields are listed for the FieldSet is the order these
fields will be displayed
"""
field_width = 'form-control input-sm'
layout = Layout(
Field('value', css_class=field_width),
)
super(SubjectsFormHelper, self).__init__(allow_edit, res_short_id, element_id,
element_name, layout, *args, **kwargs)
class SubjectsForm(forms.Form):
"""Render Subjects model form with appropriate attributes."""
value = forms.CharField(max_length=500,
label='',
widget=forms.TextInput(attrs={'placeholder': 'Keywords'}),
help_text='Enter each keyword separated by a comma.')
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
"""Render Subjects model form with appropriate attributes."""
super(SubjectsForm, self).__init__(*args, **kwargs)
self.helper = SubjectsFormHelper(allow_edit, res_short_id, element_id,
element_name='subject')
self.number = 0
self.delete_modal_form = None
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/subject/add-metadata/" % res_short_id
else:
self.action = ""
if not allow_edit:
for field in self.fields.values():
field.widget.attrs['readonly'] = True
field.widget.attrs['style'] = "background-color:white;"
class AbstractFormHelper(BaseFormHelper):
"""Render Abstract form."""
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
*args, **kwargs):
"""Render Abstract form.
The order in which the model fields are listed for the FieldSet is the order these
fields will be displayed
"""
field_width = 'form-control input-sm'
layout = Layout(
Field('abstract', css_class=field_width),
)
super(AbstractFormHelper, self).__init__(allow_edit, res_short_id, element_id,
element_name, layout, *args, **kwargs)
class AbstractForm(ModelForm):
"""Render Abstract model form with appropriate attributes."""
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
"""Render Abstract model form with appropriate attributes."""
super(AbstractForm, self).__init__(*args, **kwargs)
self.helper = AbstractFormHelper(allow_edit, res_short_id, element_id,
element_name='description')
if not allow_edit:
self.fields['abstract'].widget.attrs['disabled'] = True
self.fields['abstract'].widget.attrs['style'] = "background-color:white;"
class Meta:
"""Describe meta properties of AbstractForm."""
model = Description
fields = ['abstract']
exclude = ['content_object']
labels = {'abstract': ''}
class AbstractValidationForm(forms.Form):
"""Validate Abstract form with abstract field."""
abstract = forms.CharField(max_length=5000)
class RightsValidationForm(forms.Form):
"""Validate Rights form with statement and URL field."""
statement = forms.CharField(required=False)
url = forms.URLField(required=False, max_length=500)
def clean(self):
"""Clean data and render proper error messages."""
cleaned_data = super(RightsValidationForm, self).clean()
statement = cleaned_data.get('statement', None)
url = cleaned_data.get('url', None)
if not statement and not url:
self._errors['statement'] = ["A value for statement is missing"]
self._errors['url'] = ["A value for Url is missing"]
return self.cleaned_data
class CoverageTemporalFormHelper(BaseFormHelper):
"""Render Temporal Coverage form."""
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
*args, **kwargs):
"""Render Temporal Coverage form.
The order in which the model fields are listed for the FieldSet is the order these
fields will be displayed
"""
file_type = kwargs.pop('file_type', False)
form_field_names = ['type', 'start', 'end']
crispy_form_fields = get_crispy_form_fields(form_field_names, file_type=file_type)
layout = Layout(*crispy_form_fields)
kwargs['coverage'] = 'temporal'
super(CoverageTemporalFormHelper, self).__init__(allow_edit, res_short_id, element_id,
element_name, layout, *args, **kwargs)
class CoverageTemporalForm(forms.Form):
"""Render Coverage Temporal Form."""
start = forms.DateField(label='Start Date')
end = forms.DateField(label='End Date')
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
"""Render Coverage Temporal Form."""
file_type = kwargs.pop('file_type', False)
super(CoverageTemporalForm, self).__init__(*args, **kwargs)
self.helper = CoverageTemporalFormHelper(allow_edit, res_short_id, element_id,
element_name='Temporal Coverage',
file_type=file_type)
self.number = 0
self.delete_modal_form = None
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/coverage/add-metadata/" % res_short_id
else:
self.action = ""
if not allow_edit:
for field in self.fields.values():
field.widget.attrs['readonly'] = True
def clean(self):
"""Modify the form's cleaned_data dictionary."""
is_form_errors = False
super(CoverageTemporalForm, self).clean()
start_date = self.cleaned_data.get('start', None)
end_date = self.cleaned_data.get('end', None)
if not start_date:
self._errors['start'] = ["Data for start date is missing"]
is_form_errors = True
if not end_date:
self._errors['end'] = ["Data for end date is missing"]
is_form_errors = True
        if start_date and end_date and start_date > end_date:
            self._errors['end'] = ["End date should be a date after the start date"]
            is_form_errors = True
if is_form_errors:
return self.cleaned_data
if 'name' in self.cleaned_data:
if len(self.cleaned_data['name']) == 0:
del self.cleaned_data['name']
self.cleaned_data['start'] = self.cleaned_data['start'].isoformat()
self.cleaned_data['end'] = self.cleaned_data['end'].isoformat()
self.cleaned_data['value'] = copy.deepcopy(self.cleaned_data)
self.cleaned_data['type'] = 'period'
if 'name' in self.cleaned_data:
del self.cleaned_data['name']
del self.cleaned_data['start']
del self.cleaned_data['end']
return self.cleaned_data
class CoverageSpatialFormHelper(BaseFormHelper):
"""Render layout for CoverageSpatial form."""
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
*args, **kwargs):
"""Render layout for CoverageSpatial form."""
file_type = kwargs.pop('file_type', False)
layout = Layout()
# the order in which the model fields are listed for the FieldSet is the order these
# fields will be displayed
layout.append(Field('type', id="id_{}_filetype".format('type') if file_type else
"id_{}".format('type')))
form_field_names = ['name', 'projection', 'east', 'north', 'northlimit', 'eastlimit',
'southlimit', 'westlimit', 'units']
crispy_form_fields = get_crispy_form_fields(form_field_names, file_type=file_type)
for field in crispy_form_fields:
layout.append(field)
kwargs['coverage'] = 'spatial'
super(CoverageSpatialFormHelper, self).__init__(allow_edit, res_short_id, element_id,
element_name, layout, *args, **kwargs)
class CoverageSpatialForm(forms.Form):
"""Render CoverateSpatial form."""
TYPE_CHOICES = (
('box', 'Box'),
('point', 'Point')
)
type = forms.ChoiceField(choices=TYPE_CHOICES,
widget=forms.RadioSelect(renderer=HorizontalRadioRenderer), label='')
name = forms.CharField(max_length=200, required=False, label='Place/Area Name')
projection = forms.CharField(max_length=100, required=False,
label='Coordinate System/Geographic Projection')
east = forms.DecimalField(label='Longitude', widget=forms.TextInput())
north = forms.DecimalField(label='Latitude', widget=forms.TextInput())
units = forms.CharField(max_length=50, label='Coordinate Units')
northlimit = forms.DecimalField(label='North Latitude', widget=forms.TextInput())
eastlimit = forms.DecimalField(label='East Longitude', widget=forms.TextInput())
southlimit = forms.DecimalField(label='South Latitude', widget=forms.TextInput())
westlimit = forms.DecimalField(label='West Longitude', widget=forms.TextInput())
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
"""Render CoverateSpatial form."""
file_type = kwargs.pop('file_type', False)
super(CoverageSpatialForm, self).__init__(*args, **kwargs)
self.helper = CoverageSpatialFormHelper(allow_edit, res_short_id, element_id,
element_name='Spatial Coverage',
file_type=file_type)
self.number = 0
self.delete_modal_form = None
if self.errors:
self.errors.clear()
if res_short_id:
self.action = "/hydroshare/hsapi/_internal/%s/coverage/add-metadata/" % res_short_id
else:
self.action = ""
if len(self.initial) > 0:
self.initial['projection'] = 'WGS 84 EPSG:4326'
self.initial['units'] = 'Decimal degrees'
else:
self.fields['type'].widget.attrs['checked'] = 'checked'
self.fields['projection'].widget.attrs['value'] = 'WGS 84 EPSG:4326'
self.fields['units'].widget.attrs['value'] = 'Decimal degrees'
if not allow_edit:
for field in self.fields.values():
field.widget.attrs['readonly'] = True
else:
self.fields['projection'].widget.attrs['readonly'] = True
self.fields['units'].widget.attrs['readonly'] = True
def clean(self):
"""Modify the form's cleaned_data dictionary."""
super(CoverageSpatialForm, self).clean()
temp_cleaned_data = copy.deepcopy(self.cleaned_data)
spatial_coverage_type = temp_cleaned_data['type']
is_form_errors = False
if self.errors:
self.errors.clear()
if spatial_coverage_type == 'point':
north = temp_cleaned_data.get('north', None)
east = temp_cleaned_data.get('east', None)
if not north:
self._errors['north'] = ["Data for north is missing"]
is_form_errors = True
del self.cleaned_data['north']
if not east:
self._errors['east'] = ["Data for east is missing"]
is_form_errors = True
del self.cleaned_data['east']
if is_form_errors:
return self.cleaned_data
if 'northlimit' in temp_cleaned_data:
del temp_cleaned_data['northlimit']
            if 'eastlimit' in temp_cleaned_data:
del temp_cleaned_data['eastlimit']
if 'southlimit' in temp_cleaned_data:
del temp_cleaned_data['southlimit']
if 'westlimit' in temp_cleaned_data:
del temp_cleaned_data['westlimit']
if 'uplimit' in temp_cleaned_data:
del temp_cleaned_data['uplimit']
if 'downlimit' in temp_cleaned_data:
del temp_cleaned_data['downlimit']
temp_cleaned_data['north'] = str(temp_cleaned_data['north'])
temp_cleaned_data['east'] = str(temp_cleaned_data['east'])
else: # box type coverage
if 'north' in temp_cleaned_data:
del temp_cleaned_data['north']
if 'east' in temp_cleaned_data:
del temp_cleaned_data['east']
if 'elevation' in temp_cleaned_data:
del temp_cleaned_data['elevation']
for limit in ('northlimit', 'eastlimit', 'southlimit', 'westlimit'):
limit_data = temp_cleaned_data.get(limit, None)
if not limit_data:
self._errors[limit] = ["Data for %s is missing" % limit]
is_form_errors = True
del self.cleaned_data[limit]
if is_form_errors:
return self.cleaned_data
temp_cleaned_data['northlimit'] = str(temp_cleaned_data['northlimit'])
temp_cleaned_data['eastlimit'] = str(temp_cleaned_data['eastlimit'])
temp_cleaned_data['southlimit'] = str(temp_cleaned_data['southlimit'])
temp_cleaned_data['westlimit'] = str(temp_cleaned_data['westlimit'])
del temp_cleaned_data['type']
if 'projection' in temp_cleaned_data:
if len(temp_cleaned_data['projection']) == 0:
del temp_cleaned_data['projection']
if 'name' in temp_cleaned_data:
if len(temp_cleaned_data['name']) == 0:
del temp_cleaned_data['name']
self.cleaned_data['value'] = copy.deepcopy(temp_cleaned_data)
if 'northlimit' in self.cleaned_data:
del self.cleaned_data['northlimit']
if 'eastlimit' in self.cleaned_data:
del self.cleaned_data['eastlimit']
if 'southlimit' in self.cleaned_data:
del self.cleaned_data['southlimit']
if 'westlimit' in self.cleaned_data:
del self.cleaned_data['westlimit']
if 'uplimit' in self.cleaned_data:
del self.cleaned_data['uplimit']
if 'downlimit' in self.cleaned_data:
del self.cleaned_data['downlimit']
if 'north' in self.cleaned_data:
del self.cleaned_data['north']
if 'east' in self.cleaned_data:
del self.cleaned_data['east']
if 'elevation' in self.cleaned_data:
del self.cleaned_data['elevation']
if 'name' in self.cleaned_data:
del self.cleaned_data['name']
if 'units' in self.cleaned_data:
del self.cleaned_data['units']
if 'zunits' in self.cleaned_data:
del self.cleaned_data['zunits']
if 'projection' in self.cleaned_data:
del self.cleaned_data['projection']
return self.cleaned_data
class LanguageValidationForm(forms.Form):
"""Validate LanguageValidation form with code attribute."""
code = forms.CharField(max_length=3)
class ValidDateValidationForm(forms.Form):
"""Validate DateValidationForm with start_date and end_date attribute."""
start_date = forms.DateField()
end_date = forms.DateField()
def clean(self):
"""Modify the form's cleaned data dictionary."""
cleaned_data = super(ValidDateValidationForm, self).clean()
start_date = cleaned_data.get('start_date', None)
end_date = cleaned_data.get('end_date', None)
if start_date and not end_date:
self._errors['end_date'] = ["End date is missing"]
if end_date and not start_date:
self._errors['start_date'] = ["Start date is missing"]
if not start_date and not end_date:
del self._errors['start_date']
del self._errors['end_date']
if start_date and end_date:
self.cleaned_data['type'] = 'valid'
return self.cleaned_data
def get_crispy_form_fields(field_names, file_type=False):
"""Return a list of objects of type Field.
:param field_names: list of form field names
:param file_type: if true, then this is a metadata form for file type, otherwise, a form
for resource
:return: a list of Field objects
"""
crispy_fields = []
def get_field_id(field_name):
if file_type:
return "id_{}_filetype".format(field_name)
return "id_{}".format(field_name)
for field_name in field_names:
crispy_fields.append(Field(field_name, css_class='form-control input-sm',
id=get_field_id(field_name)))
return crispy_fields
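# Minimal usage sketch (illustrative only, not part of the module above): it shows the
# element-id convention produced by get_crispy_form_fields. Resource-level forms get ids
# like "id_start", while file-type metadata forms get "id_start_filetype". The field
# names 'start' and 'end' are just examples.
if __name__ == '__main__':
    resource_fields = get_crispy_form_fields(['start', 'end'])
    file_type_fields = get_crispy_form_fields(['start', 'end'], file_type=True)
    # one crispy Field object per requested field name in both cases
    assert len(resource_fields) == 2 and len(file_type_fields) == 2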
| ResearchSoftwareInstitute/MyHPOM | hs_core/forms.py | Python | bsd-3-clause | 46,995 | ["NetCDF"] | cdb1dc114cbc76ee701ec923e5420e14949a768b7cf76894df6794d2e2b28a9d |
#!/usr/bin/env python
'''
Phillip Kuznetsov
2017
'''
from __future__ import print_function
import os, sys
os.environ['GLOG_minloglevel'] = '2' # suppress Caffe verbose prints
import re
import settings
sys.path.insert(0, settings.caffe_root)
import caffe
import numpy as np
from numpy.linalg import norm
import scipy.misc, scipy.io
import argparse
import util
from sampler import Sampler
from masks import get_mask, combine_masks
if settings.gpu:
#caffe.set_device(1) # GPU ID
caffe.set_mode_gpu() # sampling on GPU (recommended for speed)
class ClassConditionalSampler(Sampler):
def __init__ (self):
# Load the list of class names
with open(settings.synset_file, 'r') as synset_file:
self.class_names = [ line.split(",")[0].split(" ", 1)[1].rstrip('\n') for line in synset_file.readlines()]
# Hard-coded list of layers that has been tested
self.fc_layers = ["fc6", "fc7", "fc8", "loss3/classifier", "fc1000", "prob"]
self.conv_layers = ["conv1", "conv2", "conv3", "conv4", "conv5"]
def forward_backward_from_x_to_condition(self, net, end, image, condition):
'''
Forward and backward passes through 'net', the condition model p(y|x), here an image classifier.
'''
unit = condition['unit']
xy = condition['xy']
dst = net.blobs[end]
acts = net.forward(data=image, end=end)
one_hot = np.zeros_like(dst.data)
# Get the activations
if end in self.fc_layers:
layer_acts = acts[end][0]
elif end in self.conv_layers:
layer_acts = acts[end][0, :, xy, xy]
best_unit = layer_acts.argmax() # highest probability unit
# Compute the softmax probs by hand because it's handy in case we want to condition on hidden units as well
exp_acts = np.exp(layer_acts - np.max(layer_acts))
probs = exp_acts / (1e-10 + np.sum(exp_acts, keepdims=True))
# The gradient of log of softmax, log(p(y|x)), reduces to:
softmax_grad = 1 - probs.copy()
obj_prob = probs.flat[unit]
# Assign the gradient
if end in self.fc_layers:
one_hot.flat[unit] = softmax_grad[unit]
elif end in self.conv_layers:
one_hot[:, unit, xy, xy] = softmax_grad[unit]
else:
raise Exception("Invalid layer type!")
dst.diff[:] = one_hot
# Backpropagate the gradient to the image layer
diffs = net.backward(start=end, diffs=['data'])
g = diffs['data'].copy()
dst.diff.fill(0.) # reset objective after each step
# Info to be printed out in the below 'print_progress' method
info = {
'best_unit': best_unit,
'best_unit_prob': probs.flat[best_unit]
}
return g, obj_prob, info
def get_label(self, condition):
unit = condition['unit']
return self.class_names[unit]
def print_progress(self, i, info, condition, prob, grad):
print("step: %04d\t max: %4s [%.2f]\t obj: %4s [%.2f]\t norm: [%.2f]" % ( i, info['best_unit'], info['best_unit_prob'], condition['unit'], prob, norm(grad) ))
def main():
    parser = argparse.ArgumentParser(description='Class-conditional sampling with multiple image reconstruction losses.')
    parser.add_argument('--units', metavar='units', type=str, help='a unit to visualize e.g. [0, 999]')
parser.add_argument('--n_iters', metavar='iter', type=int, default=10, help='Number of sampling steps per each unit')
parser.add_argument('--threshold', metavar='w', type=float, default=-1.0, nargs='?', help='The probability threshold to decide whether to keep an image')
parser.add_argument('--save_every', metavar='save_iter', type=int, default=1, help='Save a sample every N iterations. 0 to disable saving')
parser.add_argument('--reset_every', metavar='reset_iter', type=int, default=0, help='Reset the code every N iterations')
parser.add_argument('--lr', metavar='lr', type=float, default=2.0, nargs='?', help='Learning rate')
parser.add_argument('--lr_end', metavar='lr', type=float, default=-1.0, nargs='?', help='Ending Learning rate')
parser.add_argument('--epsilon2', metavar='eps', type=float, default=1.0, nargs='?', help='Scalar for condition ')
parser.add_argument('--epsilon1', metavar='eps', type=float, default=1.0, nargs='?', help='Scalar for prior')
parser.add_argument('--epsilon3', metavar='eps', type=float, default=1.0, nargs='?', help='Scalar for noise')
parser.add_argument('--mask_epsilon', metavar='eps', type=float, default=1e-6, nargs='?', help='Scalar for mask loss')
parser.add_argument('--edge_epsilon', metavar='eps', type=float, default=1.0, nargs='?', help='Scalar for edge loss')
parser.add_argument('--content_epsilon', metavar='eps', type=float, default=1.0, nargs='?', help='Scalar for content loss')
parser.add_argument('--style_epsilon', metavar='eps', type=float, default=1.0, nargs='?', help='Scalar for style loss')
parser.add_argument('--content_layer', metavar='layer', type=str, default='conv4', nargs='?', help='Layer to use for content loss')
parser.add_argument('--mask_type', metavar='mask', type=str, default='', nargs='?', help='Mask type. Only square and random available')
parser.add_argument('--ratio_sample', metavar='eps', type=float, default=1.0, nargs='?', help='Amount to sample for random mask')
parser.add_argument('--seed', metavar='n', type=int, default=0, nargs='?', help='Random seed')
parser.add_argument('--xy', metavar='n', type=int, default=0, nargs='?', help='Spatial position for conv units')
parser.add_argument('--opt_layer', metavar='s', type=str, help='Layer at which we optimize a code')
parser.add_argument('--act_layer', metavar='s', type=str, default="fc8", help='Layer at which we activate a neuron')
parser.add_argument('--init_dir', metavar='s', type=str, default="None", help='Init image')
parser.add_argument('--write_labels', action='store_true', default=False, help='Write class labels to images')
parser.add_argument('--use_square', action='store_true', default=False, help='Whether or not to use the square')
parser.add_argument('--output_dir', metavar='b', type=str, default=".", help='Output directory for saving results')
parser.add_argument('--net_weights', metavar='b', type=str, default=settings.encoder_weights, help='Weights of the net being visualized')
parser.add_argument('--net_definition', metavar='b', type=str, default=settings.encoder_definition, help='Definition of the net being visualized')
args = parser.parse_args()
# Default to constant learning rate
if args.lr_end < 0:
args.lr_end = args.lr
# summary
print("-------------")
print(" units: %s xy: %s" % (args.units, args.xy))
print(" n_iters: %s" % args.n_iters)
print(" reset_every: %s" % args.reset_every)
print(" save_every: %s" % args.save_every)
print(" threshold: %s" % args.threshold)
print(" epsilon1: %s" % args.epsilon1)
print(" epsilon2: %s" % args.epsilon2)
print(" epsilon3: %s" % args.epsilon3)
print(" mask_epsilon: %s" % args.mask_epsilon)
print(" edge_epsilon: %s" % args.edge_epsilon)
print(" content_epsilon: %s" % args.content_epsilon)
print(" style_epsilon: %s" % args.style_epsilon)
print(" mask_type: %s" % args.mask_type)
print(" content_layer: %s" % args.content_layer)
print(" start learning rate: %s" % args.lr)
print(" end learning rate: %s" % args.lr_end)
print(" seed: %s" % args.seed)
print(" opt_layer: %s" % args.opt_layer)
print(" act_layer: %s" % args.act_layer)
print(" init_file: %s" % args.init_dir)
print("-------------")
print(" output dir: %s" % args.output_dir)
print(" net weights: %s" % args.net_weights)
print(" net definition: %s" % args.net_definition)
print("-------------")
# encoder and generator for images
encoder = caffe.Net(settings.encoder_definition, settings.encoder_weights, caffe.TEST)
generator = caffe.Net(settings.generator_definition, settings.generator_weights, caffe.TEST)
# condition network, here an image classification net
net = caffe.Classifier(args.net_definition, args.net_weights,
mean = np.float32([104.0, 117.0, 123.0]), # ImageNet mean
channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
edge_detector= caffe.Net(settings.edge_definition, caffe.TEST)
    # make Laplacian operator for edge detection
    laplace = np.array((0, -1, 0, -1, 4, -1, 0, -1, 0), dtype=np.float32).reshape((3,3))
    edge_detector.params['laplace'][0].data[0, 0, :, :] = laplace
# Fix the seed
np.random.seed(args.seed)
args = util.AttributeDict(vars(args))
    # Separate the underscore-separated list of units into numbers
    conditions = [{"unit": int(u), "xy": args.xy} for u in args.units.split("_")]
files_to_read = [os.path.join(args.init_dir, f) for f in os.listdir(args.init_dir) if 'lena' in f]
attributes = ['content_epsilon','style_epsilon', 'edge_epsilon']
# attributes = ['edge_epsilon']#,'style_epsilon', 'edge_epsilon']
masks = ['random', 'laplace', 'square_random', 'square_laplace']
eps = [(0,0,0),
(1e-4,0, 0),
(0,1e-6,0),
(0,0,1e-2),
(0,1e-6,1e-2)]
output_dir = args.output_dir
images_to_save = []
for image_file in files_to_read:
        image_name = re.split(r'\.|/', image_file)[-2]
print('image_name',image_name)
image_path = os.path.join(args.output_dir, image_name)
if not os.path.exists(image_path):
os.makedirs(image_path)
images_col = None
for i, (edge,content,style) in enumerate(eps):
sampler = ClassConditionalSampler()
start_image = sampler.load_image(shape=encoder.blobs["data"].data.shape,path=image_file, output_dir=output_dir, save=False)
if images_col is None:
images_col = [start_image.copy()]
print('running', image_file,i)
mask = get_mask(start_image, args.mask_type, inverse=False)
start_code= sampler.get_code(encoder=encoder, data=start_image, layer=args.opt_layer, mask=mask)
output_image, list_samples = sampler.sampling( condition_net=net, image_encoder=encoder, image_net=net, image_generator=generator, edge_detector=edge_detector,
gen_in_layer=settings.generator_in_layer, gen_out_layer=settings.generator_out_layer, start_code=start_code,
n_iters=args.n_iters, lr=args.lr, lr_end=args.lr_end, threshold=args.threshold,
layer=args.act_layer, conditions=conditions,
epsilon1=args.epsilon1, epsilon2=args.epsilon2, epsilon3=args.epsilon3,
mask_epsilon=args.mask_epsilon, content_epsilon=content,
style_epsilon=style, edge_epsilon=edge,
content_layer=args.content_layer,
output_dir=output_dir, mask=mask, input_image=start_image,
reset_every=args.reset_every, save_every=args.save_every)
print('Saving {} for {}'.format(i,image_name))
images_col.append(output_image)
file_path = os.path.join(image_path, str(i) +'.jpg')
util.save_image(output_image, file_path)
images_to_save.append(images_col)
filename = "%s/%s_%04d_%04d_%s_h_%s_%s_%s__%s.jpg" % (
args.output_dir,
'loss_survey',
conditions[0]["unit"],
args.n_iters,
args.lr,
str(args.epsilon1),
str(args.epsilon2),
str(args.epsilon3),
args.seed
)
util.save_checkerboard(images_to_save, filename, labels=['ground truth',
'no loss',
'edge loss' ,
'content loss',
'style loss',
'content + style'] )
if __name__ == '__main__':
main()
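# Small numeric sketch (not part of the original script) of the identity used in
# ClassConditionalSampler.forward_backward_from_x_to_condition: for a softmax p over
# logits z, d log p[unit] / d z[unit] = 1 - p[unit], which is the value written into
# the one-hot gradient. The logits below are arbitrary example values.
def _log_softmax_grad_demo(logits=(1.0, 2.0, 0.5), unit=1, eps=1e-6):
    z = np.array(logits, dtype=np.float64)
    p = np.exp(z - z.max())
    p /= p.sum()
    analytic = 1.0 - p[unit]
    # finite-difference estimate of d log p[unit] / d z[unit]
    z2 = z.copy()
    z2[unit] += eps
    p2 = np.exp(z2 - z2.max())
    p2 /= p2.sum()
    numeric = (np.log(p2[unit]) - np.log(p[unit])) / eps
    return analytic, numeric  # the two values agree to roughly 1e-6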
| philkuz/ppgn | sampling_all_losses.py | Python | mit | 12,411 | ["NEURON"] | 8ae6de728a039f075116768d15de908fcfeafc2706e545bf75478690d4c607c0 |
#!/usr/bin/env python
import vtk
import numpy as np
from vmtk import vmtkscripts
from vmtk import vtkvmtk
import argparse
import itertools
import os
import copy
# map the average cross-sectional area to the boundary reference info
def Execute(args):
print("evaluate centerlines")
reader_ctr = vmtkscripts.vmtkSurfaceReader()
reader_ctr.InputFileName = args.centerlines
reader_ctr.Execute()
centerlines = reader_ctr.Surface
reader_surf = vmtkscripts.vmtkSurfaceReader()
reader_surf.InputFileName = args.surface
reader_surf.Execute()
surface = reader_surf.Surface
locator_cell = vtk.vtkPointLocator()
locator_cell.SetDataSet(surface)
locator_cell.BuildLocator()
reader_br = vmtkscripts.vmtkSurfaceReader()
reader_br.InputFileName = args.boundary_reference
reader_br.Execute()
boundary_reference = reader_br.Surface
terminal_pts = boundary_reference.GetNumberOfPoints()
main_body_id = 695041
for i in range(terminal_pts):
        pt = boundary_reference.GetPoint(i)
        # snap the boundary reference point to its closest point on the surface
        ctr_ptId = locator_cell.FindClosestPoint(pt)
        # TODO: clip the surface at this outlet (e.g. with a vmtk surface clipper)
        # and accumulate the average cross-sectional area into 'avg_area'
boundary_reference.GetPointData().AddArray(avg_area)
writer = vmtkscripts.vmtkSurfaceWriter()
writer.OutputFileName = args.out_file
writer.Input = boundary_reference
writer.Execute()
if __name__=='__main__':
    parser = argparse.ArgumentParser(description='map outlet average cross-sectional area to boundary reference information')
    parser.add_argument("-b", dest="boundary_reference", required=True, help="input boundary reference", metavar="FILE")
parser.add_argument("-c", dest="centerlines", required=True, help="input centerlines", metavar="FILE")
parser.add_argument("-s", dest="surface", required=True, help="surface to clip", metavar="FILE")
parser.add_argument("-o", dest="out_file", required=True, help="clipped surface reference information", metavar="FILE")
args = parser.parse_args()
#print(args)
Execute(args)
| kayarre/Tools | vmtk/clip_outlets_subset.py | Python | bsd-2-clause | 2,097 | ["VTK"] | 2c0d18cd4a479c7077f28779bcc14b1d70926302c84afb9ad783680d2a57ca60 |
""" The SGE TimeLeft utility interrogates the SGE batch system for the
current CPU consumed, as well as its limit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import re
import time
import socket
from DIRAC import S_OK, S_ERROR
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.TimeLeft import runCommand
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.ResourceUsage import ResourceUsage
class SGEResourceUsage(ResourceUsage):
"""
This is the SGE plugin of the TimeLeft Utility
"""
def __init__(self):
""" Standard constructor
"""
super(SGEResourceUsage, self).__init__('SGE', 'JOB_ID')
self.queue = os.environ.get('QUEUE')
sgePath = os.environ.get('SGE_BINARY_PATH')
if sgePath:
os.environ['PATH'] += ':' + sgePath
self.log.verbose('JOB_ID=%s, QUEUE=%s' % (self.jobID, self.queue))
self.startTime = time.time()
def getResourceUsage(self):
""" Returns S_OK with a dictionary containing the entries CPU, CPULimit,
WallClock, WallClockLimit, and Unit for current slot.
"""
cmd = 'qstat -f -j %s' % (self.jobID)
result = runCommand(cmd)
if not result['OK']:
return result
cpu = None
cpuLimit = None
wallClock = None
wallClockLimit = None
lines = str(result['Value']).split('\n')
for line in lines:
if re.search('usage.*cpu.*', line):
match = re.search(r'cpu=([\d,:]*),', line)
if match:
cpuList = match.groups()[0].split(':')
try:
newcpu = 0.
if len(cpuList) == 3:
newcpu = float(cpuList[0]) * 3600 + \
float(cpuList[1]) * 60 + \
float(cpuList[2])
elif len(cpuList) == 4:
newcpu = float(cpuList[0]) * 24 * 3600 + \
float(cpuList[1]) * 3600 + \
float(cpuList[2]) * 60 + \
float(cpuList[3])
if not cpu or newcpu > cpu:
cpu = newcpu
except ValueError:
self.log.warn('Problem parsing "%s" for CPU consumed' % line)
if re.search('hard resource_list.*cpu.*', line):
match = re.search(r'_cpu=(\d*)', line)
if match:
cpuLimit = float(match.groups()[0])
match = re.search(r'_rt=(\d*)', line)
if match:
wallClockLimit = float(match.groups()[0])
else:
self.log.warn("No hard limits found")
# Some SGE batch systems apply CPU scaling factor to the CPU consumption figures
if cpu:
factor = _getCPUScalingFactor()
if factor:
cpu = cpu / factor
consumed = {'CPU': cpu,
'CPULimit': cpuLimit,
'WallClock': wallClock,
'WallClockLimit': wallClockLimit}
if None in consumed.values():
missed = [key for key, val in consumed.items() if val is None]
msg = 'Could not determine parameter'
self.log.warn('Could not determine parameter', ','.join(missed))
self.log.debug('This is the stdout from the batch system call\n%s' % (result['Value']))
else:
self.log.debug("TimeLeft counters complete:", str(consumed))
if cpuLimit or wallClockLimit:
# We have got a partial result from SGE
if not cpuLimit:
# Take some margin
consumed['CPULimit'] = wallClockLimit * 0.8
if not wallClockLimit:
consumed['WallClockLimit'] = cpuLimit / 0.8
if not cpu:
consumed['CPU'] = time.time() - self.startTime
if not wallClock:
consumed['WallClock'] = time.time() - self.startTime
self.log.debug("TimeLeft counters restored:", str(consumed))
return S_OK(consumed)
else:
msg = 'Could not determine necessary parameters'
self.log.info(msg, ':\nThis is the stdout from the batch system call\n%s' % (result['Value']))
retVal = S_ERROR(msg)
retVal['Value'] = consumed
return retVal
def _getCPUScalingFactor():
host = socket.getfqdn()
cmd = 'qconf -se %s' % host
result = runCommand(cmd)
if not result['OK']:
return None
lines = str(result['Value']).split('\n')
for line in lines:
if re.search('usage_scaling', line):
match = re.search(r'cpu=([\d,\.]*),', line)
if match:
return float(match.groups()[0])
return None
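# Minimal illustration (not part of DIRAC) of the "usage ... cpu=..." parsing done in
# getResourceUsage above: SGE reports consumed CPU either as H:M:S or as D:H:M:S, and
# both forms are converted to seconds. The sample line below is made up.
def _parse_sge_cpu_demo(line='usage    1:  cpu=1:02:03, mem=0.0 GBs'):
    match = re.search(r'cpu=([\d,:]*),', line)
    if not match:
        return None
    parts = match.groups()[0].split(':')
    if len(parts) == 3:  # hours:minutes:seconds
        return float(parts[0]) * 3600 + float(parts[1]) * 60 + float(parts[2])
    if len(parts) == 4:  # days:hours:minutes:seconds
        return float(parts[0]) * 86400 + float(parts[1]) * 3600 + \
            float(parts[2]) * 60 + float(parts[3])
    return None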
| yujikato/DIRAC | src/DIRAC/Resources/Computing/BatchSystems/TimeLeft/SGEResourceUsage.py | Python | gpl-3.0 | 4,360 | ["DIRAC"] | 4401182489b1828c8f9fce639a7a9c5c27bea9f0b2109ec9dfbe6fd1ca8c8845 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.Input import OutputNames, InputTree, ExecutableInfo
from peacock.utils import Testing
import datetime
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
qapp = QtWidgets.QApplication([])
def create_tree(self, input_file):
app_info = ExecutableInfo.ExecutableInfo()
app_info.setPath(Testing.find_moose_test_exe())
self.assertTrue(app_info.valid())
input_tree = InputTree.InputTree(app_info)
input_tree.setInputFile(input_file)
return input_tree
def testOutputFiles(self):
input_file = "../../common/transient.i"
input_tree = self.create_tree(input_file)
output_names = OutputNames.getOutputFiles(input_tree, input_file)
self.assertEqual(output_names, ["out_transient.e"])
outputs = input_tree.getBlockInfo("/Outputs")
file_base = outputs.getParamInfo("file_base")
file_base.value = "new_file_base"
outputs.parameters_list.remove("file_base")
del outputs.parameters["file_base"]
output_names = OutputNames.getOutputFiles(input_tree, input_file)
self.assertEqual(output_names, ["transient_out.e"])
def testOversample(self):
input_file = "../../common/oversample.i"
input_tree = self.create_tree(input_file)
output_names = OutputNames.getOutputFiles(input_tree, input_file)
self.assertEqual(output_names, ["out_transient.e", "oversample_2.e"])
outputs = input_tree.getBlockInfo("/Outputs")
outputs.parameters_list.remove("file_base")
del outputs.parameters["file_base"]
output_names = OutputNames.getOutputFiles(input_tree, input_file)
self.assertEqual(output_names, ["oversample_out.e", "oversample_2.e"])
outputs = input_tree.getBlockInfo("/Outputs/refine_2")
t = outputs.getTypeBlock()
t.parameters_list.remove("file_base")
del t.parameters["file_base"]
output_names = OutputNames.getOutputFiles(input_tree, input_file)
self.assertEqual(output_names, ["oversample_out.e", "oversample_refine_2.e"])
def testDate(self):
input_file = "../../common/transient_with_date.i"
input_tree = self.create_tree(input_file)
output_names = OutputNames.getOutputFiles(input_tree, input_file)
utc = datetime.datetime.utcnow()
self.assertEqual(output_names, ["with_date.e", "with_date_%s.e" % utc.strftime("%Y-%m-%d")])
def testPostprocessor(self):
input_file = "../../common/transient.i"
input_tree = self.create_tree(input_file)
output_names = OutputNames.getPostprocessorFiles(input_tree, input_file)
self.assertEqual(output_names, ["out_transient.csv"])
outputs = input_tree.getBlockInfo("/Outputs")
outputs.parameters_list.remove("file_base")
del outputs.parameters["file_base"]
output_names = OutputNames.getPostprocessorFiles(input_tree, input_file)
self.assertEqual(output_names, ["transient_out.csv"])
def testVectorPostprocessor(self):
input_file = "../../common/time_data.i"
input_tree = self.create_tree(input_file)
output_names = OutputNames.getVectorPostprocessorFiles(input_tree, input_file)
self.assertEqual(output_names, ["time_data_line_sample_*.csv"])
outputs = input_tree.getBlockInfo("/Outputs")
p = outputs.getParamInfo("file_base")
p.value = "foo"
output_names = OutputNames.getVectorPostprocessorFiles(input_tree, input_file)
self.assertEqual(output_names, ["foo_line_sample_*.csv"])
outputs.parameters_list.remove("file_base")
del outputs.parameters["file_base"]
output_names = OutputNames.getVectorPostprocessorFiles(input_tree, input_file)
self.assertEqual(output_names, ["time_data_out_line_sample_*.csv"])
if __name__ == '__main__':
Testing.run_tests()
| nuclear-wizard/moose | python/peacock/tests/input_tab/OutputNames/test_OutputNames.py | Python | lgpl-2.1 | 4,243 | ["MOOSE"] | 1a536ecaa03693cebd8827b1adebac57cb46e839097dc02303801134d6d6f9a2 |
from rest_framework import serializers
from cocomapapp.models import Tag, Topic, Post, Relation, Vote, Visit
from django.contrib.auth.models import User
from rest_framework_bulk import (BulkListSerializer, BulkSerializerMixin)
class RelationSerializer(serializers.ModelSerializer):
class Meta:
model = Relation
fields = ('id', 'topic_from','topic_to','label', 'positive_reaction_count', 'negative_reaction_count')
class VisitSerializer(serializers.ModelSerializer):
#topic = TopicSerializer()
#user = UserSerializer()
class Meta:
model = Visit
fields = ('id', 'user', 'topic', 'visit_date')
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'last_login', 'is_superuser', 'username', 'first_name', 'last_name', 'email', 'is_staff', 'votes', 'visits')
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ('wikidataID', 'name', 'hidden_tags', 'topics', 'posts', 'created_at', 'updated_at')
class VoteSerializer(serializers.ModelSerializer):
#post = PostSerializer(read_only=True)
#user = UserSerializer(read_only=True)
class Meta:
model = Vote
fields = ('id', 'user', 'post', 'is_positive')
class PostSerializer(serializers.ModelSerializer):
votes = VoteSerializer(read_only=True)
class Meta:
model = Post
fields = ('id', 'content', 'user', 'tags', 'topic', 'positive_reaction_count', 'negative_reaction_count', 'accuracy', 'votes', 'created_at', 'updated_at')
class PostNestedSerializer(serializers.ModelSerializer):
tags = TagSerializer(many=True)
user = UserSerializer()
class Meta:
model = Post
fields = ('id', 'content', 'user', 'tags', 'topic', 'positive_reaction_count', 'negative_reaction_count', 'accuracy', 'votes', 'created_at', 'updated_at')
class TopicSerializer(serializers.ModelSerializer):
posts = PostSerializer(many=True, read_only=True)
#tags = TagSerializer(many=True)
#user = UserSerializer()
#relates_to = RelationSerializer(many=True)
visits = VisitSerializer(many=True, read_only=True)
class Meta:
model = Topic
fields = ('id', 'name', 'user', 'relates_to', 'tags', 'posts', 'hotness', 'visits', 'created_at', 'updated_at')
class TopicNestedSerializer(serializers.ModelSerializer):
posts = PostNestedSerializer(many=True, read_only=True)
tags = TagSerializer(many=True)
user = UserSerializer()
relates_to = RelationSerializer(many=True)
class Meta:
model = Topic
fields = ('id', 'name', 'user', 'relates_to', 'tags', 'posts', 'hotness', 'visits', 'created_at', 'updated_at')
class HotTopicsSerializer(serializers.ModelSerializer):
#tags = TagSerializer(many=True)
#user = UserSerializer()
class Meta:
model = Topic
fields = ('id', 'name', 'user', 'tags', 'created_at', 'updated_at')
class RelationBulkSerializer(BulkSerializerMixin, serializers.ModelSerializer):
class Meta(object):
model = Relation
# only necessary in DRF3
list_serializer_class = BulkListSerializer
fields = ('id', 'topic_from','topic_to','label')
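# Illustrative note (not part of the app): PostSerializer keeps 'user' and 'tags' as
# primary keys, while PostNestedSerializer expands them inline via UserSerializer and
# TagSerializer, e.g. (values invented for illustration):
#   PostSerializer       -> {"id": 1, "user": 7, "tags": ["Q42"], ...}
#   PostNestedSerializer -> {"id": 1, "user": {"id": 7, "username": "..."},
#                            "tags": [{"wikidataID": "Q42", "name": "..."}], ...}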
| bounswe/bounswe2016group11 | cocomapapp/serializers.py | Python | apache-2.0 | 3,232 | ["VisIt"] | 33b7c10b3f19ca710b0fd726101e327fbc9120be143a528697b284165400f685 |
# -*- coding: utf-8 -*-
"""Tests for the HTML summary assembler."""
import os
import tempfile
import unittest
from pybel.examples import sialic_acid_graph
try:
import bio2bel_hgnc
import bio2bel_entrez
except ImportError:
bio2bel_hgnc = None
bio2bel_entrez = None
class TestSummaryAssembler(unittest.TestCase):
"""Tests for the assemblers."""
def test_summary_to_html_path(self):
"""Test to_html_path."""
import pybel_tools.assembler.html
with tempfile.TemporaryDirectory() as tmpdirname:
path = os.path.join(tmpdirname, 'summary.html')
pybel_tools.assembler.html.to_html_path(graph=sialic_acid_graph, path=path)
self.assertTrue(os.path.exists(path))
with open(path) as file:
contents = file.read()
self.assertIn('<html', contents)
self.assertIn('PTPN6', contents)
@unittest.skip('Need to upgrade Bio2BEL to PyBEL 14')
def test_ideogram_to_html_path(self):
"""Test to_html_path."""
import pybel_tools.assembler.ideogram
with tempfile.TemporaryDirectory() as tmpdirname:
path = os.path.join(tmpdirname, 'ideogram.html')
pybel_tools.assembler.ideogram.to_html_path(graph=sialic_acid_graph, path=path)
self.assertTrue(os.path.exists(path))
with open(path) as file:
contents = file.read()
self.assertIn('<html', contents)
self.assertIn('PTPN6', contents)
| pybel/pybel-tools | tests/test_assemblers/test_summary_html.py | Python | mit | 1,535 | ["Pybel"] | 5a74d6265717fda0d9f056734b51f2136f45784caafa7e274b3de28bcbc1a58b |
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import sys
import os
import re
import csv
import copy
import warnings
from optparse import OptionParser
from gnuradio import filter
try:
import numpy as np
except ImportError:
    raise SystemExit('Please install NumPy to run this script (https://numpy.org/)')
try:
import numpy.fft as fft_detail
except ImportError:
raise SystemExit('Could not import fft implementation of numpy')
try:
from numpy import poly1d
except ImportError:
    raise SystemExit('Please install NumPy to run this script (https://numpy.org/)')
try:
from scipy import signal
except ImportError:
raise SystemExit('Please install SciPy to run this script (https://www.scipy.org)')
try:
    from PyQt5 import Qt, QtCore, QtGui, QtWidgets
except ImportError:
raise SystemExit('Please install PyQt5 to run this script (https://www.riverbankcomputing.com/software/pyqt/download5)')
try:
import pyqtgraph as pg
except ImportError:
raise SystemExit('Please install pyqtgraph to run this script (http://www.pyqtgraph.org)')
try:
from gnuradio.filter.pyqt_filter_stacked import Ui_MainWindow
except ImportError:
raise SystemExit('Could not import from pyqt_filter_stacked. Please build with "pyuic5 pyqt_filter_stacked.ui -o pyqt_filter_stacked.py"')
try:
from gnuradio.filter.banditems import *
except ImportError:
raise SystemExit('Could not import from banditems. Please check whether banditems.py is in the library path')
try:
from gnuradio.filter.polezero_plot import *
except ImportError:
raise SystemExit('Could not import from polezero_plot. Please check whether polezero_plot.py is in the library path')
# Behavior is not quite working on 3.8 - TODO
# try:
# from gnuradio.filter.idealbanditems import *
# except ImportError:
# raise SystemExit('Could not import from idealbanditems. Please check whether idealbanditems.py is in the library path')
try:
from gnuradio.filter.api_object import *
except ImportError:
raise SystemExit('Could not import from api_object. Please check whether api_object.py is in the library path')
try:
from gnuradio.filter.fir_design import *
except ImportError:
raise SystemExit('Could not import from fir_design. Please check whether fir_design.py is in the library path')
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s): return s
# Gnuradio Filter design tool main window
class gr_plot_filter(QtWidgets.QMainWindow):
    def __init__(self, options, callback=None, restype=""):
        QtWidgets.QMainWindow.__init__(self, None)
self.gui = Ui_MainWindow()
self.callback = callback
# Set Global pyqtgraph options
pg.setConfigOption('foreground', 'k') # Default foreground color for text, lines, axes, etc.
pg.setConfigOption('background', None) # Default background for GraphicsView.
pg.setConfigOptions(antialias=True) # Draw lines with smooth edges at the cost of reduced performance.
self.gui.setupUi(self)
# Remove other filter combobox entry if some restriction is specified.
if restype == "iir":
ind = self.gui.fselectComboBox.findText("FIR")
if ind != -1:
self.gui.fselectComboBox.removeItem(ind)
elif restype == "fir":
ind = self.gui.fselectComboBox.findText("IIR(scipy)")
if ind != -1:
self.gui.fselectComboBox.removeItem(ind)
self.gui.action_save.triggered.connect(self.action_save_dialog)
self.gui.action_open.triggered.connect(self.action_open_dialog)
self.gui.filterTypeComboBox.currentIndexChanged['const QString&'].connect(self.changed_filter_type)
self.gui.iirfilterBandComboBox.currentIndexChanged['const QString&'].connect(self.changed_iirfilter_band)
self.gui.filterDesignTypeComboBox.currentIndexChanged['const QString&'].connect(self.changed_filter_design_type)
self.gui.fselectComboBox.currentIndexChanged['const QString&'].connect(self.changed_fselect)
self.gui.iirfilterTypeComboBox.currentIndexChanged['const QString&'].connect(self.set_order)
self.gui.designButton.released.connect(self.design)
# self.gui.tabGroup.currentChanged['int'].connect(self.tab_changed)
self.gui.nfftEdit.textEdited['QString'].connect(self.nfft_edit_changed)
self.gui.actionQuick_Access.triggered.connect(self.action_quick_access)
self.gui.actionSpec_Widget.triggered.connect(self.action_spec_widget)
self.gui.actionResponse_Widget.triggered.connect(self.action_response_widget)
self.gui.actionDesign_Widget.triggered.connect(self.action_design_widget)
self.gui.actionMagnitude_Response.triggered.connect(self.set_actmagresponse)
self.gui.actionGrid_2.triggered.connect(self.set_actgrid)
self.gui.actionPhase_Respone.triggered.connect(self.set_actphase)
self.gui.actionGroup_Delay.triggered.connect(self.set_actgdelay)
self.gui.actionFilter_Coefficients.triggered.connect(self.set_actfcoeff)
self.gui.actionBand_Diagram.triggered.connect(self.set_actband)
# self.gui.actionIdeal_Band.triggered.connect(self.set_drawideal)
self.gui.actionPole_Zero_Plot_2.triggered.connect(self.set_actpzplot)
self.gui.actionGridview.triggered.connect(self.set_switchview)
self.gui.actionPlot_select.triggered.connect(self.set_plotselect)
self.gui.actionPhase_Delay.triggered.connect(self.set_actpdelay)
self.gui.actionImpulse_Response.triggered.connect(self.set_actimpres)
self.gui.actionStep_Response.triggered.connect(self.set_actstepres)
self.gui.mfmagPush.released.connect(self.set_mfmagresponse)
self.gui.mfphasePush.released.connect(self.set_mfphaseresponse)
self.gui.mfgpdlyPush.released.connect(self.set_mfgroupdelay)
self.gui.mfphdlyPush.released.connect(self.set_mfphasedelay)
self.gui.mfoverlayPush.clicked.connect(self.set_mfoverlay)
self.gui.conjPush.clicked.connect(self.set_conj)
self.gui.mconjPush.clicked.connect(self.set_mconj)
self.gui.addzeroPush.clicked.connect(self.set_zeroadd)
self.gui.maddzeroPush.clicked.connect(self.set_mzeroadd)
self.gui.addpolePush.clicked.connect(self.set_poleadd)
self.gui.maddpolePush.clicked.connect(self.set_mpoleadd)
self.gui.delPush.clicked.connect(self.set_delpz)
self.gui.mdelPush.clicked.connect(self.set_mdelpz)
self.gui.mttapsPush.clicked.connect(self.set_mttaps)
self.gui.mtstepPush.clicked.connect(self.set_mtstep)
self.gui.mtimpPush.clicked.connect(self.set_mtimpulse)
self.gui.checkKeepcur.stateChanged['int'].connect(self.set_bufferplots)
self.gui.checkGrid.stateChanged['int'].connect(self.set_grid)
self.gui.checkMagres.stateChanged['int'].connect(self.set_magresponse)
self.gui.checkGdelay.stateChanged['int'].connect(self.set_gdelay)
self.gui.checkPhase.stateChanged['int'].connect(self.set_phase)
self.gui.checkFcoeff.stateChanged['int'].connect(self.set_fcoeff)
self.gui.checkBand.stateChanged['int'].connect(self.set_band)
self.gui.checkPzplot.stateChanged['int'].connect(self.set_pzplot)
self.gui.checkPdelay.stateChanged['int'].connect(self.set_pdelay)
self.gui.checkImpulse.stateChanged['int'].connect(self.set_impres)
self.gui.checkStep.stateChanged['int'].connect(self.set_stepres)
self.gridenable = False
self.mfoverlay = False
self.mtoverlay = False
self.iir = False
self.mfmagresponse = True
self.mfphaseresponse = False
self.mfgroupdelay = False
self.mfphasedelay = False
self.mttaps = True
self.mtstep = False
self.mtimpulse = False
self.gui.designButton.setShortcut(QtCore.Qt.Key_Return)
self.taps = []
self.a = []
self.b = []
self.fftdB = []
self.fftDeg = []
self.groupDelay = []
self.phaseDelay = []
self.gridview = 0
self.params = []
self.nfftpts = int(10000)
self.gui.nfftEdit.setText(str(self.nfftpts))
self.firFilters = ("Low Pass", "Band Pass", "Complex Band Pass", "Band Notch",
"High Pass", "Root Raised Cosine", "Gaussian", "Half Band")
self.optFilters = ("Low Pass", "Band Pass", "Complex Band Pass",
"Band Notch", "High Pass", "Half Band")
self.set_windowed()
# Initialize to LPF.
self.gui.filterTypeWidget.setCurrentWidget(self.gui.firlpfPage)
self.gui.iirfilterTypeComboBox.hide()
self.gui.iirfilterBandComboBox.hide()
self.gui.adComboBox.hide()
self.gui.addpolePush.setEnabled(False)
self.gui.maddpolePush.setEnabled(False)
# Create plots.
self.plots = {'FREQ': None, 'TIME': None, 'PHASE': None, 'GROUP': None,
'IMPRES': None, 'STEPRES': None, 'PDELAY': None}
self.mplots = {'mFREQ': None, 'mTIME': None}
self.plots['FREQ'] = self.gui.freqPlot
self.plots['TIME'] = self.gui.timePlot
self.plots['PHASE'] = self.gui.phasePlot
self.plots['GROUP'] = self.gui.groupPlot
self.plots['IMPRES'] = self.gui.impresPlot
self.plots['STEPRES'] = self.gui.stepresPlot
self.plots['PDELAY'] = self.gui.pdelayPlot
# for i in self.plots:
# self.plots[i] = pg.PlotWidget(enableMenu=False, viewBox=CustomViewBox())
# self.plots[i].plotItem.vb = CustomViewBox()
self.mplots['mFREQ'] = self.gui.mfreqPlot
self.mplots['mTIME'] = self.gui.mtimePlot
# for i in self.mplots:
# self.mplots[i] = pg.PlotWidget(enableMenu=False, viewBox=CustomViewBox())
# # Add plots to layouts.
# self.gui.freqTab.layout().addWidget(self.plots['FREQ'])
# self.gui.timeTab.layout().addWidget(self.plots['TIME'])
# self.gui.phaseTab.layout().addWidget(self.plots['PHASE'])
# self.gui.groupTab.layout().addWidget(self.plots['GROUP'])
# self.gui.impresTab.layout().addWidget(self.plots['IMPRES'])
# self.gui.stepresTab.layout().addWidget(self.plots['STEPRES'])
# self.gui.pdelayTab.layout().addWidget(self.plots['PDELAY'])
# self.gui.mfreqTab.layout().addWidget(self.mplots['mFREQ'])
# self.gui.mtimeTab.layout().addWidget(self.mplots['mTIME'])
# Set Axis Labels.
self.labelstyle11b = {'font-family': 'Helvetica', 'font-size': '11pt', 'font-weight': 'bold'}
self.plots['FREQ'].setLabel('bottom', 'Frequency', units='Hz', **self.labelstyle11b)
self.plots['FREQ'].setLabel('left', 'Magnitude', units='dB', **self.labelstyle11b)
self.plots['TIME'].setLabel('bottom', 'Tap number', **self.labelstyle11b)
self.plots['TIME'].setLabel('left', 'Amplitude', **self.labelstyle11b)
self.plots['PHASE'].setLabel('bottom', 'Frequency', units='Hz', **self.labelstyle11b)
self.plots['PHASE'].setLabel('left', 'Phase', units='Radians', **self.labelstyle11b)
self.plots['GROUP'].setLabel('bottom', 'Frequency', units='Hz', **self.labelstyle11b)
self.plots['GROUP'].setLabel('left', 'Delay', units='seconds', **self.labelstyle11b)
self.plots['IMPRES'].setLabel('bottom', 'n', units='Samples', **self.labelstyle11b)
self.plots['IMPRES'].setLabel('left', 'Amplitude', **self.labelstyle11b)
self.plots['STEPRES'].setLabel('bottom', 'n', units='Samples', **self.labelstyle11b)
self.plots['STEPRES'].setLabel('left', 'Amplitude', **self.labelstyle11b)
self.plots['PDELAY'].setLabel('bottom', 'Frequency', units='Hz', **self.labelstyle11b)
self.plots['PDELAY'].setLabel('left', 'Phase Delay', units='Radians', **self.labelstyle11b)
self.labelstyle9b = {'font-family': 'Helvetica', 'font-size': '9pt', 'font-weight': 'bold'}
self.mplots['mTIME'].setLabel('bottom', 'n', units='Samples/taps', **self.labelstyle9b)
self.mplots['mTIME'].setLabel('left', 'Amplitude', **self.labelstyle9b)
# Set up axes.
for i in self.plots:
axis = self.plots[i].getAxis('bottom')
axis.setStyle(tickLength=-10)
axis = self.plots[i].getAxis('left')
axis.setStyle(tickLength=-10)
for i in self.mplots:
axis = self.mplots[i].getAxis('bottom')
axis.setStyle(tickLength=-10)
axis = self.mplots[i].getAxis('left')
axis.setStyle(tickLength=-10)
# Set up plot curves.
self.rcurve = self.plots['TIME'].plot(title="Real")
self.icurve = self.plots['TIME'].plot(title="Imag")
self.mtimecurve = self.mplots['mTIME'].plot(title="PSD")
self.mtimecurve_stems = self.mplots['mTIME'].plot(connect='pairs', name='Stems')
self.mtimecurve_i_stems = self.mplots['mTIME'].plot(connect='pairs', name='Stems')
self.mtimecurve_i = self.mplots['mTIME'].plot(title="Impulse Response Imag")
self.plots['FREQ'].enableAutoRange(enable=True)
self.freqcurve = self.plots['FREQ'].plot(title="PSD")
# For the frequency view, set up linked x axes
self.primary_freq_overlay = self.mplots['mFREQ']
self.mfreqcurve = self.primary_freq_overlay.plot(title="PSD")
self.secondary_freq_overlay_vb = CustomViewBox()
self.primary_freq_overlay.scene().addItem(self.secondary_freq_overlay_vb)
self.primary_freq_overlay.getAxis('right').linkToView(self.secondary_freq_overlay_vb)
self.mfreqcurve2 = pg.PlotCurveItem()
# self.secondary_freq_overlay_vb.setGeometry(self.primary_freq_overlay.plotItem.vb.sceneBoundingRect())
self.secondary_freq_overlay_vb.setXLink(self.primary_freq_overlay)
self.secondary_freq_overlay_vb.addItem(self.mfreqcurve2)
self.primary_freq_overlay.plotItem.vb.sigResized.connect(self.updateViews)
self.phasecurve = self.plots['PHASE'].plot(title="Phase")
self.groupcurve = self.plots['GROUP'].plot(title="Group Delay")
self.imprescurve_stems = self.plots['IMPRES'].plot(connect='pairs', name='Stems')
self.imprescurve = self.plots['IMPRES'].plot(title="Impulse Response")
self.imprescurve_i_stems = self.plots['IMPRES'].plot(connect='pairs', name='Stems')
self.imprescurve_i = self.plots['IMPRES'].plot(title="Impulse Response Imag")
self.steprescurve_stems = self.plots['STEPRES'].plot(connect='pairs', name='Stems')
self.steprescurve = self.plots['STEPRES'].plot(title="Step Response")
self.steprescurve_i_stems = self.plots['STEPRES'].plot(connect='pairs', name='Stems')
self.steprescurve_i = self.plots['STEPRES'].plot(title="Step Response Imag")
self.pdelaycurve = self.plots['PDELAY'].plot(title="Phase Delay")
# Disable Ideal Band for now
# self.idbanditems = IdealBandItems()
self.set_defaultpen()
# Assigning items.
self.lpfitems = lpfItems
self.hpfitems = hpfItems
self.bpfitems = bpfItems
self.bnfitems = bnfItems
# Connect signals.
self.lpfitems[0].attenChanged.connect(self.set_fatten)
self.hpfitems[0].attenChanged.connect(self.set_fatten)
self.bpfitems[0].attenChanged.connect(self.set_fatten)
self.bnfitems[0].attenChanged.connect(self.set_fatten)
# Populate the Band-diagram scene.
self.scene = QtGui.QGraphicsScene()
self.scene.setSceneRect(0,0,250,250)
lightback = QtGui.qRgb(0xf8, 0xf8, 0xff)
backbrush = Qt.QBrush(Qt.QColor(lightback))
self.scene.setBackgroundBrush(backbrush)
self.gui.bandView.setScene(self.scene)
self.gui.mbandView.setScene(self.scene)
# Install Canvas picker for pz-plot.
self.cpicker = CanvasPicker(self.gui.pzPlot)
self.cpicker.curveChanged.connect(self.set_curvetaps)
self.cpicker.mouseposChanged.connect(self.set_statusbar)
self.cpicker2 = CanvasPicker(self.gui.mpzPlot)
self.cpicker2.curveChanged.connect(self.set_mcurvetaps)
self.cpicker2.mouseposChanged.connect(self.set_mstatusbar)
# Edit boxes for band-diagrams (not required to date, so they may be removed?).
"""
self.lpfpassEdit = QtGui.QLineEdit()
self.lpfpassEdit.setMaximumSize(QtCore.QSize(75,20))
self.lpfpassEdit.setText('Not set')
self.lpfstartproxy = QtGui.QGraphicsProxyWidget()
self.lpfstartproxy.setWidget(self.lpfpassEdit)
self.lpfstartproxy.setPos(400,30)
self.lpfstopEdit = QtGui.QLineEdit()
self.lpfstopEdit.setMaximumSize(QtCore.QSize(75,20))
self.lpfstopEdit.setText('Not set')
self.lpfstopproxy = QtGui.QGraphicsProxyWidget()
self.lpfstopproxy.setWidget(self.lpfstopEdit)
self.lpfstopproxy.setPos(400,50)
self.lpfitems.append(self.lpfstartproxy)
self.lpfitems.append(self.lpfstopproxy)
"""
self.populate_bandview(self.lpfitems)
# Set up validators for edit boxes.
self.intVal = Qt.QIntValidator(None)
self.dblVal = Qt.QDoubleValidator(None)
self.gui.nfftEdit.setValidator(self.intVal)
self.gui.sampleRateEdit.setValidator(self.dblVal)
self.gui.filterGainEdit.setValidator(self.dblVal)
self.gui.endofLpfPassBandEdit.setValidator(self.dblVal)
self.gui.startofLpfStopBandEdit.setValidator(self.dblVal)
self.gui.lpfStopBandAttenEdit.setValidator(self.dblVal)
self.gui.lpfPassBandRippleEdit.setValidator(self.dblVal)
self.gui.startofBpfPassBandEdit.setValidator(self.dblVal)
self.gui.endofBpfPassBandEdit.setValidator(self.dblVal)
self.gui.bpfTransitionEdit.setValidator(self.dblVal)
self.gui.bpfStopBandAttenEdit.setValidator(self.dblVal)
self.gui.bpfPassBandRippleEdit.setValidator(self.dblVal)
self.gui.startofBnfStopBandEdit.setValidator(self.dblVal)
self.gui.endofBnfStopBandEdit.setValidator(self.dblVal)
self.gui.bnfTransitionEdit.setValidator(self.dblVal)
self.gui.bnfStopBandAttenEdit.setValidator(self.dblVal)
self.gui.bnfPassBandRippleEdit.setValidator(self.dblVal)
self.gui.endofHpfStopBandEdit.setValidator(self.dblVal)
self.gui.startofHpfPassBandEdit.setValidator(self.dblVal)
self.gui.hpfStopBandAttenEdit.setValidator(self.dblVal)
self.gui.hpfPassBandRippleEdit.setValidator(self.dblVal)
self.gui.rrcSymbolRateEdit.setValidator(self.dblVal)
self.gui.rrcAlphaEdit.setValidator(self.dblVal)
self.gui.rrcNumTapsEdit.setValidator(self.dblVal)
self.gui.gausSymbolRateEdit.setValidator(self.dblVal)
self.gui.gausBTEdit.setValidator(self.dblVal)
self.gui.gausNumTapsEdit.setValidator(self.dblVal)
self.gui.iirendofLpfPassBandEdit.setValidator(self.dblVal)
self.gui.iirstartofLpfStopBandEdit.setValidator(self.dblVal)
self.gui.iirLpfPassBandAttenEdit.setValidator(self.dblVal)
self.gui.iirLpfStopBandRippleEdit.setValidator(self.dblVal)
self.gui.iirstartofHpfPassBandEdit.setValidator(self.dblVal)
self.gui.iirendofHpfStopBandEdit.setValidator(self.dblVal)
self.gui.iirHpfPassBandAttenEdit.setValidator(self.dblVal)
self.gui.iirHpfStopBandRippleEdit.setValidator(self.dblVal)
self.gui.iirstartofBpfPassBandEdit.setValidator(self.dblVal)
self.gui.iirendofBpfPassBandEdit.setValidator(self.dblVal)
self.gui.iirendofBpfStopBandEdit1.setValidator(self.dblVal)
self.gui.iirstartofBpfStopBandEdit2.setValidator(self.dblVal)
self.gui.iirBpfPassBandAttenEdit.setValidator(self.dblVal)
self.gui.iirBpfStopBandRippleEdit.setValidator(self.dblVal)
self.gui.iirendofBsfPassBandEdit1.setValidator(self.dblVal)
self.gui.iirstartofBsfPassBandEdit2.setValidator(self.dblVal)
self.gui.iirstartofBsfStopBandEdit.setValidator(self.dblVal)
self.gui.iirendofBsfStopBandEdit.setValidator(self.dblVal)
self.gui.iirBsfPassBandAttenEdit.setValidator(self.dblVal)
self.gui.iirBsfStopBandRippleEdit.setValidator(self.dblVal)
self.gui.besselordEdit.setValidator(self.intVal)
self.gui.iirbesselcritEdit1.setValidator(self.dblVal)
self.gui.iirbesselcritEdit2.setValidator(self.dblVal)
self.gui.nTapsEdit.setText("0")
self.filterWindows = {"Hamming Window" : filter.firdes.WIN_HAMMING,
"Hann Window" : filter.firdes.WIN_HANN,
"Blackman Window" : filter.firdes.WIN_BLACKMAN,
"Rectangular Window" : filter.firdes.WIN_RECTANGULAR,
"Kaiser Window" : filter.firdes.WIN_KAISER,
"Blackman-harris Window" : filter.firdes.WIN_BLACKMAN_hARRIS}
self.EQUIRIPPLE_FILT = 6 # const for equiripple filter window types.
# Disable functionality that is not quite working in 3.8
self.gui.checkKeepcur.setEnabled(False)
self.gui.actionIdeal_Band.setEnabled(False)
self.show()
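# The secondary (right-axis) ViewBox was added to the scene manually, so pyqtgraph
# does not resize it automatically; keep its geometry synced to the primary plot's
# ViewBox whenever the primary is resized (see the sigResized connection above).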
def updateViews(self):
# for linking overlay graphs on GridView freq plots
self.secondary_freq_overlay_vb.setGeometry(self.primary_freq_overlay.plotItem.vb.sceneBoundingRect())
# Set up curve pens, lines, and symbols.
def set_defaultpen(self):
blue = QtGui.qRgb(0x00, 0x00, 0xFF)
blueBrush = Qt.QBrush(Qt.QColor(blue))
red = QtGui.qRgb(0xFF, 0x00, 0x00)
redBrush = Qt.QBrush(Qt.QColor(red))
self.freqcurve.setPen(pg.mkPen('b', width=1.5))
self.rcurve.setPen(None)
self.rcurve.setSymbol('o')
self.rcurve.setSymbolPen('b')
self.rcurve.setSymbolBrush(Qt.QBrush(Qt.Qt.gray))
self.rcurve.setSymbolSize(8)
self.icurve.setPen(None)
self.icurve.setSymbol('o')
self.icurve.setSymbolPen('r')
self.icurve.setSymbolBrush(Qt.QBrush(Qt.Qt.gray))
self.icurve.setSymbolSize(8)
self.imprescurve_stems.setPen(pg.mkPen('b', width=1.5))
self.imprescurve.setPen(None)
self.imprescurve.setSymbol('o')
self.imprescurve.setSymbolPen('b')
self.imprescurve.setSymbolBrush(Qt.QBrush(Qt.Qt.gray))
self.imprescurve.setSymbolSize(8)
self.imprescurve_i_stems.setPen(pg.mkPen('b', width=1.5))
self.imprescurve_i.setPen(None)
self.imprescurve_i.setSymbol('o')
self.imprescurve_i.setSymbolPen('r')
self.imprescurve_i.setSymbolBrush(Qt.QBrush(Qt.Qt.gray))
self.imprescurve_i.setSymbolSize(8)
self.steprescurve_stems.setPen(pg.mkPen('b', width=1.5))
self.steprescurve.setPen(None)
self.steprescurve.setSymbol('o')
self.steprescurve.setSymbolPen('b')
self.steprescurve.setSymbolBrush(Qt.QBrush(Qt.Qt.gray))
self.steprescurve.setSymbolSize(8)
self.steprescurve_i_stems.setPen(pg.mkPen('b', width=1.5))
self.steprescurve_i.setPen(None)
self.steprescurve_i.setSymbol('o')
self.steprescurve_i.setSymbolPen('r')
self.steprescurve_i.setSymbolBrush(Qt.QBrush(Qt.Qt.gray))
self.steprescurve_i.setSymbolSize(8)
self.phasecurve.setPen(pg.mkPen('b', width=1.5))
self.groupcurve.setPen(pg.mkPen('b', width=1.5))
self.pdelaycurve.setPen(pg.mkPen('b', width=1.5))
# self.idbanditems.setLinetype()
self.mfreqcurve.setPen(pg.mkPen('b', width=1.5))
self.mfreqcurve2.setPen(pg.mkPen('r', width=1.5))
self.mtimecurve.setPen(None)
self.mtimecurve.setSymbol('o')
self.mtimecurve.setSymbolPen('b')
self.mtimecurve.setSymbolBrush(Qt.QBrush(Qt.Qt.gray))
self.mtimecurve.setSymbolSize(8)
self.mtimecurve_stems.setPen(pg.mkPen('b', width=1.5))
self.mtimecurve_i_stems.setPen(pg.mkPen('b', width=1.5))
self.mtimecurve_i.setPen(None)
self.mtimecurve_i.setSymbol('o')
self.mtimecurve_i.setSymbolPen('r')
self.mtimecurve_i.setSymbolBrush(Qt.QBrush(Qt.Qt.gray))
self.mtimecurve_i.setSymbolSize(8)
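# Switch the visible design controls between FIR and IIR modes when the
# filter-selection combo box changes (tabs, buttons and combo boxes differ per mode).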
def changed_fselect(self, ftype):
strftype = ftype
if(ftype == "FIR"):
self.gui.iirfilterTypeComboBox.hide()
self.gui.iirfilterBandComboBox.hide()
self.gui.adComboBox.hide()
self.gui.filterDesignTypeComboBox.show()
self.gui.globalParamsBox.show()
self.gui.filterTypeComboBox.show()
self.gui.filterTypeWidget.setCurrentWidget(self.gui.firlpfPage)
self.gui.tabGroup.addTab(self.gui.timeTab, _fromUtf8("Filter Taps"))
self.gui.mttapsPush.setEnabled(True)
self.gui.addpolePush.setEnabled(False)
self.gui.maddpolePush.setEnabled(False)
elif(ftype.startswith("IIR")):
self.gui.filterDesignTypeComboBox.hide()
self.gui.globalParamsBox.hide()
self.gui.filterTypeComboBox.hide()
self.gui.iirfilterTypeComboBox.show()
self.gui.adComboBox.show()
self.gui.iirfilterBandComboBox.show()
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirlpfPage)
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.timeTab))
self.gui.mttapsPush.setEnabled(False)
self.gui.addpolePush.setEnabled(True)
self.gui.maddpolePush.setEnabled(True)
#self.design()
def set_order(self, ftype):
strftype = ftype
if(ftype == "Bessel"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirbesselPage)
else:
self.changed_iirfilter_band(self.gui.iirfilterBandComboBox.currentText())
#self.design()
def changed_iirfilter_band(self, ftype):
strftype = ftype
iirftype = self.gui.iirfilterTypeComboBox.currentText()
if(ftype == "Low Pass"):
if(iirftype == "Bessel"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirbesselPage)
else:
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirlpfPage)
elif(ftype == "Band Pass"):
if(iirftype == "Bessel"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirbesselPage)
else:
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirbpfPage)
elif(ftype == "Band Stop"):
if(iirftype == "Bessel"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirbesselPage)
else:
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirbsfPage)
elif(ftype == "High Pass"):
if(iirftype == "Bessel"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirbesselPage)
else:
self.gui.filterTypeWidget.setCurrentWidget(self.gui.iirhpfPage)
#self.design()
def changed_filter_type(self, ftype):
strftype = ftype
if(ftype == "Low Pass"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.firlpfPage)
self.remove_bandview()
self.populate_bandview(self.lpfitems)
elif(ftype == "Band Pass"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.firbpfPage)
self.remove_bandview()
self.populate_bandview(self.bpfitems)
elif(ftype == "Complex Band Pass"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.firbpfPage)
self.remove_bandview()
self.populate_bandview(self.bpfitems)
elif(ftype == "Band Notch"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.firbnfPage)
self.remove_bandview()
self.populate_bandview(self.bnfitems)
elif(ftype == "High Pass"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.firhpfPage)
self.remove_bandview()
self.populate_bandview(self.hpfitems)
elif(ftype == "Root Raised Cosine"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.rrcPage)
elif(ftype == "Gaussian"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.gausPage)
elif(ftype == "Half Band"):
self.gui.filterTypeWidget.setCurrentWidget(self.gui.firhbPage)
#self.design()
def changed_filter_design_type(self, design):
if(design == "Equiripple"):
self.set_equiripple()
else:
self.set_windowed()
#self.design()
def set_equiripple(self):
# Stop sending the signal for this function.
self.gui.filterTypeComboBox.blockSignals(True)
self.equiripple = True
self.gui.lpfPassBandRippleLabel.setVisible(True)
self.gui.lpfPassBandRippleEdit.setVisible(True)
self.gui.bpfPassBandRippleLabel.setVisible(True)
self.gui.bpfPassBandRippleEdit.setVisible(True)
self.gui.bnfPassBandRippleLabel.setVisible(True)
self.gui.bnfPassBandRippleEdit.setVisible(True)
self.gui.hpfPassBandRippleLabel.setVisible(True)
self.gui.hpfPassBandRippleEdit.setVisible(True)
# Save current type and repopulate the combo box for
# filters this window type can handle.
currenttype = self.gui.filterTypeComboBox.currentText()
items = self.gui.filterTypeComboBox.count()
for i in range(items):
self.gui.filterTypeComboBox.removeItem(0)
self.gui.filterTypeComboBox.addItems(self.optFilters)
# If the last filter type was valid for this window type,
# go back to it; otherwise, reset.
try:
index = self.optFilters.index(currenttype)
self.gui.filterTypeComboBox.setCurrentIndex(index)
except ValueError:
pass
# Tell the GUI it's OK to start sending this signal again.
self.gui.filterTypeComboBox.blockSignals(False)
def set_windowed(self):
# Stop sending the signal for this function.
self.gui.filterTypeComboBox.blockSignals(True)
self.equiripple = False
self.gui.lpfPassBandRippleLabel.setVisible(False)
self.gui.lpfPassBandRippleEdit.setVisible(False)
self.gui.bpfPassBandRippleLabel.setVisible(False)
self.gui.bpfPassBandRippleEdit.setVisible(False)
self.gui.bnfPassBandRippleLabel.setVisible(False)
self.gui.bnfPassBandRippleEdit.setVisible(False)
self.gui.hpfPassBandRippleLabel.setVisible(False)
self.gui.hpfPassBandRippleEdit.setVisible(False)
# Save current type and repopulate the combo box for
# filters this window type can handle.
currenttype = self.gui.filterTypeComboBox.currentText()
items = self.gui.filterTypeComboBox.count()
for i in range(items):
self.gui.filterTypeComboBox.removeItem(0)
self.gui.filterTypeComboBox.addItems(self.firFilters)
# If the last filter type was valid for this window type,
# go back to it; otherwise, reset.
try:
index = self.firFilters.index(currenttype)
self.gui.filterTypeComboBox.setCurrentIndex(index)
except ValueError:
pass
# Tell the GUI it's OK to start sending this signal again.
self.gui.filterTypeComboBox.blockSignals(False)
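# Top-level design entry point: read the sample rate and gain from the GUI,
# then dispatch to the FIR or IIR designer depending on the filter selection.
# IIR design warnings (e.g. BadCoefficients) are caught and shown in a dialog.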
def design(self):
ret = True
fs,r = getfloat(self.gui.sampleRateEdit.text())
ret = r and ret
gain,r = getfloat(self.gui.filterGainEdit.text())
ret = r and ret
winstr = self.gui.filterDesignTypeComboBox.currentText()
ftype = self.gui.filterTypeComboBox.currentText()
fsel = self.gui.fselectComboBox.currentText()
if (fsel == "FIR"):
self.b, self.a = [],[]
if(ret):
self.design_fir(ftype, fs, gain, winstr)
elif (fsel.startswith("IIR")):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
self.design_iir()
if len(w):
reply = QtGui.QMessageBox.information(self, "BadCoefficients",
str(w[-1].message),
QtGui.QMessageBox.Ok)
# Do FIR design.
def design_fir(self, ftype, fs, gain, winstr):
self.iir = False
self.cpicker.set_iir(False)
self.cpicker2.set_iir(False)
if(winstr == "Equiripple"):
designer = {"Low Pass" : design_opt_lpf,
"Band Pass" : design_opt_bpf,
"Complex Band Pass" : design_opt_cbpf,
"Band Notch" : design_opt_bnf,
"Half Band" : design_opt_hb,
"High Pass" : design_opt_hpf}
taps,params,r = designer[ftype](fs, gain, self)
else:
designer = {"Low Pass" : design_win_lpf,
"Band Pass" : design_win_bpf,
"Complex Band Pass" : design_win_cbpf,
"Band Notch" : design_win_bnf,
"High Pass" : design_win_hpf,
"Half Band" : design_win_hb,
"Root Raised Cosine" : design_win_rrc,
"Gaussian" : design_win_gaus}
wintype = self.filterWindows[winstr]
taps,params,r = designer[ftype](fs, gain, wintype, self)
if(r):
if self.gridview:
self.params = params
self.update_fft(taps, params)
self.set_mfmagresponse()
self.set_mttaps()
self.gui.nTapsEdit.setText(str(self.taps.size))
else:
self.draw_plots(taps,params)
zeros = self.get_zeros()
poles = self.get_poles()
self.gui.pzPlot.insertZeros(zeros)
self.gui.pzPlot.insertPoles(poles)
self.gui.mpzPlot.insertZeros(zeros)
self.gui.mpzPlot.insertPoles(poles)
self.update_fcoeff()
# self.set_drawideal()
# Return taps if callback is enabled.
if self.callback:
retobj = ApiObject()
retobj.update_all("fir", self.params, self.taps, 1)
self.callback(retobj)
# Do IIR design.
def design_iir(self):
iirftype = self.gui.iirfilterTypeComboBox.currentText()
iirbtype = self.gui.iirfilterBandComboBox.currentText()
atype = self.gui.adComboBox.currentText()
self.taps = []
self.iir = True
ret = True
params = []
besselparams = []
self.cpicker.set_iir(True)
self.cpicker2.set_iir(True)
iirft = {"Elliptic" : 'ellip',
"Butterworth" : 'butter',
"Chebyshev-1" : 'cheby1',
"Chebyshev-2" : 'cheby2',
"Bessel" : 'bessel' }
sanalog = {"Analog (rad/second)" : 1,
"Digital (normalized 0-1)" : 0 }
paramtype = { 1 : "analog",
0 : "digital" }
iirabbr = {
"Low Pass" : "lpf",
"High Pass" : "hpf",
"Band Pass" : "bpf",
"Band Stop" : "bnf" }
iirboxes = {"Low Pass" : [float(self.gui.iirendofLpfPassBandEdit.text()),
float(self.gui.iirstartofLpfStopBandEdit.text()),
float(self.gui.iirLpfPassBandAttenEdit.text()),
float(self.gui.iirLpfStopBandRippleEdit.text())],
"High Pass" : [float(self.gui.iirstartofHpfPassBandEdit.text()),
float(self.gui.iirendofHpfStopBandEdit.text()),
float(self.gui.iirHpfPassBandAttenEdit.text()),
float(self.gui.iirHpfStopBandRippleEdit.text())],
"Band Pass" : [float(self.gui.iirstartofBpfPassBandEdit.text()),
float(self.gui.iirendofBpfPassBandEdit.text()),
float(self.gui.iirendofBpfStopBandEdit1.text()),
float(self.gui.iirstartofBpfStopBandEdit2.text()),
float(self.gui.iirBpfPassBandAttenEdit.text()),
float(self.gui.iirBpfStopBandRippleEdit.text())],
"Band Stop" : [float(self.gui.iirendofBsfPassBandEdit1.text()),
float(self.gui.iirstartofBsfPassBandEdit2.text()),
float(self.gui.iirstartofBsfStopBandEdit.text()),
float(self.gui.iirendofBsfStopBandEdit.text()),
float(self.gui.iirBsfPassBandAttenEdit.text()),
float(self.gui.iirBsfStopBandRippleEdit.text())] }
# Remove Ideal band-diagrams if IIR.
# self.set_drawideal()
for i in range(len(iirboxes[iirbtype])):
params.append(iirboxes[iirbtype][i])
if len(iirboxes[iirbtype]) == 6:
params = [params[:2],params[2:4],params[4],params[5]]
if(iirftype == "Bessel"):
if iirbtype == "Low Pass" or iirbtype == "High Pass":
besselparams.append(float(self.gui.iirbesselcritEdit1.text()))
else:
besselparams.append(float(self.gui.iirbesselcritEdit1.text()))
besselparams.append(float(self.gui.iirbesselcritEdit2.text()))
order = int(self.gui.besselordEdit.text())
try:
(self.b, self.a) = signal.iirfilter(order, besselparams, btype=iirbtype.replace(' ', '').lower(),
analog=sanalog[atype], ftype=iirft[iirftype], output='ba')
except Exception as e:
reply = QtGui.QMessageBox.information(self, "IIR design error", e.args[0],
QtGui.QMessageBox.Ok)
(self.z, self.p, self.k) = signal.tf2zpk(self.b, self.a)
iirparams = {"filttype": iirft[iirftype], "bandtype": iirabbr[iirbtype], "filtord": order,
"paramtype": paramtype[sanalog[atype]], "critfreq": besselparams}
else:
try:
(self.b, self.a) = signal.iirdesign(params[0], params[1], params[2], params[3],
analog=sanalog[atype], ftype=iirft[iirftype], output='ba')
except Exception as e:
reply = QtGui.QMessageBox.information(self, "IIR design error", e.args[0],
QtGui.QMessageBox.Ok)
(self.z, self.p, self.k) = signal.tf2zpk(self.b, self.a)
# Create parameters.
iirparams = {"filttype": iirft[iirftype], "bandtype": iirabbr[iirbtype],
"paramtype": paramtype[sanalog[atype]], "pbedge": params[0], "sbedge": params[1],
"gpass": params[2], "gstop": params[3]}
self.gui.pzPlot.insertZeros(self.z)
self.gui.pzPlot.insertPoles(self.p)
self.gui.mpzPlot.insertZeros(self.z)
self.gui.mpzPlot.insertPoles(self.p)
self.iir_plot_all(self.z, self.p, self.k)
self.update_fcoeff()
self.gui.nTapsEdit.setText("-")
self.params = iirparams
# Return api_object if callback is enabled.
if self.callback:
retobj = ApiObject()
retobj.update_all("iir", self.params, (self.b, self.a), 1)
self.callback(retobj)
# IIR Filter design plot updates.
def iir_plot_all(self,z,p,k):
self.b,self.a = signal.zpk2tf(z,p,k)
w,h = signal.freqz(self.b,self.a)
self.fftdB = 20 * np.log10 (abs(h))
self.freq = w / max(w)
self.fftDeg = np.unwrap(np.arctan2(np.imag(h),np.real(h)))
self.groupDelay = -np.diff(self.fftDeg)
self.phaseDelay = -self.fftDeg[1:] / self.freq[1:]
if self.gridview:
self.set_mfmagresponse()
self.set_mtimpulse()
else:
self.update_freq_curves()
self.update_phase_curves()
self.update_group_curves()
self.update_pdelay_curves()
self.update_step_curves()
self.update_imp_curves()
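# Recompute the frequency-domain curves when the FFT-size edit box changes.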
def nfft_edit_changed(self, nfft):
infft,r = getint(nfft)
if(r and (infft != self.nfftpts)):
self.nfftpts = infft
self.update_freq_curves()
# def tab_changed(self, tab):
# if(tab == 0):
# self.update_freq_curves()
# if(tab == 1):
# self.update_time_curves()
# if(tab == 2):
# self.update_phase_curves()
# if(tab == 3):
# self.update_group_curves()
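# Compute the filter responses from the taps: an Npts-point FFT gives the
# magnitude in dB and the unwrapped phase; group delay is the negative
# difference of the phase, and phase delay is -phase/frequency (skipping DC).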
def get_fft(self, fs, taps, Npts):
fftpts = fft_detail.fft(taps, Npts)
self.freq = np.linspace(start=0, stop=fs, num=Npts, endpoint=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.fftdB = 20.0*np.log10(abs(fftpts))
if any(self.fftdB == float('-inf')):
sys.stderr.write('Filter design failed (taking log10 of 0).\n')
self.fftdB = np.zeros([len(fftpts)])
self.fftDeg = np.unwrap(np.angle(fftpts))
self.groupDelay = -np.diff(self.fftDeg)
self.phaseDelay = -self.fftDeg[1:] / self.freq[1:]
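# Stem curves were created with connect='pairs', so they expect the x values
# repeated and the y values interleaved with zeros, e.g. (illustrative)
# x = [0, 0, 1, 1, ...], y = [0, t0, 0, t1, ...]; each pair then draws one
# vertical line from the axis up to the tap value.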
def update_time_curves(self):
ntaps = len(self.taps)
if(ntaps < 1):
return
# Set Data.
if(type(self.taps[0]) == np.complex128):
self.rcurve.setData(np.arange(ntaps), self.taps.real)
self.icurve.setData(np.arange(ntaps), self.taps.imag)
else:
self.rcurve.setData(np.arange(ntaps), self.taps)
self.icurve.setData([],[])
if self.mttaps:
if(type(self.taps[0]) == np.complex128):
self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(self.taps.real.shape[0], dtype=int),
self.taps.real)).flatten())
self.mtimecurve.setData(np.arange(ntaps), self.taps.real)
self.mtimecurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(self.taps.imag.shape[0], dtype=int),
self.taps.imag)).flatten())
self.mtimecurve_i.setData(np.arange(ntaps), self.taps.imag)
else:
self.mtimecurve.setData(np.arange(ntaps), self.taps)
self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(self.taps.shape[0], dtype=int),
self.taps)).flatten())
self.mtimecurve_i_stems.setData([],[])
self.mtimecurve_i.setData([],[])
# Configure plots.
if self.mtoverlay:
self.mplots['mTIME'].setMouseEnabled(x=True, y=True)
else:
self.mplots['mTIME'].setMouseEnabled(x=False, y=False)
self.mplots['mTIME'].showAxis('right', False)
# Set plot limits and reset axis zoom.
self.plot_auto_limit(self.plots['TIME'], xMin=0, xMax=ntaps)
self.plot_auto_limit(self.mplots['mTIME'], xMin=0, xMax=ntaps)
def update_step_curves(self):
ntaps = len(self.taps)
if((ntaps < 1) and (not self.iir)):
return
# Set Data.
if self.iir:
stepres = self.step_response(self.b,self.a)
ntaps = 50
else:
stepres = self.step_response(self.taps)
if(type(stepres[0]) == np.complex128):
self.steprescurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(stepres.real.shape[0], dtype=int),
stepres.real)).flatten())
self.steprescurve.setData(np.arange(ntaps), stepres.real)
self.steprescurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(stepres.imag.shape[0], dtype=int),
stepres.imag)).flatten())
self.steprescurve_i.setData(np.arange(ntaps), stepres.imag)
else:
self.steprescurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(stepres.shape[0], dtype=int),
stepres)).flatten())
self.steprescurve.setData(np.arange(ntaps), stepres)
self.steprescurve_i_stems.setData([],[])
self.steprescurve_i.setData([],[])
if self.mtstep:
if(type(stepres[0]) == np.complex128):
self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(stepres.real.shape[0], dtype=int),
stepres.real)).flatten())
self.mtimecurve.setData(np.arange(ntaps), stepres.real)
self.mtimecurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(stepres.imag.shape[0], dtype=int),
stepres.imag)).flatten())
self.mtimecurve_i.setData(np.arange(ntaps), stepres.imag)
else:
self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(stepres.shape[0], dtype=int),
stepres)).flatten())
self.mtimecurve.setData(np.arange(ntaps), stepres)
self.mtimecurve_i_stems.setData([],[])
self.mtimecurve_i.setData([],[])
# Configure plots.
if self.mtoverlay:
self.mplots['mTIME'].setMouseEnabled(x=True, y=True)
else:
self.mplots['mTIME'].setMouseEnabled(x=False, y=False)
self.mplots['mTIME'].showAxis('right', False)
# Set plot limits and reset axis zoom.
self.plot_auto_limit(self.plots['STEPRES'], xMin=0, xMax=ntaps)
self.plot_auto_limit(self.mplots['mTIME'], xMin=0, xMax=ntaps)
def update_imp_curves(self):
ntaps = len(self.taps)
if((ntaps < 1) and (not self.iir)):
return
# Set Data.
if self.iir:
impres = self.impulse_response(self.b, self.a)
ntaps = 50
else:
impres = self.impulse_response(self.taps)
if(type(impres[0]) == np.complex128):
self.imprescurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.real.shape[0], dtype=int),
impres.real)).flatten())
self.imprescurve.setData(np.arange(ntaps), impres.real)
self.imprescurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.imag.shape[0], dtype=int),
impres.imag)).flatten())
self.imprescurve_i.setData(np.arange(ntaps), impres.imag)
else:
self.imprescurve_stems.setData(np.repeat(np.arange(ntaps), 2),
                                       np.dstack((np.zeros(impres.shape[0], dtype=int),
                                                  impres)).flatten())
self.imprescurve.setData(np.arange(ntaps), impres)
self.imprescurve_i_stems.setData([],[])
self.imprescurve_i.setData([],[])
if self.mtimpulse:
if(type(impres[0]) == np.complex128):
self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.real.shape[0], dtype=int),
impres.real)).flatten())
self.mtimecurve.setData(np.arange(ntaps), impres.real)
self.mtimecurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.imag.shape[0], dtype=int),
impres.imag)).flatten())
self.mtimecurve_i.setData(np.arange(ntaps), impres.imag)
else:
self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.shape[0], dtype=int),
impres)).flatten())
self.mtimecurve.setData(np.arange(ntaps), impres)
self.mtimecurve_i_stems.setData([],[])
self.mtimecurve_i.setData([],[])
# Configure plots.
if self.mtoverlay:
self.mplots['mTIME'].setMouseEnabled(x=True, y=True)
else:
self.mplots['mTIME'].setMouseEnabled(x=False, y=False)
self.mplots['mTIME'].showAxis('right', False)
# Set plot limits and reset axis zoom.
self.plot_auto_limit(self.plots['IMPRES'], xMin=0, xMax=ntaps)
self.plot_auto_limit(self.mplots['mTIME'], xMin=0, xMax=ntaps)
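# In GridView overlay mode, redraw whichever frequency-domain response was
# plotted last onto the secondary (right-axis) curve; otherwise hide the
# right axis and clear the secondary curve.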
def plot_secondary(self):
if (self.mfoverlay):
if self.last_mfreq_plot == "freq":
self.mfmagresponse = True
self.update_freq_curves(True)
elif self.last_mfreq_plot == "phase":
self.mfphaseresponse = True
self.update_phase_curves(True)
elif self.last_mfreq_plot == "group":
self.mfgroupdelay = True
self.update_group_curves(True)
elif self.last_mfreq_plot == "pdelay":
self.mfphasedelay = True
self.update_pdelay_curves(True)
self.mplots['mFREQ'].showAxis('right', True)
else:
self.mplots['mFREQ'].setMouseEnabled(x=False, y=False)
self.mplots['mFREQ'].showAxis('right', False)
self.mfreqcurve2.setData([],[])
def update_freq_curves(self, secondary=False):
npts = len(self.fftdB)
if(npts < 1):
return
# Set Data.
if self.iir:
self.freqcurve.setData(self.freq[:npts-1], self.fftdB[:npts-1])
else:
self.freqcurve.setData(self.freq[:int(npts//2)], self.fftdB[:int(npts//2)])
if self.mfmagresponse:
curve = self.mfreqcurve
if secondary:
curve = self.mfreqcurve2
if self.iir:
curve.setData(self.freq[:npts-1], self.fftdB[:npts-1])
else:
curve.setData(self.freq[:int(npts//2)], self.fftdB[:int(npts//2)])
# Set axes to new scales.
# Set plot limits and reset axis zoom.
if self.iir:
xmax = self.freq[npts-1]
else:
xmax = self.freq[npts//2]
xmin = self.freq[0]
self.plot_auto_limit(self.plots['FREQ'], xMin=xmin, xMax=xmax)
self.plot_auto_limit(self.mplots['mFREQ'], xMin=xmin, xMax=xmax)
if secondary:
self.mplots['mFREQ'].setLabel('right', 'Magnitude', units='dB', **self.labelstyle9b)
else:
self.mplots['mFREQ'].setLabel('left', 'Magnitude', units='dB', **self.labelstyle9b)
if not secondary:
self.plot_secondary()
self.last_mfreq_plot = 'freq'
def update_phase_curves(self, secondary=False):
npts = len(self.fftDeg)
if(npts < 1):
return
# Set Data.
if self.iir:
self.phasecurve.setData(self.freq[:npts-1], self.fftDeg[:npts-1])
else:
self.phasecurve.setData(self.freq[:int(npts//2)], self.fftDeg[:int(npts//2)])
if self.mfphaseresponse:
curve = self.mfreqcurve
if secondary:
curve = self.mfreqcurve2
if self.iir:
curve.setData(self.freq[:npts-1], self.fftDeg[:npts-1])
else:
curve.setData(self.freq[:int(npts//2)], self.fftDeg[:int(npts//2)])
# Set plot limits and reset axis zoom.
if self.iir:
xmax = self.freq[npts-1]
else:
xmax = self.freq[npts//2]
xmin = self.freq[0]
self.plot_auto_limit(self.plots['PHASE'], xMin=xmin, xMax=xmax)
self.plot_auto_limit(self.mplots['mFREQ'], xMin=xmin, xMax=xmax)
# Set Axis title.
if secondary:
self.mplots['mFREQ'].setLabel('right', 'Phase', units='Radians', **self.labelstyle9b)
else:
self.mplots['mFREQ'].setLabel('left', 'Phase', units='Radians', **self.labelstyle9b)
if not secondary:
self.plot_secondary()
self.last_mfreq_plot = 'phase'
def update_group_curves(self, secondary=False):
npts = len(self.groupDelay)
if(npts < 1):
return
# Set Data.
if self.iir:
self.groupcurve.setData(self.freq[:npts-1], self.groupDelay[:npts-1])
else:
self.groupcurve.setData(self.freq[:int(npts//2)], self.groupDelay[:int(npts//2)])
if self.mfgroupdelay:
curve = self.mfreqcurve
if secondary:
curve = self.mfreqcurve2
if self.iir:
curve.setData(self.freq[:npts-1], self.groupDelay[:npts-1])
else:
curve.setData(self.freq[:int(npts//2)], self.groupDelay[:int(npts//2)])
# Configure plots.
if self.mtoverlay:
self.mplots['mFREQ'].setMouseEnabled(x=True, y=True)
else:
self.mplots['mFREQ'].setMouseEnabled(x=False, y=False)
self.mplots['mFREQ'].showAxis('right', False)
# Set plot limits and reset axis zoom.
if self.iir:
xmax = self.freq[npts-1]
else:
xmax = self.freq[npts//2]
xmin = self.freq[0]
self.plot_auto_limit(self.plots['GROUP'], xMin=xmin, xMax=xmax)
self.plot_auto_limit(self.mplots['mFREQ'], xMin=xmin, xMax=xmax)
# Set Axis title.
if secondary:
self.mplots['mFREQ'].setLabel('right', 'Delay', units='seconds', **self.labelstyle9b)
else:
self.mplots['mFREQ'].setLabel('left', 'Delay', units='seconds', **self.labelstyle9b)
if not secondary:
self.plot_secondary()
self.last_mfreq_plot = 'group'
def update_pdelay_curves(self, secondary=False):
npts = len(self.phaseDelay)
if(npts < 1):
return
# Set Data.
if self.iir:
self.pdelaycurve.setData(self.freq[:npts-1], self.phaseDelay[:npts-1])
else:
self.pdelaycurve.setData(self.freq[:int(npts//2)], self.phaseDelay[:int(npts//2)])
if self.mfphasedelay:
curve = self.mfreqcurve
if secondary:
curve = self.mfreqcurve2
if self.iir:
curve.setData(self.freq[:npts-1], self.phaseDelay[:npts-1])
else:
curve.setData(self.freq[:int(npts//2)], self.phaseDelay[:int(npts//2)])
# Set plot limits and reset axis zoom.
if self.iir:
xmax = self.freq[npts-1]
else:
xmax = self.freq[npts//2]
xmin = self.freq[0]
self.plot_auto_limit(self.plots['PDELAY'], xMin=xmin, xMax=xmax)
self.plot_auto_limit(self.mplots['mFREQ'], xMin=xmin, xMax=xmax)
# Set Axis title.
if secondary:
self.mplots['mFREQ'].setLabel('right', 'Phase Delay', **self.labelstyle9b)
else:
self.mplots['mFREQ'].setLabel('left', 'Phase Delay', **self.labelstyle9b)
if not secondary:
self.plot_secondary()
self.last_mfreq_plot = 'pdelay'
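# Clear any previous limits, auto-range the plot, then lock the limits to the
# auto-ranged view (or to the explicit overrides) so the user cannot pan or
# zoom beyond the data.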
def plot_auto_limit(self, plot, xMin=None, xMax=None, yMin=None, yMax=None):
plot.setLimits(xMin=None, xMax=None, yMin=None, yMax=None)
plot.autoRange()
view = plot.viewRange()
xmin = view[0][0] if xMin is None else xMin
xmax = view[0][1] if xMax is None else xMax
ymin = view[1][0] if yMin is None else yMin
ymax = view[1][1] if yMax is None else yMax
plot.setLimits(xMin=xmin, xMax=xmax, yMin=ymin, yMax=ymax)
def action_quick_access(self):
# Hides quick access widget if unselected.
if (self.gui.quickFrame.isHidden()):
self.gui.quickFrame.show()
else:
self.gui.quickFrame.hide()
def action_spec_widget(self):
# Hides spec widget if unselected.
if (self.gui.filterspecView.isHidden()):
self.gui.filterspecView.show()
else:
self.gui.filterspecView.hide()
def action_response_widget(self):
if (self.gui.tabGroup.isHidden()):
self.gui.tabGroup.show()
else:
self.gui.tabGroup.hide()
def action_design_widget(self):
# Hides design widget if unselected.
if (self.gui.filterFrame.isHidden()):
self.gui.filterFrame.show()
else:
self.gui.filterFrame.hide()
# Saves and attach the plots for comparison.
def set_bufferplots(self):
if (self.gui.checkKeepcur.checkState() == 0 ):
# Detach and delete all plots if unchecked.
for c in self.bufferplots:
c.detach()
self.replot_all()
self.bufferplots = []
else:
self.bufferplots = []
# Iterate through tabgroup children and copy curves.
for i in range(self.gui.tabGroup.count()):
page = self.gui.tabGroup.widget(i)
for item in page.children():
if isinstance(item, Qwt.QwtPlot):
# Change colours as both plots overlay.
colours = [QtCore.Qt.darkYellow,QtCore.Qt.black]
for c in item.itemList():
if isinstance(c, Qwt.QwtPlotCurve):
dup = Qwt.QwtPlotCurve()
dpen = c.pen()
dsym = c.symbol()
dsym.setPen(Qt.QPen(colours[0]))
dsym.setSize(Qt.QSize(6, 6))
dpen.setColor(colours[0])
del colours[0]
dup.setPen(dpen)
dup.setSymbol(dsym)
dup.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased)
dup.setData([c.x(i) for i in range(c.dataSize())],
[c.y(i) for i in range(c.dataSize())])
self.bufferplots.append(dup)
self.bufferplots[-1].attach(item)
def set_grid(self):
if (self.gui.checkGrid.checkState() == 0):
self.gridenable = False
for i in self.plots:
self.plots[i].showGrid(x=False, y=False)
for i in self.mplots:
self.mplots[i].showGrid(x=False, y=False)
else:
self.gridenable = True
if self.gridview:
for i in self.mplots:
self.mplots[i].showGrid(x=True, y=True)
else:
for i in self.plots:
self.plots[i].showGrid(x=True, y=True)
def set_actgrid(self):
if (self.gui.actionGrid_2.isChecked() == 0 ):
self.gridenable = False
for i in self.plots:
self.plots[i].showGrid(x=False, y=False)
for i in self.mplots:
self.mplots[i].showGrid(x=False, y=False)
else:
self.gridenable = True
if self.gridview:
for i in self.mplots:
self.mplots[i].showGrid(x=True, y=True)
else:
for i in self.plots:
self.plots[i].showGrid(x=True, y=True)
def set_magresponse(self):
if (self.gui.checkMagres.checkState() == 0 ):
self.magres = False
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.freqTab))
else:
self.magres = True
self.gui.tabGroup.addTab(self.gui.freqTab, _fromUtf8("Magnitude Response"))
self.update_freq_curves()
def set_actmagresponse(self):
if (self.gui.actionMagnitude_Response.isChecked() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.freqTab))
else:
self.gui.tabGroup.addTab(self.gui.freqTab, _fromUtf8("Magnitude Response"))
self.update_freq_curves()
def set_switchview(self):
if (self.gui.actionGridview.isChecked() == 0 ):
self.gridview = 0
self.set_defaultpen()
self.set_actgrid()
self.gui.stackedWindows.setCurrentIndex(0)
if self.iir:
self.iir_plot_all(self.z,self.p,self.k)
else:
self.draw_plots(self.taps,self.params)
else:
self.gridview = 1
self.set_actgrid()
self.gui.stackedWindows.setCurrentIndex(1)
self.update_freq_curves()
self.update_time_curves()
# self.set_drawideal()
def set_plotselect(self):
if (self.gui.actionPlot_select.isChecked() == 0 ):
self.gui.mfgroupBox.hide()
self.gui.mtgroupBox.hide()
self.gui.pzgroupBox.hide()
self.gui.mpzgroupBox.hide()
else:
self.gui.mfgroupBox.show()
self.gui.mtgroupBox.show()
self.gui.pzgroupBox.show()
self.gui.mpzgroupBox.show()
def replot_all(self):
self.plots['TIME'].replot()
self.mplots['mTIME'].replot()
self.plots['FREQ'].replot()
self.mplots['mFREQ'].replot()
self.plots['PHASE'].replot()
self.plots['GROUP'].replot()
self.plots['IMPRES'].replot()
self.plots['STEPRES'].replot()
self.plots['PDELAY'].replot()
def detach_allgrid(self):
for i in self.plots:
i.showGrid(x=False, y=False)
def set_mfmagresponse(self):
if self.mfoverlay:
self.mfmagresponse = True
else:
self.mfmagresponse = not(self.mfmagresponse)
# if not self.mfoverlay:
self.mfphasedelay = False
self.mfgroupdelay = False
self.mfphaseresponse = False
self.update_freq_curves()
def set_mfphaseresponse(self):
if self.mfoverlay:
self.mfphaseresponse = True
else:
self.mfphaseresponse = not(self.mfphaseresponse)
# if not self.mfoverlay:
self.mfphasedelay = False
self.mfgroupdelay = False
self.mfmagresponse = False
self.update_phase_curves()
def set_mfgroupdelay(self):
if self.mfoverlay:
self.mfgroupdelay = True
else:
self.mfgroupdelay = not(self.mfgroupdelay)
# if not self.mfoverlay:
self.mfphasedelay = False
self.mfphaseresponse = False
self.mfmagresponse = False
self.update_group_curves()
def set_mfphasedelay(self):
if self.mfoverlay:
self.mfphasedelay = True
else:
self.mfphasedelay = not(self.mfphasedelay)
# if not self.mfoverlay:
self.mfgroupdelay = False
self.mfphaseresponse = False
self.mfmagresponse = False
self.update_pdelay_curves()
def ifinlist(self,a,dlist):
for d in dlist:
if self.compare_instances(a,d):
return True
return False
def compare_instances(self,a,b):
return a is b
def detach_firstattached(self, plot):
items = plot.itemList()
plot.enableAxis(Qwt.QwtPlot.yRight)
if len(items) > 2:
yaxis=items[0].yAxis()
items[2].setPen(items[0].pen())
items[2].setYAxis(yaxis)
items[0].detach()
else:
items[1].setYAxis(Qwt.QwtPlot.yRight)
if plot is self.mplots['mFREQ']:
items[1].setPen(QtGui.QPen(QtCore.Qt.red, 1, QtCore.Qt.SolidLine))
self.set_actgrid()
def update_fft(self, taps, params):
self.params = params
self.taps = np.array(taps)
self.get_fft(self.params["fs"], self.taps, self.nfftpts)
def set_mfoverlay(self):
self.mfoverlay = not(self.mfoverlay)
def set_conj(self):
self.cpicker.set_conjugate()
def set_mconj(self):
self.cpicker2.set_conjugate()
def set_zeroadd(self):
self.cpicker.add_zero()
def set_mzeroadd(self):
self.cpicker2.add_zero()
def set_poleadd(self):
self.cpicker.add_pole()
def set_mpoleadd(self):
self.cpicker2.add_pole()
def set_delpz(self):
self.cpicker.delete_pz()
def set_mdelpz(self):
self.cpicker2.delete_pz()
def set_mttaps(self):
self.mttaps = not(self.mttaps)
if not self.mfoverlay:
self.mtstep = False
self.mtimpulse = False
self.update_time_curves()
def set_mtstep(self):
self.mtstep = not(self.mtstep)
if not self.mfoverlay:
self.mttaps = False
self.mtimpulse = False
self.update_step_curves()
def set_mtimpulse(self):
self.mtimpulse = not(self.mtimpulse)
if not self.mfoverlay:
self.mttaps = False
self.mtstep = False
self.update_imp_curves()
def set_gdelay(self):
if (self.gui.checkGdelay.checkState() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.groupTab))
else:
self.gui.tabGroup.addTab(self.gui.groupTab, _fromUtf8("Group Delay"))
self.update_freq_curves()
def set_actgdelay(self):
if (self.gui.actionGroup_Delay.isChecked() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.groupTab))
else:
self.gui.tabGroup.addTab(self.gui.groupTab, _fromUtf8("Group Delay"))
self.update_freq_curves()
def set_phase(self):
if (self.gui.checkPhase.checkState() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.phaseTab))
else:
self.gui.tabGroup.addTab(self.gui.phaseTab, _fromUtf8("Phase Response"))
self.update_freq_curves()
def set_actphase(self):
if (self.gui.actionPhase_Respone.isChecked() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.phaseTab))
else:
self.gui.tabGroup.addTab(self.gui.phaseTab, _fromUtf8("Phase Response"))
self.update_freq_curves()
def set_fcoeff(self):
if (self.gui.checkFcoeff.checkState() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.fcTab))
else:
self.gui.tabGroup.addTab(self.gui.fcTab, _fromUtf8("Filter Coefficients"))
self.update_fcoeff()
def set_actfcoeff(self):
if (self.gui.actionFilter_Coefficients.isChecked() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.fcTab))
else:
self.gui.tabGroup.addTab(self.gui.fcTab, _fromUtf8("Filter Coefficients"))
self.update_fcoeff()
def set_band(self):
if (self.gui.checkBand.checkState() == 0 ):
self.gui.filterspecView.removeTab(self.gui.filterspecView.indexOf(self.gui.bandDiagram))
else:
self.gui.filterspecView.addTab(self.gui.bandDiagram, _fromUtf8("Band Diagram"))
def set_actband(self):
if (self.gui.actionBand_Diagram.isChecked() == 0 ):
self.gui.filterspecView.removeTab(self.gui.filterspecView.indexOf(self.gui.bandDiagram))
else:
self.gui.filterspecView.addTab(self.gui.bandDiagram, _fromUtf8("Band Diagram"))
# def set_drawideal(self):
# fsel = self.gui.fselectComboBox.currentText()
# if self.gridview and not(self.mfoverlay):
# plot = self.mplots['mFREQ']
# else:
# plot = self.plots['FREQ']
# if (self.gui.actionIdeal_Band.isChecked() == 0 or fsel == "IIR(scipy)"):
# self.idbanditems.detach_allidealcurves(plot)
# elif(self.params):
# ftype = self.gui.filterTypeComboBox.currentText()
# self.idbanditems.attach_allidealcurves(plot)
# self.idbanditems.plotIdealCurves(ftype, self.params, plot)
# plot.replot()
def set_pzplot(self):
if (self.gui.checkPzplot.checkState() == 0 ):
self.gui.filterspecView.removeTab(self.gui.filterspecView.indexOf(self.gui.poleZero))
else:
self.gui.filterspecView.addTab(self.gui.poleZero, _fromUtf8("Pole-Zero Plot"))
def set_actpzplot(self):
if (self.gui.actionPole_Zero_Plot_2.isChecked() == 0 ):
self.gui.filterspecView.removeTab(self.gui.filterspecView.indexOf(self.gui.poleZero))
else:
self.gui.filterspecView.addTab(self.gui.poleZero, _fromUtf8("Pole-Zero Plot"))
def set_pdelay(self):
if (self.gui.checkPzplot.checkState() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.pdelayTab))
else:
self.gui.tabGroup.addTab(self.gui.pdelayTab, _fromUtf8("Phase Delay"))
def set_actpdelay(self):
if (self.gui.actionPhase_Delay.isChecked() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.pdelayTab))
else:
self.gui.tabGroup.addTab(self.gui.pdelayTab, _fromUtf8("Phase Delay"))
def set_impres(self):
if (self.gui.checkImpulse.checkState() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.impresTab))
else:
self.gui.tabGroup.addTab(self.gui.impresTab, _fromUtf8("Impulse Response"))
def set_actimpres(self):
if (self.gui.actionImpulse_Response.isChecked() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.impresTab))
else:
self.gui.tabGroup.addTab(self.gui.impresTab, _fromUtf8("Impulse Response"))
def set_stepres(self):
if (self.gui.checkStep.checkState() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.stepresTab))
else:
self.gui.tabGroup.addTab(self.gui.stepresTab, _fromUtf8("Step Response"))
def set_actstepres(self):
if (self.gui.actionStep_Response.isChecked() == 0 ):
self.gui.tabGroup.removeTab(self.gui.tabGroup.indexOf(self.gui.stepresTab))
else:
self.gui.tabGroup.addTab(self.gui.stepresTab, _fromUtf8("Step Response"))
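# Band-diagram items are either plain QWidgets (added with scene.addWidget())
# or QGraphicsItems (added directly with scene.addItem()).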
def populate_bandview(self,fitems):
for item in fitems:
if (item.isWidgetType()):
self.scene.addWidget(item)
else:
self.scene.addItem(item)
def remove_bandview(self):
for item in list(self.scene.items()):
self.scene.removeItem(item)
def set_fatten(self,atten):
ftype = self.gui.filterTypeComboBox.currentText()
if (ftype == "Low Pass"):
boxatten,r = getfloat(self.gui.lpfStopBandAttenEdit.text())
self.gui.lpfStopBandAttenEdit.setText(str(atten+boxatten))
if ftype == "High Pass":
boxatten,r = getfloat(self.gui.hpfStopBandAttenEdit.text())
self.gui.hpfStopBandAttenEdit.setText(str(atten+boxatten))
if ftype == "Band Pass":
boxatten,r = getfloat(self.gui.bpfStopBandAttenEdit.text())
self.gui.bpfStopBandAttenEdit.setText(str(atten+boxatten))
if ftype == "Band Notch":
boxatten,r = getfloat(self.gui.bnfStopBandAttenEdit.text())
self.gui.bnfStopBandAttenEdit.setText(str(atten+boxatten))
if ftype == "Complex Band Pass":
boxatten,r = getfloat(self.gui.bpfStopBandAttenEdit.text())
self.gui.bpfStopBandAttenEdit.setText(str(atten+boxatten))
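# Called when zeros/poles are dragged in the pole-zero plot. For FIR filters the
# taps are rebuilt from the zeros with numpy's poly1d (scaled by the original
# leading tap); for IIR filters the z/p/k model is updated and re-plotted.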
def set_curvetaps(self, zeros_poles):
zr, pl = zeros_poles
if self.iir:
self.z = zr
self.p = pl
self.iir_plot_all(self.z,self.p,self.k)
self.gui.mpzPlot.insertZeros(zr)
self.gui.mpzPlot.insertPoles(pl)
self.update_fcoeff()
if self.callback:
retobj = ApiObject()
retobj.update_all("iir", self.params, (self.b, self.a), 1)
self.callback(retobj)
else:
hz = poly1d(zr,r=1)
# print hz.c.
self.taps = hz.c*self.taps[0]
self.draw_plots(self.taps,self.params)
self.update_fcoeff()
# Update the pz-plot in the other view.
zeros = self.get_zeros()
poles = self.get_poles()
self.gui.mpzPlot.insertZeros(zeros)
self.gui.mpzPlot.insertPoles(poles)
self.gui.nTapsEdit.setText(str(self.taps.size))
if self.callback:
retobj = ApiObject()
retobj.update_all("fir", self.params, self.taps, 1)
self.callback(retobj)
def set_mcurvetaps(self, zeros_poles):
zr, pl = zeros_poles
if self.iir:
self.z = zr
self.p = pl
self.iir_plot_all(self.z,self.p,self.k)
self.gui.pzPlot.insertZeros(zr)
self.gui.pzPlot.insertPoles(pl)
self.update_fcoeff()
if self.callback:
retobj = ApiObject()
retobj.update_all("iir", self.params, (self.b, self.a), 1)
self.callback(retobj)
else:
hz = poly1d(zr,r=1)
# print hz.c.
self.taps = hz.c*self.taps[0]
if self.gridview:
self.update_fft(self.taps, self.params)
self.set_mfmagresponse()
self.set_mttaps()
else:
self.draw_plots(self.taps,self.params)
self.update_fcoeff()
# Update the pz-plot in the other view.
zeros = self.get_zeros()
poles = self.get_poles()
self.gui.pzPlot.insertZeros(zeros)
self.gui.pzPlot.insertPoles(poles)
self.gui.nTapsEdit.setText(str(self.taps.size))
if self.callback:
retobj = ApiObject()
retobj.update_all("fir", self.params, self.taps, 1)
self.callback(retobj)
def set_statusbar(self, point):
x, y = point
if x is None:
self.gui.pzstatusBar.showMessage("")
else:
self.gui.pzstatusBar.showMessage("X: "+str(x)+" Y: "+str(y))
def set_mstatusbar(self, point):
x, y = point
if x is None:
self.gui.mpzstatusBar.showMessage("")
else:
self.gui.mpzstatusBar.showMessage("X: "+str(x)+" Y: "+str(y))
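# For an FIR filter the zeros are the roots of the tap polynomial, and all
# len(taps)-1 poles sit at the origin.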
def get_zeros(self):
hz = poly1d(self.taps,r=0)
return hz.r
def get_poles(self):
if len(self.taps):
hp = zeros(len(self.taps)-1,complex)
return hp
else:
return []
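# Impulse response: filter a unit impulse with scipy.signal.lfilter. FIR
# responses use one sample per tap; IIR responses are truncated to 50 samples.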
def impulse_response(self, b, a=1):
length = len(b)
if self.iir:
length = 50
impulse = np.repeat(0., length)
impulse[0] = 1.
x = np.arange(0, length)
response = signal.lfilter(b, a, impulse)
return response
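# Step response: cumulative sum of the impulse response (equivalent to
# filtering a unit step).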
def step_response(self, b, a=1):
length = len(b)
if self.iir:
length = 50
impulse = np.repeat(0., length)
impulse[0] = 1.
x = np.arange(0, length)
response = signal.lfilter(b, a, impulse)
step = np.cumsum(response)
return step
def update_fcoeff(self):
fcoeff=""
if self.iir:
fcoeff="b = " + ','.join(str(e) for e in self.b) +"\na = " + ','.join(str(e) for e in self.a)
else:
fcoeff="taps = " + ','.join(str(e) for e in self.taps)
self.gui.filterCoeff.setText(fcoeff)
self.gui.mfilterCoeff.setText(fcoeff)
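# Save the design to CSV: one "restype" row (fir/iir), one row per parameter,
# then the coefficients ("taps" for FIR, or "b" and "a" rows for IIR), e.g.
# (illustrative) restype,fir / fs,32000.0 / taps,1.0,0.5,...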
def action_save_dialog(self):
file_dialog_output = QtGui.QFileDialog.getSaveFileName(self, "Save CSV Filter File", ".", "")
filename = file_dialog_output[0]
try:
handle = open(filename, "w")
except IOError:
reply = QtGui.QMessageBox.information(self, 'File Name',
("Could not save to file: %s" % filename),
QtGui.QMessageBox.Ok)
return
csvhandle = csv.writer(handle, delimiter=",")
# Indicate FIR/IIR for easy reading.
if self.iir:
csvhandle.writerow(["restype","iir"])
else:
csvhandle.writerow(["restype","fir"])
for k in list(self.params.keys()):
csvhandle.writerow([k, self.params[k]])
if self.iir:
csvhandle.writerow(["b",] + list(self.b))
csvhandle.writerow(["a",] + list(self.a))
else:
csvhandle.writerow(["taps",] + list(self.taps))
handle.close()
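# Load a design saved by action_save_dialog: rows are parsed back into params
# and coefficients, with a regex used to detect complex-valued entries, and the
# GUI widgets are repopulated to match the loaded filter.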
def action_open_dialog(self):
file_dialog_output = QtGui.QFileDialog.getOpenFileName(self, "Open CSV Filter File", ".", "")
# file_dialog_output returns a tuple of (filename, file filter),
# so check the selected filename rather than the tuple length.
filename = file_dialog_output[0]
if(len(filename) == 0):
return
try:
handle = open(filename, "r")
except IOError:
reply = QtGui.QMessageBox.information(self, 'File Name',
("Could not open file: %s" % filename),
QtGui.QMessageBox.Ok)
return
csvhandle = csv.reader(handle, delimiter=",")
b_a={}
taps = []
params = {}
for row in csvhandle:
if (row[0] == "restype"):
restype = row[1]
elif(row[0] == "taps"):
testcpx = re.findall(r"[+-]?\d+\.*\d*[Ee]?[-+]?\d+j", row[1])
if(len(testcpx) > 0): # it's a complex
taps = [complex(r) for r in row[1:]]
else:
taps = [float(r) for r in row[1:]]
elif(row[0] == "b" or row[0] == "a"):
testcpx = re.findall(r"[+-]?\d+\.*\d*[Ee]?[-+]?\d+j", row[1])
if(len(testcpx) > 0): # it's a complex
b_a[row[0]] = [complex(r) for r in row[1:]]
else:
b_a[row[0]]= [float(r) for r in row[1:]]
else:
testcpx = re.findall(r"[+-]?\d+\.*\d*[Ee]?[-+]?\d+j", row[1])
if(len(testcpx) > 0): # it's a complex
params[row[0]] = complex(row[1])
else: # assume it's a float.
try: # if it's not a float, its a string.
params[row[0]] = float(row[1])
except ValueError:
params[row[0]] = row[1]
handle.close()
if restype == "fir":
self.iir = False
self.gui.fselectComboBox.setCurrentIndex(0)
self.draw_plots(taps, params)
zeros = self.get_zeros()
poles = self.get_poles()
self.gui.pzPlot.insertZeros(zeros)
self.gui.pzPlot.insertPoles(poles)
self.gui.mpzPlot.insertZeros(zeros)
self.gui.mpzPlot.insertPoles(poles)
self.gui.sampleRateEdit.setText(str(params["fs"]))
self.gui.filterGainEdit.setText(str(params["gain"]))
# Set up GUI parameters for each filter type.
if(params["filttype"] == "lpf"):
self.gui.filterTypeComboBox.setCurrentIndex(0)
self.gui.filterDesignTypeComboBox.setCurrentIndex(int(params["wintype"]))
self.gui.endofLpfPassBandEdit.setText(str(params["pbend"]))
self.gui.startofLpfStopBandEdit.setText(str(params["sbstart"]))
self.gui.lpfStopBandAttenEdit.setText(str(params["atten"]))
if(params["wintype"] == self.EQUIRIPPLE_FILT):
self.gui.lpfPassBandRippleEdit.setText(str(params["ripple"]))
elif(params["filttype"] == "bpf"):
self.gui.filterTypeComboBox.setCurrentIndex(1)
self.gui.filterDesignTypeComboBox.setCurrentIndex(int(params["wintype"]))
self.gui.startofBpfPassBandEdit.setText(str(params["pbstart"]))
self.gui.endofBpfPassBandEdit.setText(str(params["pbend"]))
self.gui.bpfTransitionEdit.setText(str(params["tb"]))
self.gui.bpfStopBandAttenEdit.setText(str(params["atten"]))
if(params["wintype"] == self.EQUIRIPPLE_FILT):
self.gui.bpfPassBandRippleEdit.setText(str(params["ripple"]))
elif(params["filttype"] == "cbpf"):
self.gui.filterTypeComboBox.setCurrentIndex(2)
self.gui.filterDesignTypeComboBox.setCurrentIndex(int(params["wintype"]))
self.gui.startofBpfPassBandEdit.setText(str(params["pbstart"]))
self.gui.endofBpfPassBandEdit.setText(str(params["pbend"]))
self.gui.bpfTransitionEdit.setText(str(params["tb"]))
self.gui.bpfStopBandAttenEdit.setText(str(params["atten"]))
if(params["wintype"] == self.EQUIRIPPLE_FILT):
self.gui.bpfPassBandRippleEdit.setText(str(params["ripple"]))
elif(params["filttype"] == "bnf"):
self.gui.filterTypeComboBox.setCurrentIndex(3)
self.gui.filterDesignTypeComboBox.setCurrentIndex(int(params["wintype"]))
self.gui.startofBnfStopBandEdit.setText(str(params["sbstart"]))
self.gui.endofBnfStopBandEdit.setText(str(params["sbend"]))
self.gui.bnfTransitionEdit.setText(str(params["tb"]))
self.gui.bnfStopBandAttenEdit.setText(str(params["atten"]))
if(params["wintype"] == self.EQUIRIPPLE_FILT):
self.gui.bnfPassBandRippleEdit.setText(str(params["ripple"]))
elif(params["filttype"] == "hpf"):
self.gui.filterTypeComboBox.setCurrentIndex(4)
self.gui.filterDesignTypeComboBox.setCurrentIndex(int(params["wintype"]))
self.gui.endofHpfStopBandEdit.setText(str(params["sbend"]))
self.gui.startofHpfPassBandEdit.setText(str(params["pbstart"]))
self.gui.hpfStopBandAttenEdit.setText(str(params["atten"]))
if(params["wintype"] == self.EQUIRIPPLE_FILT):
self.gui.hpfPassBandRippleEdit.setText(str(params["ripple"]))
elif(params["filttype"] == "rrc"):
self.gui.filterTypeComboBox.setCurrentIndex(5)
self.gui.filterDesignTypeComboBox.setCurrentIndex(int(params["wintype"]))
self.gui.rrcSymbolRateEdit.setText(str(params["srate"]))
self.gui.rrcAlphaEdit.setText(str(params["alpha"]))
self.gui.rrcNumTapsEdit.setText(str(params["ntaps"]))
elif(params["filttype"] == "gaus"):
self.gui.filterTypeComboBox.setCurrentIndex(6)
self.gui.filterDesignTypeComboBox.setCurrentIndex(int(params["wintype"]))
self.gui.gausSymbolRateEdit.setText(str(params["srate"]))
self.gui.gausBTEdit.setText(str(params["bt"]))
self.gui.gausNumTapsEdit.setText(str(params["ntaps"]))
else:
self.iir = True
self.b, self.a = b_a["b"],b_a["a"]
(self.z,self.p,self.k) = signal.tf2zpk(self.b, self.a)
self.gui.pzPlot.insertZeros(self.z)
self.gui.pzPlot.insertPoles(self.p)
self.gui.mpzPlot.insertZeros(self.z)
self.gui.mpzPlot.insertPoles(self.p)
self.iir_plot_all(self.z,self.p,self.k)
self.update_fcoeff()
self.gui.nTapsEdit.setText("-")
self.params = params
# Set GUI for IIR type.
iirft = { "ellip" : 0,
"butter" : 1,
"cheby1" : 2,
"cheby2" : 3,
"bessel" : 4 }
paramtype = { "analog" : 1,
"digital" : 0 }
bandpos = {
"lpf" : 0,
"bpf" : 1,
"bnf" : 2,
"hpf" : 3}
iirboxes = {"lpf" : [self.gui.iirendofLpfPassBandEdit,
self.gui.iirstartofLpfStopBandEdit,
self.gui.iirLpfPassBandAttenEdit,
self.gui.iirLpfStopBandRippleEdit],
"hpf" : [self.gui.iirstartofHpfPassBandEdit,
self.gui.iirendofHpfStopBandEdit,
self.gui.iirHpfPassBandAttenEdit,
self.gui.iirHpfStopBandRippleEdit],
"bpf" : [self.gui.iirstartofBpfPassBandEdit,
self.gui.iirendofBpfPassBandEdit,
self.gui.iirendofBpfStopBandEdit1,
self.gui.iirstartofBpfStopBandEdit2,
self.gui.iirBpfPassBandAttenEdit,
self.gui.iirBpfStopBandRippleEdit],
"bnf" : [self.gui.iirendofBsfPassBandEdit1,
self.gui.iirstartofBsfPassBandEdit2,
self.gui.iirstartofBsfStopBandEdit,
self.gui.iirendofBsfStopBandEdit,
self.gui.iirBsfPassBandAttenEdit,
self.gui.iirBsfStopBandRippleEdit] }
self.gui.fselectComboBox.setCurrentIndex(1)
self.gui.iirfilterTypeComboBox.setCurrentIndex(iirft[params["filttype"]])
self.gui.iirfilterBandComboBox.setCurrentIndex(bandpos[params["bandtype"]])
if params["filttype"] == "bessel":
critfreq = list(map(float, params["critfreq"][1:-1].split(',')))
self.gui.besselordEdit.setText(str(params["filtord"]))
self.gui.iirbesselcritEdit1.setText(str(critfreq[0]))
self.gui.iirbesselcritEdit2.setText(str(critfreq[1]))
else:
self.gui.adComboBox.setCurrentIndex(paramtype[params["paramtype"]])
if len(iirboxes[params["bandtype"]]) == 4:
sdata = [params["pbedge"], params["sbedge"], params["gpass"], params["gstop"]]
else:
pbedge = list(map(float, params["pbedge"][1:-1].split(',')))
sbedge = list(map(float, params["sbedge"][1:-1].split(',')))
sdata = [pbedge[0], pbedge[1], sbedge[0],
sbedge[1], params["gpass"], params["gstop"]]
cboxes = iirboxes[params["bandtype"]]
for i in range(len(cboxes)):
cboxes[i].setText(str(sdata[i]))
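# Recompute the FFT from the new taps and refresh every single-plot view.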
def draw_plots(self, taps, params):
self.params = params
self.taps = np.array(taps)
if self.params:
self.get_fft(self.params["fs"], self.taps, self.nfftpts)
self.update_time_curves()
self.update_freq_curves()
self.update_phase_curves()
self.update_group_curves()
self.update_pdelay_curves()
self.update_step_curves()
self.update_imp_curves()
self.gui.nTapsEdit.setText(str(self.taps.size))
class CustomViewBox(pg.ViewBox):
def __init__(self, *args, **kwds):
pg.ViewBox.__init__(self, *args, **kwds)
self.setMouseMode(self.RectMode)
# Reimplement right-click to zoom out.
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton:
self.autoRange()
def mouseDragEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton:
ev.ignore()
else:
pg.ViewBox.mouseDragEvent(self, ev)
def setup_options():
usage="%prog: [options] (input_filename)"
description = ""
parser = OptionParser(conflict_handler="resolve",
usage=usage, description=description)
return parser
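# Run the designer either standalone (blocking until the window closes and
# returning an ApiObject with the final design) or embedded: when a callback is
# supplied, the widget is returned immediately and design updates are delivered
# through the callback.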
def launch(args, callback=None, restype=""):
parser = setup_options()
(options, args) = parser.parse_args ()
if callback is None:
app = Qt.QApplication(args)
gplt = gr_plot_filter(options, callback, restype)
app.exec_()
if gplt.iir:
retobj = ApiObject()
retobj.update_all("iir", gplt.params, (gplt.b, gplt.a), 1)
return retobj
else:
retobj = ApiObject()
retobj.update_all("fir", gplt.params, gplt.taps, 1)
return retobj
else:
gplt = gr_plot_filter(options, callback, restype)
return gplt
def main(args):
parser = setup_options()
(options, args) = parser.parse_args ()
app = Qt.QApplication(args)
gplt = gr_plot_filter(options)
app.exec_()
app.deleteLater()
sys.exit()
if __name__ == '__main__':
main(sys.argv)
|
trabucayre/gnuradio
|
gr-filter/python/filter/design/filter_design.py
|
Python
|
gpl-3.0
| 90,858
|
[
"Gaussian"
] |
6267ebd9c311c1e75ad283e0fb91cb962d6cd03c15385c36e00d5ff82fc17aab
|
# Variables used throughout
# Think of them as 'Global-ish'
# Settings
settings_list = []
SFX = 0
MUSIC = 1
PARTICLES = 2
WORLD_UNLOCKED = 3
SENSITIVITY = 4
SETTING_FULLSCREEN = 5
# MUSIC
MENU_MUSIC = 0
WORLD_ONE = 1
WORLD_TWO = 2
WORLD_THREE = 3
BOSS_MUSIC = 4
# Screen resolution stuff here
SCREEN_WIDTH = 1024
SCREEN_HEIGHT = 768
FULLSCREEN = True
ANTI_ALIAS = True
MOUSE_DEFAULT_POSITION = (512, 384)
FRAMES_PER_SECOND = 30.0
# Sound Channel Constants
MUSIC_CHANNEL = 0
PLAYER_CHANNEL = 1
OW_CHANNEL = 2
BAAKE_CHANNEL = 3
BOSS_CHANNEL = 4
PICKUP_CHANNEL = 5
# Boundary Stuff here
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
BOUND_STYLE_NONE = 0
BOUND_STYLE_CLAMP = 1
BOUND_STYLE_WRAP = 2
BOUND_STYLE_REFLECT = 3
BOUND_STYLE_KILL = 4
BOUND_STYLE_CUSTOM = 5
# Bullet constants
COLLIDE_STYLE_HURT = 0
COLLIDE_STYLE_REFLECT = 1
COLLIDE_STYLE_NOVA = 2
COLLIDE_STYLE_NONE = 3
BULLET_SPEED = 30
# Actor types, for use in spawning
ACTOR_NONE = -1
ACTOR_PLAYER = 0
ACTOR_BULLET = 1
ACTOR_MOONO = 2
ACTOR_BAAKE = 3
ACTOR_ROKUBI = 4
ACTOR_BATTO = 5
ACTOR_HAOYA = 6
ACTOR_BOKKO = 7
ACTOR_HAKTA = 8
ACTOR_RAAYU = 9
ACTOR_PAAJO = 10
ACTOR_YUREI = 11
ACTOR_BOSS_TUT = 12
ACTOR_BAAKE_BOSS = 13
ACTOR_MOONO_BOSS = 14
# Actor types, for use in collision
#ACTOR_PLAYER = 0
#ACTOR_BULLET = 1
ACTOR_TYPE_ENEMY = 2
ACTOR_TYPE_BAAKE = 3
ACTOR_TYPE_PICKUP = 4
ACTOR_TYPE_BOSS = 5
# Menu Constants
START_GAME = True
EXIT_GAME = False
# Font alignment enumerations
TOP_LEFT = 0
TOP_MIDDLE = 1
TOP_RIGHT = 2
CENTER_LEFT = 3
CENTER_MIDDLE = 4
CENTER_RIGHT = 5
BOTTOM_LEFT = 6
BOTTOM_MIDDLE = 7
BOTTOM_RIGHT = 8
# Menu Enumerations
START_GAME = 1
WORLD_MENU = 2
OPTION_MENU = 3
CREDIT_MENU = 4
EXIT_GAME = 9
RESUME_GAME = 1
WORLD1 = 2
WORLD2 = 3
WORLD3 = 4
TUTORIAL = 1
SOUND_MENU = 2
DISPLAY_MENU = 1
TOGGLE_SFX = 1
TOGGLE_MUSIC = 2
TOGGLE_PARTICLES = 1
TOGGLE_FULLSCREEN = 2
CHANGE_SENSITIVITY = 4
EXIT_OPTIONS = 9
ENABLE_JOYSTICK = 1
ENABLE_MOUSE = 2
ENABLE_KEYBOARD = 3
NEXT_WORLD = 2
HIGH_SCORE = 1
# Define font path
FONT_PATH = 'data/fonts/SF Espresso Shack Bold.ttf'
# Define a fill color RGB
FILL_COLOR = 233, 234, 187
FONT_COLOR = 43, 37, 22
FONT_INACTIVE_COLOR = 125, 108, 65
# World Constants
DEFEAT_STAGE = 15 * FRAMES_PER_SECOND
# Group enumerations
POWERUP_GROUP = 0
ENEMY_GROUP = 1
BOSS_GROUP = 2
TEXT_GROUP = 3
EFFECTS_GROUP = 4
# Temporary Enumerations for how many stages/levels are in the game
MAX_STAGE = 3 #+1 for a total of four stages
MAX_LEVEL = 4 #+1 for a total of four levels
MAX_WORLD = 3 #+1 for a total of four worlds
# Enumerations for actor_list
STAGE_SPAWNED = 0
ACTOR_TYPE = 1
SPAWN_RATE = 2
DEFAULT_SPAWN = 3
# Enumerations for enemy_list
TIME_TO_SPAWN = 0
#ACTOR_TYPE = 1
#MAX_SPAWN_RATE = 2
NUMBER_SPAWNED = 3
# Score enumerations
TUTORIAL_HIGH_SCORE = 0
WORLD_1_HIGH_SCORE = 1
WORLD_2_HIGH_SCORE = 2
WORLD_3_HIGH_SCORE = 3
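# Illustrative sketch, not part of the original file: the SFX/MUSIC/... constants above
# are indices into the parallel settings_list, so other modules presumably read settings
# like this (the stored values below are hypothetical):
#
#     settings_list = [1, 1, 1, 0, 5, 0]
#     if settings_list[MUSIC]:
#         pass  # e.g. start the current world's track on MUSIC_CHANNEL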
|
JoshuaSkelly/TroubleInCloudLand
|
utils/settings.py
|
Python
|
mit
| 2,861
|
[
"ESPResSo"
] |
18cd42d9d8c36b469cfb8b4dc61859ed9077559a70b9bdccef6fe07e107eea19
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import shutil
import subprocess
import tempfile
import textwrap
import filecmp
import pytest
from unittest import TestCase, skip, TestLoader, TextTestRunner
from urllib.parse import urlparse
from uuid import uuid4
import os, sys
import argparse
import collections
import timeout_decorator
import urllib.request, urllib.error, urllib.parse
import glob
import traceback
import io
from datetime import datetime
import tsv
from toil_vg.vg_mapeval import get_default_mapeval_options, make_mapeval_plan, run_mapeval
from toil_vg.vg_toil import parse_args
from toil_vg.context import Context
from toil_vg.vg_common import make_url, toil_call
from toil_vg.vg_construct import run_make_haplo_thread_graphs
from toil.common import Toil
from toil.job import Job
log = logging.getLogger(__name__)
class VGCITest(TestCase):
"""
Continuous Integration VG tests. All depend on toil-vg being installed, along with
toil[aws,mesos]. They are somewhat derived from the toil-vg unittests, but are
much slower.
"""
def setUp(self):
# Make sure logging is available for all the tests
logging.basicConfig()
self.workdir = tempfile.mkdtemp()
# for checking calling f1
self.f1_threshold = 0.015
# What (additional) portion of reads are allowed to get worse scores
# when moving to a more inclusive reference?
self.worse_threshold = 0.005
self.input_store = 'https://vg-data.s3.amazonaws.com/bakeoff'
self.vg_docker = None
self.container = None # Use default in toil-vg, which is Docker
self.verify = True
self.do_teardown = True
self.baseline = 's3://vg-data/vg_ci/vgci_regression_baseline'
self.cores = 8
self.sim_chunk_size = 100000
self.force_outstore = False
self.loadCFG()
# These are samples that are in 1KG but not in the bakeoff snp1kg graphs.
self.bakeoff_removed_samples = set(['NA128{}'.format(x) for x in range(77, 94)])
def tearDown(self):
if self.do_teardown:
shutil.rmtree(self.workdir)
def loadCFG(self):
""" It's a hassle passing parameters through pytest. Hack
around for now by loading from a file of key/value pairs. """
if os.path.isfile('vgci_cfg.tsv'):
with io.open('vgci_cfg.tsv', 'r', encoding='utf8') as f:
for line in f:
toks = line.split()
if len(toks) == 2 and toks[0][0] != '#':
# override vg docker (which defaults to value from vg_config.py)
if toks[0] == 'vg-docker-version':
self.vg_docker = toks[1]
# can use "Docker", "Singularity" or "None" (the string) as a container system
if toks[0] == 'container':
self.container = toks[1]
# don't verify output; tests will pass as long as they don't crash or time out
elif toks[0] == 'verify' and toks[1].lower() == 'false':
self.verify = False
# don't delete the working directory
elif toks[0] == 'teardown' and toks[1].lower() == 'false':
self.do_teardown = False
# override the working directory (defaults to temp)
elif toks[0] == 'workdir':
self.workdir = toks[1]
elif toks[0] == 'baseline':
self.baseline = toks[1]
elif toks[0] == 'cores':
self.cores = int(toks[1])
elif toks[0] == 'force_outstore' and toks[1].lower() == 'true':
self.force_outstore = True
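# Illustrative only, not part of the original file: loadCFG() reads optional overrides
# from a plain key/value file called vgci_cfg.tsv in the working directory. Each line is
# split on whitespace into exactly two tokens, and lines whose first token starts with
# '#' are skipped. A hypothetical file (example values only) could look like:
#
#     vg-docker-version   quay.io/vgteam/vg:example-tag
#     container           Docker
#     verify              false
#     teardown            false
#     cores               16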
def _toil_vg_io_opts(self):
""" Some common toil-vg options we always want to use """
opts = ['--realTimeLogging', '--realTimeStderr', '--logInfo', '--workDir', self.workdir]
if self.force_outstore:
opts += ['--force_outstore']
return opts
def _jobstore(self, tag = ''):
return os.path.join(self.workdir, 'jobstore{}'.format(tag))
def _outstore_name(self, tag = ''):
return 'outstore-{}'.format(tag)
def _outstore(self, tag = ''):
return os.path.join(self.workdir, self._outstore_name(tag))
def _input(self, filename):
return os.path.join(self.input_store, filename)
def _bakeoff_coords(self, region):
if region == 'BRCA1':
return 17, 43044293
elif region == 'BRCA2':
return 13, 32314860
elif region == 'SMA':
return 5, 69216818
elif region == 'LRC-KIR':
return 19, 54025633
elif region == 'MHC':
return 6, 28510119
elif 'CHR' in region:
return int(region.replace('CHR', '')), 0
return None, None
def _read_baseline_file(self, tag, path):
""" read a (small) text file from the baseline store """
if self.baseline.startswith('s3://'):
toks = self.baseline[5:].split('/')
bname = toks[0]
keyname = '/{}/outstore-{}/{}'.format('/'.join(toks[1:]), tag, path)
# Convert to a public HTTPS URL
url = 'https://{}.s3.amazonaws.com{}'.format(bname, keyname)
# And download it
try:
connection = urllib.request.urlopen(url)
return connection.read().decode('utf-8')
except urllib.error.HTTPError as e:
if e.code == 404 or e.code == 403:
# Baseline file doesn't yet exist. Give an empty string.
# Nonexistent things give 403 to prevent enumeration.
return ""
else:
# Something else is wrong
raise
else:
# Assume it's a raw path.
with io.open(os.path.join(self.baseline, 'outstore-{}'.format(tag), path), 'r', encoding='utf8') as f:
return f.read()
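# Worked example of the URL rewriting above (values are illustrative): with
# self.baseline = 's3://vg-data/vg_ci/vgci_regression_baseline', tag = 'BRCA1-snp1kg'
# and path = 'vcfeval_output_f1.txt', the method downloads
# https://vg-data.s3.amazonaws.com/vg_ci/vgci_regression_baseline/outstore-BRCA1-snp1kg/vcfeval_output_f1.txt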
def _read_baseline_float(self, tag, path, error_val = '-inf'):
""" read a single float from a file, returning -inf something went wrong """
try:
return float(self._read_baseline_file(tag, path).strip())
except:
return float(error_val)
def _get_remote_file(self, src, tgt):
"""
get a file from a store
src must be a URL.
"""
if not os.path.exists(os.path.dirname(tgt)):
os.makedirs(os.path.dirname(tgt))
if src.startswith('s3://'):
toks = src[5:].split('/')
bname = toks[0]
keyname = '/' + '/'.join(toks[1:])
# Convert to a public HTTPS URL
src = 'https://{}.s3.amazonaws.com{}'.format(bname, keyname)
log.info('Download {}...\n'.format(src))
with open(tgt, 'wb') as f:
# Download the file from the URL, in binary mode.
connection = urllib.request.urlopen(src)
shutil.copyfileobj(connection, f)
def _begin_message(self, name=None, is_tsv=False):
""" Used by mine-logs.py to flag that we're about to write something we want to mine
Anything in stdout that's not within these tags does not make it to the report """
token = '<VGCI'
if name:
token += ' name = "{}"'.format(name)
if is_tsv:
token += ' tsv = "True"'
token += '>'
print('\n{}'.format(token))
def _end_message(self):
""" Finish writing something mineable to stdout """
print('</VGCI>\n')
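# Illustrative only: a call like self._begin_message('map eval results', is_tsv=True)
# prints the line
#     <VGCI name = "map eval results" tsv = "True">
# the table rows are then printed normally, and self._end_message() closes the block
# with </VGCI>. mine-logs.py only mines stdout text that falls between these two tags.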
def _toil_vg_index(self, chrom, graph_path, xg_path, gcsa_path, misc_opts, dir_tag, file_tag):
"""
Wrap toil-vg index. Files passed are copied from store instead of
computed. If "SKIP" is used as a filename, don't create or copy that
index. Otherwise, pass None as a filename to compute that index.
"""
job_store = self._jobstore(dir_tag)
out_store = self._outstore(dir_tag)
opts = ' '.join(self._toil_vg_io_opts()) + ' '
if self.vg_docker:
opts += '--vg_docker {} '.format(self.vg_docker)
if self.container:
opts += '--container {} '.format(self.container)
if chrom:
opts += '--chroms {} '.format(chrom)
if graph_path:
opts += '--graphs {} '.format(graph_path)
if xg_path:
if xg_path != "SKIP":
self._get_remote_file(xg_path, os.path.join(out_store, os.path.basename(xg_path)))
else:
opts += '--xg_index '
if gcsa_path:
if gcsa_path != "SKIP":
self._get_remote_file(gcsa_path, os.path.join(out_store, os.path.basename(gcsa_path)))
self._get_remote_file(gcsa_path + '.lcp', os.path.join(out_store, os.path.basename(gcsa_path) + '.lcp'))
else:
opts += '--gcsa_index '
opts += '--index_name {}'.format(file_tag)
if misc_opts:
opts += ' {} '.format(misc_opts)
cmd = 'toil-vg index {} {} {}'.format(job_store, out_store, opts)
sys.stderr.write("Running toil-vg indexing: {}".format(cmd))
subprocess.check_call(cmd, shell=True)
def _toil_vg_run(self, sample_name, chrom, graph_path, xg_path, gcsa_path, fq_path,
true_vcf_path, fasta_path, interleaved, mapper, misc_opts, genotype, tag):
""" Wrap toil-vg run as a shell command. Expects reads to be in single fastq
inputs can be None if toil-vg supports not having them (ie don't need to
include gcsa_path if want to reindex)
"""
job_store = self._jobstore(tag)
out_store = self._outstore(tag)
opts = ' '.join(self._toil_vg_io_opts()) + ' '
if self.vg_docker:
opts += '--vg_docker {} '.format(self.vg_docker)
if self.container:
opts += '--container {} '.format(self.container)
if chrom:
opts += '--chroms {} '.format(chrom)
if graph_path:
opts += '--graphs {} '.format(graph_path)
if xg_path:
opts += '--xg_index {} '.format(xg_path)
if gcsa_path:
opts += '--gcsa_index {} '.format(gcsa_path)
if fq_path:
opts += '--fastq {} '.format(fq_path)
if true_vcf_path:
opts += '--vcfeval_baseline {} '.format(true_vcf_path)
opts += '--vcfeval_fasta {} '.format(fasta_path)
opts += '--vcfeval_opts \' --ref-overlap --vcf-score-field GQ\' '
if interleaved:
opts += '--interleaved '
opts += '--mapper {} '.format(mapper)
if misc_opts:
opts += ' {} '.format(misc_opts)
opts += '--gcsa_index_cores {} --alignment_cores {} --calling_cores {} --vcfeval_cores {} '.format(
self.cores, self.cores, int(max(1, self.cores / 4)), int(max(1, self.cores / 2)))
cmd = 'toil-vg run {} {} {} {}'.format(job_store, sample_name, out_store, opts)
print(("Run toil-vg with {}".format(cmd)))
subprocess.check_call(cmd, shell=True)
def _make_gbwt(self, sample, vg_file, vcf_file, region, tag=''):
"""
Compute the GBWT index of the haplotypes in the given VCF. Returns the path to a GBWT file.
Excludes the given sample from the index.
"""
job_store = self._jobstore(tag)
out_store = self._outstore(tag)
out_store_name = self._outstore_name(tag)
# What do we want to override from the default toil-vg config?
overrides = argparse.Namespace(
# toil-vg options
vg_docker = self.vg_docker,
container = self.container,
# Toil options
realTimeLogging = True,
logLevel = "INFO",
maxCores = self.cores
)
# Make the context
context = Context(out_store, overrides)
# The unfiltered and filtered vcf file
uf_vcf_file = os.path.join(self.workdir, 'uf-' + os.path.basename(vcf_file))
f_vcf_file = os.path.join(self.workdir, 'f-' + os.path.basename(vcf_file))
if not f_vcf_file.endswith('.gz'):
f_vcf_file += '.gz'
# Get the inputs
self._get_remote_file(vg_file, os.path.join(out_store, os.path.basename(vg_file)))
self._get_remote_file(vcf_file, uf_vcf_file)
# Exclude source sample to make the results not circular
with context.get_toil(job_store) as toil:
cmd = ['bcftools', 'view', os.path.basename(uf_vcf_file), '-s', '^' + sample, '-O', 'z']
toil.start(Job.wrapJobFn(toil_call, context, cmd,
work_dir = os.path.abspath(self.workdir),
out_path = os.path.abspath(f_vcf_file)))
cmd = ['tabix', '-f', '-p', 'vcf', os.path.basename(f_vcf_file)]
toil.start(Job.wrapJobFn(toil_call, context, cmd,
work_dir = os.path.abspath(self.workdir)))
os.remove(uf_vcf_file)
# Make the gbwt of the input graph
# Don't bother with the GCSA
index_name = 'index-gbwt'
chrom, offset = self._bakeoff_coords(region)
self._toil_vg_index(chrom, vg_file, None, "SKIP",
'--vcf_phasing {} --xg_index_cores {} --gbwt_index'.format(
os.path.abspath(f_vcf_file), self.cores), tag, index_name)
gbwt_path = os.path.join(out_store, index_name + '.gbwt')
# Drop the extra xg
os.remove(os.path.join(out_store, index_name + '.xg'))
os.remove(f_vcf_file)
os.remove(f_vcf_file + '.tbi')
return gbwt_path
def _make_thread_indexes(self, sample, vg_file, vcf_file, region, tag=''):
""" Given a graph, then we extract two threads from the
given sample as their own graphs, then return an xg index for each.
this only supports one chromosome at a time, presently.
the indexes are written as thread_0.xg and thread_1.xg in the
output store (derived from tag parameter like other methods)
Returns paths to a full xg file for the graph, an xg for haplotype 0 of
the sample, and an xg for haplotype 1 of the sample.
"""
job_store = self._jobstore(tag)
out_store = self._outstore(tag)
out_store_name = self._outstore_name(tag)
# What do we want to override from the default toil-vg config?
overrides = argparse.Namespace(
# toil-vg options
vg_docker = self.vg_docker,
container = self.container,
force_outstore = self.force_outstore,
# Toil options
realTimeLogging = True,
realTimeStderr = True,
logLevel = "INFO",
maxCores = self.cores
)
# Make the context
context = Context(out_store, overrides)
# The unfiltered and filtered vcf file
uf_vcf_file = os.path.join(self.workdir, 'uf-' + os.path.basename(vcf_file))
f_vcf_file = os.path.join(self.workdir, 'f-' + os.path.basename(vcf_file))
if not f_vcf_file.endswith('.gz'):
f_vcf_file += '.gz'
# Get the inputs
self._get_remote_file(vg_file, os.path.join(out_store, os.path.basename(vg_file)))
self._get_remote_file(vcf_file, uf_vcf_file)
# Reduce our VCF to just the sample of interest to save time downstream
with context.get_toil(job_store) as toil:
cmd = ['bcftools', 'view', os.path.basename(uf_vcf_file), '-s', sample, '-O', 'z']
toil.start(Job.wrapJobFn(toil_call, context, cmd,
work_dir = os.path.abspath(self.workdir),
out_path = os.path.abspath(f_vcf_file)))
cmd = ['tabix', '-f', '-p', 'vcf', os.path.basename(f_vcf_file)]
toil.start(Job.wrapJobFn(toil_call, context, cmd,
work_dir = os.path.abspath(self.workdir)))
os.remove(uf_vcf_file)
# Make the gbwt of the input graph
index_name = 'index-gbwt'
chrom, offset = self._bakeoff_coords(region)
self._toil_vg_index(chrom, vg_file, None, 'SKIP',
'--vcf_phasing {} --xg_index_cores {} --gbwt_index'.format(
os.path.abspath(f_vcf_file), self.cores), tag, index_name)
gbwt_path = os.path.join(out_store, index_name + '.gbwt')
xg_path = os.path.join(out_store, index_name + '.xg')
os.remove(f_vcf_file)
os.remove(f_vcf_file + '.tbi')
# Extract both haplotypes of the given sample as their own graphs
with context.get_toil(job_store) as toil:
main_job = Job.wrapJobFn(run_make_haplo_thread_graphs,
context,
toil.importFile(make_url(vg_file)),
os.path.basename(vg_file),
'baseline',
[chrom],
toil.importFile(make_url(xg_path)),
sample,
[0,1],
toil.importFile(make_url(gbwt_path)))
haplo_ids = toil.start(main_job)
thread_0_file = os.path.join(out_store, 'thread_0')
thread_1_file = os.path.join(out_store, 'thread_1')
toil.exportFile(haplo_ids[0], make_url(thread_0_file + '.vg'))
toil.exportFile(haplo_ids[1], make_url(thread_1_file + '.vg'))
# then index them
for thread_vg in [thread_0_file, thread_1_file]:
self._toil_vg_index(chrom, thread_vg + '.vg', None, 'SKIP', None, tag,
os.path.basename(thread_vg))
return xg_path, thread_0_file + '.xg', thread_1_file + '.xg'
def _print_vcfeval_summary_table(self, summary_path, baseline_f1, threshold, header=True, name=None):
with io.open(summary_path, 'r', encoding='utf8') as summary_file:
for i, line in enumerate(summary_file):
if i != 1 and (header or i != 0):
toks = line.split()
if i > 1 and name:
toks = [name] + toks
if i == 0:
if name:
toks = ['Name'] + toks
toks = toks[0:-1] + ['F1', 'Baseline F1', 'Test Threshold']
elif i == 2:
toks += [baseline_f1, threshold]
elif i > 2:
toks += ['N/A', 'N/A']
print('\t'.join([str(tok) for tok in toks]))
def _verify_f1(self, sample, tag='', threshold=None):
# grab the f1.txt file from the output store
if sample:
f1_name = '{}_vcfeval_output_f1.txt'.format(sample)
else:
f1_name = 'vcfeval_output_f1.txt'
f1_path = os.path.join(self._outstore(tag), f1_name)
with io.open(f1_path, 'r', encoding='utf8') as f1_file:
f1_score = float(f1_file.readline().strip())
try:
baseline_f1 = self._read_baseline_float(tag, f1_name)
except:
# Couldn't read the baseline. Maybe it doesn't exist (yet)
baseline_f1 = 0
# print the whole table in tags that mine-logs can read
summary_path = f1_path[0:-6] + 'summary.txt'
self._begin_message('vcfeval Results', is_tsv=True)
self._print_vcfeval_summary_table(summary_path, baseline_f1, threshold)
self._end_message()
# compare with threshold
if not threshold:
threshold = self.f1_threshold
self.assertGreaterEqual(f1_score, baseline_f1 - threshold)
def _test_bakeoff(self, region, graph, skip_indexing, mapper='map', tag_ext='', misc_opts=None,
genotype=False):
""" Run bakeoff F1 test for NA12878 """
assert not tag_ext or tag_ext.startswith('-')
tag = '{}-{}{}'.format(region, graph, tag_ext)
chrom, offset = self._bakeoff_coords(region)
if skip_indexing:
xg_path = None
gcsa_path = self._input('{}-{}.gcsa'.format(graph, region))
else:
xg_path = None
gcsa_path = None
extra_opts = '--vcf_offsets {}'.format(offset)
if misc_opts:
extra_opts += ' {}'.format(misc_opts)
# these are the options these tests were trained on. specify here instead of relying
# on them being baked into toil-vg
extra_opts += ' --min_mapq 15 --filter_opts \' -r 0.9 -fu -m 1 -q 15 -D 999\''
self._toil_vg_run('NA12878', chrom,
self._input('{}-{}.vg'.format(graph, region)),
xg_path, gcsa_path,
self._input('platinum_NA12878_{}.fq.gz'.format(region)),
self._input('platinum_NA12878_{}.vcf.gz'.format(region)),
self._input('chr{}.fa.gz'.format(chrom)), True, mapper,
extra_opts, genotype, tag)
if self.verify:
self._verify_f1('NA12878', tag)
def _mapeval_vg_run(self, reads, base_xg_path, sim_xg_paths,
source_path_names, fasta_path, test_index_bases,
test_names, score_baseline_name, mappers,
paired_only, sim_opts, sim_fastq, more_mpmap_opts, tag):
""" Wrap toil-vg mapeval.
Evaluates realignments (to the linear reference and to a set of graphs)
of reads simulated from a single "base" graph. Realignments are
evaluated based on how close the realignments are to the original
simulated source position. Simulations are done inside this function.
sim_xg_paths are xg filenames used for simulation. base_xg_path is an xg
filename used for everything else like annotation and mapping.
sim_xg_paths can be [base_xg_path]
Simulates the given number of reads (reads), from the given XG files
(sim_xg_paths), optionally restricted to a set of named embedded paths
(source_path_names). Uses the given FASTA (fasta_path) as a BWA
reference for comparing vg and BWA alignments within mapeval.
(Basically, BWA against the linear reference functions as a negative
control "graph" to compare against the real test graphs.)
test_index_bases specifies a list of basenames (without extension) for a
.xg, .gcsa, and .gcsa.lcp file set, one per graph that is to be
compared.
test_names has one entry per graph to be compared, and specifies where
the realigned read GAM files should be saved.
score_baseline_name, if not None, is a name from test_names to be used
as a score baseline for comparing all the realignment scores against.
tag is a unique slug for this test/run, which determines the Toil job
store name to use, and the location where the output files should be
saved.
"""
job_store = self._jobstore(tag)
out_store = self._outstore(tag)
# start by simulating some reads
# TODO: why are we using strings here when we could use much safer lists???
opts = ' '.join(self._toil_vg_io_opts()) + ' '
if self.vg_docker:
opts += '--vg_docker {} '.format(self.vg_docker)
if self.container:
opts += '--container {} '.format(self.container)
# note, using the same seed only means something if using same
# number of chunks. we make that explicit here
sim_chunks = int(reads / self.sim_chunk_size)
if reads % self.sim_chunk_size:
sim_chunks += 1
opts += '--maxCores {} --sim_chunks {} --seed {} '.format(self.cores, sim_chunks, 8)
if sim_opts:
opts += '--sim_opts \'{}\' '.format(sim_opts)
if sim_fastq:
opts += '--fastq {} '.format(sim_fastq)
opts += '--annotate_xg {} '.format(base_xg_path)
for source_path_name in source_path_names:
opts += '--path {} '.format(source_path_name)
cmd = 'toil-vg sim {} {} {} {} --gam {} --fastq_out'.format(
job_store, ' '.join(sim_xg_paths), int(reads / 2), out_store, opts)
subprocess.check_call(cmd, shell=True)
# then run mapeval
# What do we want to override from the default toil-vg config?
overrides = argparse.Namespace(
# toil-vg options
vg_docker = self.vg_docker,
container = self.container,
alignment_cores = self.cores,
# Toil options
realTimeLogging = True,
realTimeStderr = True,
logLevel = "INFO",
maxCores = self.cores,
# toil-vg map options
# don't waste time sharding reads since we only run on one node
single_reads_chunk = True,
mpmap_opts = ['-B', '-F GAM'],
more_mpmap_opts = more_mpmap_opts,
force_outstore = self.force_outstore
)
# Make the context
context = Context(out_store, overrides)
# And what options to configure the mapeval run do we want? These have
# to get turned into a plan in order to import all the files with names
# derived algorithmically from the names given here. TODO: move
# positional/required arguments out of this somehow? So we can just use
# this to get the default optional settings and fill in the required
# things as file IDs?
mapeval_options = get_default_mapeval_options()
mapeval_options.truth = make_url(os.path.join(out_store, 'true.pos'))
mapeval_options.bwa = True
mapeval_options.paired_only = paired_only
mapeval_options.fasta = make_url(fasta_path)
mapeval_options.index_bases = [make_url(x) for x in test_index_bases]
mapeval_options.gam_names = test_names
mapeval_options.fastq = [make_url(os.path.join(out_store, 'sim.fq.gz'))]
# We have 150 bp reads reduced to a point position, at a resolution of
# only the nearest 100 bp (on the primary graph). How close do two such
# point positions need to be to say the read is in the right place?
mapeval_options.mapeval_threshold = 200
if score_baseline_name is not None:
mapeval_options.compare_gam_scores = score_baseline_name
if 'mpmap' in mappers and more_mpmap_opts:
# If we're doing more than one mpmap test, disable vg map
# Just checking here to make sure old logic preserved across interface change
assert mappers == ['mpmap']
mapeval_options.mappers = mappers
mapeval_options.ignore_quals = 'mpmap' in mappers and not sim_fastq
# Make Toil
with context.get_toil(job_store) as toil:
# Make a plan by importing those files specified in the mapeval
# options
plan = make_mapeval_plan(toil, mapeval_options)
# Make a job to run the mapeval workflow, using all these various imported files.
main_job = Job.wrapJobFn(run_mapeval,
context,
mapeval_options,
plan.xg_file_ids,
[],
plan.gcsa_file_ids,
plan.gbwt_file_ids,
[],
[],
plan.id_range_file_ids,
plan.snarl_file_ids,
plan.vg_file_ids,
plan.gam_file_ids,
plan.reads_gam_file_id,
None,
None,
plan.reads_fastq_file_ids,
plan.fasta_file_id,
plan.bwa_index_ids,
None,
plan.bam_file_ids,
plan.pe_bam_file_ids,
plan.true_read_stats_file_id)
# Output files all live in the out_store, but if we wanted to we could export them also/instead.
# Run the root job
returned = toil.start(main_job)
# TODO: I want to do the evaluation here, working with file IDs, but
# since we put the results in the out store maybe it really does
# make sense to just go through the files in the out store.
def _filter_position_file(self, position_file, out_file):
""" Filter reads that fail score check out of a position comparison file
Return number of reads filtered """
# check if a read has been filtered by looking in the primary score comparison output
reads_map = dict()
def is_filtered(read, method):
if method not in reads_map:
reads_map[method] = set()
try:
# -se not currently in filenames. ugh.
name = method[0:-3] if method.endswith('-se') else method
score_path = os.path.join(os.path.dirname(position_file),
'{}.compare.primary.scores'.format(name))
with io.open(score_path, 'r', encoding='utf8') as score_file:
for line in score_file:
toks = line.split(", ")
if int(toks[1]) < 0:
reads_map[method].add(toks[0])
except:
pass
return read in reads_map[method]
# scan positions, filtering all reads in our set
filter_count = 0
with io.open(position_file, 'r', encoding='utf8') as pf, io.open(out_file, 'w', encoding='utf8') as of:
for i, line in enumerate(pf):
toks = line.rstrip().split()
if i == 0:
ridx = toks.index('read')
aidx = toks.index('aligner')
if i == 0 or not is_filtered(toks[ridx], toks[aidx].strip('"')):
of.write(line)
else:
filter_count += 1
return filter_count
def _mapeval_r_plots(self, tag, positive_control=None, negative_control=None,
control_include=['snp1kg', 'primary', 'common1kg'], min_reads_for_filter_plots=100):
""" Compute the mapeval r plots (ROC and QQ) """
out_store = self._outstore(tag)
out_store_name = self._outstore_name(tag)
job_store = self._jobstore(tag)
# What do we want to override from the default toil-vg config?
overrides = argparse.Namespace(
container = self.container,
# Toil options
realTimeLogging = True,
realTimeStderr = True,
logLevel = "INFO",
maxCores = self.cores
)
# Lookup names list with -pe and -se attached
def pe_se(names):
names_e = [[x, '{}-se'.format(x), '{}-pe'.format(x)] for x in names if x]
return [y for x in names_e for y in x]
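# e.g. pe_se(['snp1kg', None]) -> ['snp1kg', 'snp1kg-se', 'snp1kg-pe'];
# falsy entries (such as an unset control) are dropped before expansion.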
# Make the context
context = Context(out_store, overrides)
with context.get_toil(job_store) as toil:
try:
for rscript in ['pr', 'qq', 'roc']:
# pull the scripts from where we expect them relative to being in vg/
# and put them in the work directory. This is ugly but keeps the
# docker interfacing simple.
shutil.copy2('scripts/plot-{}.R'.format(rscript), os.path.abspath(self.workdir))
# if controls specified, filter into their own plot so things don't get too busy
if positive_control or negative_control:
nc_name = 'position.results.no.control.tsv'
co_name = 'position.results.control.tsv'
with io.open(os.path.join(out_store, 'position.results.tsv'), 'r', encoding='utf8') as pr_file,\
io.open(os.path.join(out_store, nc_name), 'w', encoding='utf8') as nc_file,\
io.open(os.path.join(out_store, co_name), 'w', encoding='utf8') as co_file:
aidx = None
for i, line in enumerate(pr_file):
toks = line.rstrip().split()
if i == 0:
aidx = toks.index('aligner')
if i == 0 or toks[aidx].strip('"') in pe_se(control_include +
[positive_control, negative_control]):
co_file.write(line)
if i == 0 or toks[aidx].strip('"') not in pe_se(
[positive_control, negative_control]):
nc_file.write(line)
else:
nc_name = 'position.results.tsv'
co_name = None
plot_names = [(nc_name, '')]
if co_name:
plot_names.append((co_name, '.control'))
# make a plot where we ignore reads that fail score
cur_plot_names = [pn for pn in plot_names]
for name, tag in cur_plot_names:
pf_name = name.replace('.tsv', '.primary.filter.tsv')
if self._filter_position_file(
os.path.join(out_store, name),
os.path.join(out_store, pf_name)) > min_reads_for_filter_plots:
plot_names.append((pf_name, tag + '.primary.filter'))
else:
if os.path.isfile(os.path.join(out_store, pf_name)):
os.remove(os.path.join(out_store, pf_name))
for tsv_file, out_name in plot_names:
cmd = ['Rscript', 'plot-{}.R'.format(rscript),
os.path.join(out_store_name, tsv_file),
os.path.join(out_store_name, '{}{}.svg'.format(rscript, out_name))]
toil.start(Job.wrapJobFn(toil_call, context, cmd,
work_dir = os.path.abspath(self.workdir)))
os.remove(os.path.join(self.workdir, 'plot-{}.R'.format(rscript)))
if os.path.isfile(os.path.join(self.workdir, 'Rplots.pdf')):
os.remove(os.path.join(self.workdir, 'Rplots.pdf'))
except Exception as e:
log.warning("Failed to generate ROC and QQ plots with Exception: {}".format(e))
def _tsv_to_dict(self, stats, row_1 = 1):
""" convert tsv string into dictionary """
stats_dict = dict()
for line in stats.split('\n')[row_1:]:
toks = line.split()
if len(toks) > 1:
stats_dict[toks[0]] = [float(x) for x in toks[1:]]
return stats_dict
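# Illustrative only: given a stats.tsv whose header row is skipped (default row_1=1)
# and whose data rows look like (hypothetical numbers)
#     snp1kg    100000  0.991  0.998  0.970  0.950
# _tsv_to_dict() returns {'snp1kg': [100000.0, 0.991, 0.998, 0.970, 0.950]}, i.e. a dict
# keyed on the first column with the remaining columns converted to floats.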
def _verify_mapeval(self, reads, read_source_graph, score_baseline_name,
positive_control, negative_control, tag, acc_threshold,
auc_threshold):
"""
Check the simulated mapping evaluation results.
read_source_graph is the name of the graph that the reads were generated
from; we'll compare the scores realigned to that graph against the
scores that the generated reads had.
score_baseline_name is the name of the graph we compared scores against;
we will check that reads increase in score in the other graphs against
that graph and complain if they don't. It may be None, in which case
scores are only compared against the scores the reads got when
simulated.
"""
# Make some plots in the outstore
self._mapeval_r_plots(tag, positive_control, negative_control)
stats_path = os.path.join(self._outstore(tag), 'stats.tsv')
with io.open(stats_path, 'r', encoding='utf8') as stats:
stats_tsv = stats.read()
baseline_tsv = self._read_baseline_file(tag, 'stats.tsv')
# Dict from aligner to a list of float stat values, in order
stats_dict = self._tsv_to_dict(stats_tsv)
# Dict from aligner to a list of float stat values, in order
baseline_dict = self._tsv_to_dict(baseline_tsv)
# print out a table of mapeval results
table_name = 'map eval results'
if positive_control:
table_name += ' (*: positive control)'
if negative_control:
table_name += ' (**: negative control)'
self._begin_message(table_name, is_tsv=True)
# How many different columns do we want to see in the stats files?
# We need to pad shorter rows with placeholders
stats_columns = 5 # read count, accuracy, AUC, QQ-plot r value, max F1
print('\t'.join(['Method', 'Acc.', 'Baseline Acc.', 'AUC', 'Baseline AUC', 'Max F1', 'Baseline F1']))
for key in sorted(set(list(baseline_dict.keys()) + list(stats_dict.keys()))):
# What values do we have for the graph this run?
sval = list(stats_dict.get(key, []))
while len(sval) < stats_columns:
sval.append('DNE')
# And what baseline values do we have stored?
bval = list(baseline_dict.get(key, []))
while len(bval) < stats_columns:
bval.append('DNE')
method = key
if not key.endswith('-pe'):
# to be consistent with plots
method += '-se'
if positive_control and key in [positive_control, positive_control + '-pe']:
method += '*'
if negative_control and key in [negative_control, negative_control + '-pe']:
method += '**'
def r4(s):
return round(s, 5) if isinstance(s, float) else s
row = [method]
for metric_index in [1, 2, 4]:
# For each metric, compare stat to baseline
stat_val = str(r4(sval[metric_index]))
baseline_val = str(r4(bval[metric_index]))
if stat_val != 'DNE' and baseline_val != 'DNE':
if sval[metric_index] < bval[metric_index]:
# Stat got worse
stat_val = '↓ {}'.format(stat_val)
elif sval[metric_index] > bval[metric_index]:
# Stat got better
stat_val = '↑ {}'.format(stat_val)
row.append(stat_val)
row.append(baseline_val)
print('\t'.join(row))
self._end_message()
# test the mapeval results, only looking at baseline keys
for key, val in list(baseline_dict.items()):
if key in stats_dict:
# For each graph we have a baseline and stats for, compare the
# columns we actually have in both.
if len(stats_dict[key]) > 0:
self.assertEqual(stats_dict[key][0], reads)
if len(stats_dict[key]) > 1 and len(val) > 1:
# Compare accuracy stats
self.assertGreaterEqual(stats_dict[key][1], val[1] - acc_threshold)
if len(stats_dict[key]) > 2 and len(val) > 2:
# Compare AUC stats. Make sure to patch up 0 AUCs from perfect classification.
new_auc = stats_dict[key][2] if stats_dict[key][2] != 0 else 1
old_auc = val[2] if val[2] != 0 else 1
self.assertGreaterEqual(new_auc, old_auc - auc_threshold)
if len(stats_dict[key]) > 4 and len(val) > 4:
self.assertGreaterEqual(stats_dict[key][4], val[4] - acc_threshold)
if len(stats_dict[key]) != len(val):
log.warning('Key {} has {} baseline entries and {} stats'.format(key, len(val), len(stats_dict[key])))
else:
log.warning('Key {} from baseline not found in stats'.format(key))
# This holds the condition names we want a better score than
score_baselines = ['input']
if score_baseline_name is not None:
score_baselines.append(score_baseline_name)
for compare_against in score_baselines:
# For each graph/condition we want to compare scores against
# Now look at the stats for comparing scores on all graphs vs. scores on this particular graph.
score_stats_path = os.path.join(self._outstore(tag), 'score.stats.{}.tsv'.format(compare_against))
if os.path.exists(score_stats_path):
# If the score comparison was run, make sure not too many reads
# get worse moving from simulated to realigned scores, or moving
# from the baseline graph to the other (more inclusive) graphs.
try:
# Parse out the baseline stat values (not for the baseline
# graph; we shouldn't have called these both "baseline")
baseline_tsv = self._read_baseline_file(tag, 'score.stats.{}.tsv'.format(compare_against))
baseline_dict = self._tsv_to_dict(baseline_tsv)
except:
# Maybe there's no baseline file saved yet
# Synthesize one of the right shape
baseline_dict = collections.defaultdict(lambda: [0, 0])
# Parse out the real stat values
score_stats_dict = self._tsv_to_dict(io.open(score_stats_path, 'r', encoding='utf8').read())
for key in list(score_stats_dict.keys()):
# For every kind of graph
if compare_against == 'input' and (key != read_source_graph and
key != read_source_graph + '-pe'):
# Only compare simulated read scores to the scores the
# reads get when aligned against the graph they were
# simulated from.
continue
# Guess where the file for individual read score differences for this graph is
# TODO: get this file's name/ID from the actual Toil code
read_comparison_path = os.path.join(self._outstore(tag), '{}.compare.{}.scores'.format(key, compare_against))
for line in io.open(read_comparison_path, 'r', encoding='utf8'):
if line.strip() == '':
continue
# Break each line of the CSV
parts = line.split(', ')
# Fields are read name, score difference, aligned score, baseline score
read_name = parts[0]
score_diff = int(parts[1])
if score_diff < 0:
# Complain about anyone who goes below 0.
log.warning('Read {} has a negative score increase of {} on graph {} vs. {}'.format(
read_name, score_diff, key, compare_against))
if key not in baseline_dict:
# We might get new graphs that aren't in the baseline file.
log.warning('Key {} missing from score baseline dict for {}. Inserting...'.format(key, compare_against))
# Store 0 for the read count, and 1 for the portion that got worse.
# We need a conservative default baseline so new tests will pass.
baseline_dict[key] = [0, 1]
# Report on its stats after dumping reads, so that if there are
# too many bad reads and the stats are terrible we still can see
# the reads.
print('{} vs. {} Worse: {} Baseline: {} Threshold: {}'.format(
key, compare_against, score_stats_dict[key][1], baseline_dict[key][1], self.worse_threshold))
# Make sure all the reads came through
self.assertEqual(score_stats_dict[key][0], reads)
if not key.endswith('-pe'):
# Skip paired-end cases because their pair partners can
# pull them around. Also they are currently subject to
# substantial nondeterministic alignment differences
# based on the assignment of reads to threads.
# Make sure not too many got worse
self.assertLessEqual(score_stats_dict[key][1], baseline_dict[key][1] + self.worse_threshold)
def _test_mapeval(self, reads, region, baseline_graph, test_graphs, score_baseline_graph=None,
positive_control=None, negative_control=None, sample=None,
source_path_names=set(), mappers=['map'], paired_only=False,
assembly="hg38", tag_ext="", acc_threshold=0, auc_threshold=0,
sim_opts='-l 150 -p 500 -v 50 -e 0.05 -i 0.01', sim_fastq=None,
more_mpmap_opts=None):
""" Run simulation on a bakeoff graph
Simulate the given number of reads from the given baseline_graph
(snp1kg, primary, etc.) and realign them against all the graphs in the
test_graph list.
If a sample is specified, baseline_graph must be a graph with allele
paths in it (--alt_paths passed to toil-vg construct) so that the subset
of the graph for that sample can be used for read simulation.
If instead source_path_names is specified, it must be a collection of
path names that exist in baseline_graph. Reads will be simulated evenly
across the named paths (not weighted according to path length).
Needs to know the bakeoff region that is being run, in order to look up
the actual graphs files for each graph type.
Verifies that the realignments are sufficiently good.
If score_baseline_graph is set to a graph name from test_graphs,
computes score differences for each read against that baseline.
If positive_control or negative_control is in test_graphs, compute separate
ROC/QQ plots with just those and the baseline graph (and don't plot them
in the normal plots)
If a sample name is specified, extract a thread for each of its haplotypes
from the baseline graph using the gPBWT and simulate only from those threads
"""
assert not tag_ext or tag_ext.startswith('-')
tag = 'sim-{}-{}{}'.format(region, baseline_graph, tag_ext)
# compute the xg indexes from scratch
for graph in set([baseline_graph] + test_graphs):
chrom, offset = self._bakeoff_coords(region)
vg_path = self._input('{}-{}.vg'.format(graph, region))
self._toil_vg_index(str(chrom), vg_path, None, self._input('{}-{}.gcsa'.format(graph, region)),
None, tag, '{}-{}'.format(graph, region))
# compute the haplotype graphs to simulate from
if sample:
# Can't use source paths with a sample
assert(len(source_path_names) == 0)
# We need to make one XG per sample haplotype
if sample in self.bakeoff_removed_samples:
# Unlike the other bakeoff graphs (snp1kg-region.vg), this one contains NA12878 and family
vg_path = self._input('{}_all_samples-{}.vg'.format(baseline_graph, region))
else:
vg_path = self._input('{}-{}.vg'.format(baseline_graph, region))
vcf_path = self._input('1kg_{}-{}.vcf.gz'.format(assembly, region))
xg_path, thread1_xg_path, thread2_xg_path = self._make_thread_indexes(
sample, vg_path, vcf_path, region, tag)
sim_xg_paths = [thread1_xg_path, thread2_xg_path]
else:
# Just use the one XG, and maybe restrict to paths in it.
xg_path = os.path.join(self._outstore(tag), '{}-{}'.format(baseline_graph, region) + '.xg')
sim_xg_paths = [xg_path]
fasta_path = self._input('{}.fa'.format(region))
test_index_bases = []
for test_graph in test_graphs:
test_tag = '{}-{}'.format(test_graph, region)
test_index_bases.append(os.path.join(self._outstore(tag), test_tag))
self._mapeval_vg_run(reads, xg_path, sim_xg_paths, source_path_names, fasta_path, test_index_bases,
test_graphs, score_baseline_graph, mappers, paired_only, sim_opts, sim_fastq,
more_mpmap_opts, tag)
if self.verify:
self._verify_mapeval(reads, baseline_graph, score_baseline_graph,
positive_control, negative_control, tag,
acc_threshold, auc_threshold)
def _calleval_vg_run(self, xg_path, vg_path, fasta_path, gam_path, bam_path,
truth_vcf_path, bed_regions_path, sample, chrom, offset, tag):
""" Wrap toil-vg calleval.
"""
job_store = self._jobstore(tag)
out_store = self._outstore(tag)
# run calleval
cmd = ['toil-vg', 'calleval', job_store, out_store]
if self.vg_docker:
cmd += ['--vg_docker', self.vg_docker]
# Test test_call_chr21_snp1kg has been running out of memory on 32 GB
# nodes, maybe due to too many calling jobs running at once. The
# default memory limit is 4 GB, so we double that to 8 GB so we retain
# some parallelism while hopefully not going way over the limits and
# OOM-ing.
cmd += ['--calling_mem', '8G']
cmd += ['--calling_cores', str(min(4, self.cores))]
cmd += ['--maxCores', str(self.cores)]
cmd += self._toil_vg_io_opts()
if self.container:
cmd += ['--container', self.container]
# run call
cmd += ['--call']
# run freebayes
if bam_path:
cmd += ['--freebayes']
cmd += ['--bams', bam_path]
cmd += ['--bam_names', 'bwa']
# gam
cmd += ['--gams', gam_path]
cmd += ['--gam_names', 'vg']
cmd += ['--ref_paths', str(chrom)]
cmd += ['--min_mapq', '15', '--min_augment_coverage', '3']
cmd += ['--filter_opts', '-r 0.9 -fu -m 1 -q 15 -D 999']
cmd += ['--augment_cores', str(min(4, self.cores))]
if offset:
cmd += ['--vcf_offsets', str(offset)]
cmd += ['--sample', sample]
# xg
cmd += ['--xg_paths', xg_path]
# truth vcf
cmd += ['--vcfeval_baseline', truth_vcf_path]
if bed_regions_path:
cmd += ['--vcfeval_bed_regions', bed_regions_path]
# fasta: required for both vcfeval and freebayes
cmd += ['--vcfeval_fasta', fasta_path]
cmd += ['--vcfeval_opts', '--ref-overlap --vcf-score-field GQ']
subprocess.check_call(cmd, shell=False)
def _verify_calleval(self, tag='', threshold=None):
out_store = self._outstore(tag)
# scrape up all the possible output files
output_names = []
output_f1_paths = []
output_summary_paths = []
for output_path in os.listdir(out_store):
if output_path.endswith('_vcfeval_output_summary.txt'):
output_names.append(output_path[:-len('_vcfeval_output_summary.txt')])
output_f1_paths.append(os.path.join(
out_store, '{}_vcfeval_output_f1.txt'.format(output_names[-1])))
output_summary_paths.append(os.path.join(out_store, output_path))
# check nothing went terribly and unexpectedly wrong
self.assertTrue(len(output_names) == len(output_f1_paths) == len(output_summary_paths))
self.assertGreaterEqual(len(output_names), 1)
# todo: should we check for certain names?
# compare with threshold
if not threshold:
threshold = self.f1_threshold
# print the table, collect the scores
self._begin_message('vcfeval Results', is_tsv=True)
f1_scores = []
baseline_scores = []
for name, f1_path, summary_path in zip(output_names, output_f1_paths, output_summary_paths):
with io.open(f1_path, 'r', encoding='utf8') as f1_file:
f1_scores.append(float(f1_file.readline().strip()))
baseline_scores.append(self._read_baseline_float(tag, os.path.basename(f1_path)))
self._print_vcfeval_summary_table(summary_path, baseline_scores[-1], threshold,
header=name==output_names[0], name=name)
self._end_message()
for name, f1_score, baseline_score in zip(output_names, f1_scores, baseline_scores):
self.assertGreaterEqual(f1_score, baseline_score - threshold,
msg = 'F1={} <= (Baseline F1={} - Threshold={}) for {}'.format(
f1_score, baseline_score, threshold, name))
def _test_calleval(self, region, baseline_graph, sample, vg_path, gam_path, bam_path, vcf_path,
bed_path, fasta_path, f1_threshold, tag_ext=""):
""" Run call, genotype, and freebayes on some pre-existing alignments and compare them
to a truth set using vcfeval.
"""
assert not tag_ext or tag_ext.startswith('-')
tag = 'call-{}-{}{}'.format(region, baseline_graph, tag_ext)
# compute the xg index from scratch
chrom, offset = self._bakeoff_coords(region)
self._toil_vg_index(str(chrom), vg_path, None, "SKIP",
None, tag, '{}-{}'.format(baseline_graph, region))
xg_path = os.path.join(self._outstore(tag), '{}-{}'.format(baseline_graph, region) + '.xg')
test_index_bases = []
self._calleval_vg_run(xg_path, vg_path, fasta_path, gam_path, bam_path,
vcf_path, bed_path, sample, chrom, offset, tag)
if self.verify:
self._verify_calleval(tag=tag, threshold=f1_threshold)
#@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(8000)
def test_call_chr21_snp1kg(self):
"""
calling comparison between call, genotype and freebayes on an alignment extracted
from the HG002 whole genome experiment run from the paper
"""
giab = 'https://vg-data.s3.amazonaws.com/giab/'
#self.input_store = 'https://vg-data.s3.amazonaws.com/CHR21_DEC3'
# using this one until above is fixed
self.input_store = 'https://vg-data.s3.amazonaws.com/dnanexus'
log.info("Test start at {}".format(datetime.now()))
self._test_calleval('CHR21', 'snp1kg', "HG002",
self._input('snp1kg_21.vg'),
self._input('snp1kg_HG002_21.gam'),
self._input('21.bam'),
os.path.join(giab, 'HG002_GRCh37_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X'
'-SOLID_CHROM1-22_v.3.3.2_highconf_triophased-CHR21.vcf.gz'),
os.path.join(giab, 'HG002_GRCh37_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X'
'-SOLID_CHROM1-22_v.3.3.2_highconf_noinconsistent.bed'),
self._input('hs37d5_chr21.fa.gz'),
0.035)
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(600)
def test_sim_brca1_snp1kg(self):
""" Mapping and calling bakeoff F1 test for BRCA1 primary graph """
# Using 100k simulated reads from snp1kg BRCA1, realign against all
# these other BRCA1 graphs and make sure the realignments are
# sufficiently good. Compare all realignment scores against the scores
# for the primary graph.
log.info("Test start at {}".format(datetime.now()))
self._test_mapeval(100000, 'BRCA1', 'snp1kg',
['primary', 'snp1kg'],
score_baseline_graph='primary',
sample='HG00096', acc_threshold=0.02, auc_threshold=0.02)
@timeout_decorator.timeout(1200)
def test_sim_mhc_snp1kg(self):
""" Mapping and calling bakeoff F1 test for BRCA1 primary graph """
log.info("Test start at {}".format(datetime.now()))
self._test_mapeval(100000, 'MHC', 'snp1kg',
['primary', 'snp1kg'],
score_baseline_graph='primary',
sample='HG00096', acc_threshold=0.02, auc_threshold=0.02)
@timeout_decorator.timeout(900)
def test_sim_mhc_cactus(self):
""" Mapping test for MHC cactus graph """
log.info("Test start at {}".format(datetime.now()))
self._test_mapeval(10000, 'MHC', 'cactus',
['snp1kg', 'cactus'],
mappers = ['map', 'mpmap'],
source_path_names=['GI568335986', 'GI568335994'], acc_threshold=0.02, auc_threshold=0.04)
@timeout_decorator.timeout(4800)
def test_sim_chr21_snp1kg(self):
log.info("Test start at {}".format(datetime.now()))
self._test_mapeval(300000, 'CHR21', 'snp1kg',
['primary', 'snp1kg', 'thresholded10'],
score_baseline_graph='primary',
sample='HG00096',
assembly="hg19",
acc_threshold=0.0075, auc_threshold=0.075, mappers = ['map', 'mpmap'],
sim_opts='-l 150 -p 570 -v 150 -e 0.01 -i 0.002')
@timeout_decorator.timeout(4400)
def test_sim_chr21_snp1kg_trained(self):
self._test_mapeval(100000, 'CHR21', 'snp1kg',
['primary', 'snp1kg'],
#score_baseline_graph='primary',
sample='HG00096',
assembly="hg19",
acc_threshold=0.0075, auc_threshold=0.075, mappers = ['map', 'mpmap'], paired_only=True,
tag_ext='-trained',
sim_opts='-p 570 -v 150 -S 4 -i 0.002 -I',
# 800k 148bp reads from Genome in a Bottle NA12878 library
# (placeholder while finding something better)
sim_fastq='ftp://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/data/NA12878/NIST_NA12878_HG001_HiSeq_300x/131219_D00360_005_BH814YADXX/Project_RM8398/Sample_U5a/U5a_AGTCAA_L002_R1_007.fastq.gz')
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(1200)
def test_sim_brca2_snp1kg_mpmap(self):
""" multipath mapper test, which is a smaller version of above. we catch all errors
so vgci doesn't report failures. vg is run only in single ended with multipath on
and off.
"""
log.info("Test start at {}".format(datetime.now()))
self._test_mapeval(50000, 'BRCA2', 'snp1kg',
['primary', 'snp1kg'],
score_baseline_graph='primary',
sample='HG00096', mappers=['map','mpmap'], tag_ext='-mpmap',
acc_threshold=0.02, auc_threshold=0.02)
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(2400)
def test_sim_chr21_snp1kg_mpmap(self):
""" multipath mapper test, which is a smaller version of above. we catch all errors
so vgci doesn't report failures. vg is run only in single ended with multipath on
and off.
"""
self._test_mapeval(100000, 'CHR21', 'snp1kg',
['primary', 'snp1kg'],
score_baseline_graph='primary',
sample='HG00096', mappers=['map','mpmap'], tag_ext='-mpmap',
acc_threshold=0.02,
sim_opts='-d 0.01 -p 1000 -v 75.0 -S 5 -I',
sim_fastq=self._input('platinum_NA12878_MHC.fq.gz'))
@timeout_decorator.timeout(3000)
def test_sim_mhc_snp1kg_mpmap(self):
""" multipath mapper test, which is a smaller version of above. we catch all errors
so vgci doesn't report failures. vg is run only in single ended with multipath on
and off.
"""
log.info("Test start at {}".format(datetime.now()))
self._test_mapeval(50000, 'MHC', 'snp1kg',
['primary', 'snp1kg'],
sample='HG00096', mappers=['mpmap'], tag_ext='-mpmap',
acc_threshold=0.02, auc_threshold=0.02,
sim_opts='-d 0.01 -p 1000 -v 75.0 -S 5 -I',
sim_fastq=self._input('platinum_NA12878_MHC.fq.gz'),
more_mpmap_opts=['-u 8'])
@timeout_decorator.timeout(4000)
def test_sim_yeast_cactus(self):
""" Yeast test based on the cactus graphs. Reads are simulated from the SK1 path
of the full graph. The other graphs are made from this graph using vg mod:
cactus_drop_SK1 : remove all elements that are only on SK1 path
cactus_SK1 : keep only SK1 path
cactus_S288c : keep only S288c (reference) path
"""
self.input_store = 'https://vg-data.s3.amazonaws.com/cactus_yeast'
log.info("Test start at {}".format(datetime.now()))
self._test_mapeval(100000, 'YEAST', 'cactus',
['cactus', 'cactus_drop_SK1', 'cactus_SK1', 'cactus_S288c'],
#score_baseline_graph='cactus_S288c',
source_path_names=['SK1.chr{}'.format(i) for i in [
'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII',
'IX', 'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI']],
#multipath=True,
#paired_only=True,
acc_threshold=0.02, auc_threshold=0.02,
sim_opts='-p 500 -v 50 -S 4 -i 0.002')
@timeout_decorator.timeout(400)
def test_map_brca1_primary(self):
""" Mapping and calling bakeoff F1 test for BRCA1 primary graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('BRCA1', 'primary', True)
@timeout_decorator.timeout(400)
def test_map_brca1_snp1kg(self):
""" Mapping and calling bakeoff F1 test for BRCA1 snp1kg graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('BRCA1', 'snp1kg', True)
@timeout_decorator.timeout(900)
def test_map_brca1_snp1kg_mpmap(self):
""" Mapping and calling bakeoff F1 test for BRCA1 snp1kg graph on mpmap.
The filter_opts are the defaults minus the identity filter because mpmap doesn't
write identities.
"""
self._test_bakeoff('BRCA1', 'snp1kg', True, mapper='mpmap', tag_ext='-mpmap',
misc_opts='--filter_opts \"-q 15 -m 1 -D 999 -s 1000\"')
@timeout_decorator.timeout(400)
def test_map_brca1_cactus(self):
""" Mapping and calling bakeoff F1 test for BRCA1 cactus graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('BRCA1', 'cactus', True)
@timeout_decorator.timeout(900)
def test_full_brca2_primary(self):
""" Indexing, mapping and calling bakeoff F1 test for BRCA2 primary graph """
log.info("Test start at {}".format(datetime.now()))
self.f1_threshold = 0.01
self._test_bakeoff('BRCA2', 'primary', False)
@timeout_decorator.timeout(900)
def test_full_brca2_snp1kg(self):
""" Indexing, mapping and calling bakeoff F1 test for BRCA2 snp1kg graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('BRCA2', 'snp1kg', False)
@timeout_decorator.timeout(900)
def test_full_brca2_cactus(self):
""" Indexing, mapping and calling bakeoff F1 test for BRCA2 cactus graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('BRCA2', 'cactus', False)
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(900)
def test_map_sma_primary(self):
""" Indexing, mapping and calling bakeoff F1 test for SMA primary graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('SMA', 'primary', True)
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(900)
def test_map_sma_snp1kg(self):
""" Indexing, mapping and calling bakeoff F1 test for SMA snp1kg graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('SMA', 'snp1kg', True)
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(900)
def test_map_sma_cactus(self):
""" Indexing, mapping and calling bakeoff F1 test for SMA cactus graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('SMA', 'cactus', True)
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(900)
def test_map_lrc_kir_primary(self):
""" Indexing, mapping and calling bakeoff F1 test for LRC-KIR primary graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('LRC-KIR', 'primary', True)
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(900)
def test_map_lrc_kir_snp1kg(self):
""" Indexing, mapping and calling bakeoff F1 test for LRC-KIR snp1kg graph """
self._test_bakeoff('LRC-KIR', 'snp1kg', True)
@skip("skipping test to keep runtime down")
@timeout_decorator.timeout(900)
def test_map_lrc_kir_cactus(self):
""" Indexing, mapping and calling bakeoff F1 test for LRC-KIR cactus graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('LRC-KIR', 'cactus', True)
@timeout_decorator.timeout(1200)
def test_map_mhc_primary(self):
""" Indexing, mapping and calling bakeoff F1 test for MHC primary graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('MHC', 'primary', True)
@timeout_decorator.timeout(2000)
def test_map_mhc_snp1kg(self):
""" Indexing, mapping and calling bakeoff F1 test for MHC snp1kg graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('MHC', 'snp1kg', True)
@skip("skipping test to keep runtime down (baseline missing as well)")
@timeout_decorator.timeout(1200)
def test_map_mhc_cactus(self):
""" Indexing, mapping and calling bakeoff F1 test for MHC cactus graph """
log.info("Test start at {}".format(datetime.now()))
self._test_bakeoff('MHC', 'cactus', True)
|
ekg/vg
|
vgci/vgci.py
|
Python
|
mit
| 69,869
|
[
"BWA"
] |
f70ee577fe600c6f775cd520f7d07c7aef9f17d0068d6f3ceb8dcfde3518db90
|
"""Class to generate redmagic randoms"""
import os
import copy
import numpy as np
import healsparse
from ..configuration import Configuration
from ..catalog import Catalog
from ..galaxy import GalaxyCatalog
from ..volumelimit import VolumeLimitMask
class RedmagicGenerateRandoms(object):
"""
Class to generate redmagic randoms from a redmagic volume limit mask.
"""
def __init__(self, config, vlim_mask_or_file, redmagic_cat_or_file):
"""
Instantiate a RedmagicGenerateRandoms object
Parameters
----------
config: `redmapper.Configuration`
Configuration object
vlim_mask_or_file: `str` or `redmapper.VolumeLimitMask`
Name of a file with the volume-limited mask information or
a volume-limit mask.
redmagic_cat_or_file: `str` or `redmapper.GalaxyCatalog`
Name of redmagic file or redmagic catalog.
"""
self.config = config
if isinstance(vlim_mask_or_file, VolumeLimitMask):
self.vlim_mask = vlim_mask_or_file
elif isinstance(vlim_mask_or_file, str):
# This 0.2 is a dummy value
self.vlim_mask = VolumeLimitMask(config, 0.2, vlimfile=vlim_mask_or_file)
else:
raise RuntimeError("vlim_mask_or_file must be a redmapper.VolumeLimitMask or a filename")
if isinstance(redmagic_cat_or_file, GalaxyCatalog):
self.redmagic_cat = redmagic_cat_or_file
elif isinstance(redmagic_cat_or_file, str):
self.redmagic_cat = GalaxyCatalog.from_fits_file(redmagic_cat_or_file)
else:
raise RuntimeError("redmagic_cat_or_file must be a redmapper.GalaxyCatalog")
def generate_randoms(self, nrandoms, filename, clobber=False, rng=None):
"""
Generate random points, and save to filename
Parameters
----------
nrandoms: `int`
Number of randoms to generate
filename: `str`
Output filename
clobber: `bool`
Clobber output file? Default is False.
rng : `np.random.RandomState`, optional
Pre-set random number generator. Default is None.
"""
if rng is None:
rng = np.random.RandomState()
if not clobber and os.path.isfile(filename):
raise RuntimeError("Random file %s already exists and clobber is False." % (filename))
min_gen = 10000
max_gen = 1000000
n_left = copy.copy(nrandoms)
ctr = 0
dtype = [('ra', 'f8'),
('dec', 'f8'),
('z', 'f4'),
('weight', 'f4')]
randcat = Catalog(np.zeros(nrandoms, dtype=dtype))
# Not used at the moment
randcat.weight[:] = 1.0
while (n_left > 0):
n_gen = np.clip(n_left * 3, min_gen, max_gen)
ra_rand, dec_rand = healsparse.make_uniform_randoms(self.vlim_mask.sparse_vlimmap,
n_gen, rng=rng)
# What are the associated z_max and fracgood?
zmax, fracgood = self.vlim_mask.calc_zmax(ra_rand, dec_rand, get_fracgood=True)
# Down-select from fracgood
r = rng.uniform(size=n_gen)
gd, = np.where(r < fracgood)
# Go back and generate more if all bad
if gd.size == 0:
continue
tempcat = Catalog(np.zeros(gd.size, dtype=dtype))
tempcat.ra = ra_rand[gd]
tempcat.dec = dec_rand[gd]
tempcat.z[:] = -1.0
zz = rng.choice(self.redmagic_cat.zredmagic, size=gd.size)
zmax = zmax[gd]
# This essentially takes each redshift and then finds a random
# point where it fits within the redshift envelope. It's a bit
# inefficient, but it preserves the redshift distribution.
zctr = 0
for i in range(tempcat.size):
if zz[zctr] < zmax[i]:
# This redshift is okay!
tempcat.z[i] = zz[zctr]
zctr += 1
gd, = np.where(tempcat.z > 0.0)
n_good = gd.size
if n_good == 0:
continue
tempcat = tempcat[gd]
if n_good > n_left:
n_good = n_left
randcat._ndarray[ctr: ctr + n_good] = tempcat._ndarray[: n_good]
ctr += n_good
n_left -= n_good
randcat.to_fits_file(filename, clobber=True)
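# ----------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal usage example
# for RedmagicGenerateRandoms. The configuration and file names below are
# hypothetical; only the call signatures come from the class defined above.
if __name__ == '__main__':
    # Hypothetical inputs: a redmapper configuration file (assuming
    # Configuration accepts a config file path, as in other redmapper run
    # scripts), a volume-limit mask file, and a redmagic catalog file.
    config = Configuration('redmapper_run.yml')
    generator = RedmagicGenerateRandoms(config, 'vlim_mask.fit', 'redmagic_highdens.fit')
    # Draw 100000 randoms and write them out, overwriting any existing file.
    generator.generate_randoms(100000, 'redmagic_randoms.fit', clobber=True)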
|
erykoff/redmapper
|
redmapper/redmagic/redmagic_randoms.py
|
Python
|
apache-2.0
| 4,548
|
[
"Galaxy"
] |
5e9af54650b451235cfddb51253af4564198a01e60472e84ef31b7c5e0581ac1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='welcome/index.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('thoughtconcert.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
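# Editor's note (sketch, not part of the original urlconf): the named routes
# declared above can be resolved programmatically, assuming Django >= 1.10
# where reverse() is importable from django.urls:
#
#     from django.urls import reverse
#     reverse('home')   # -> '/'
#     reverse('about')  # -> '/about/'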
|
dilwaria/thoughtconcert
|
config/urls.py
|
Python
|
mit
| 1,610
|
[
"VisIt"
] |
b8fbc705e85f2f8763e3b6ca94b3bfd34f72387ca3a7ff38f3f25b19148bf938
|
#!/usr/bin/env python
# Copyright (C) 2014. Ben Pruitt & Nick Conway
# See LICENSE for full GPLv2 license.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
======================================================================
mascpcr: multiplex allele-specific colony (MASC) PCR design pipeline
======================================================================
``mascpcr`` is a native Python pipeline for designing multiplex allele-
specific colony (MASC) PCR primers for genome engineering applications.
Setup / installation is fairly simple (the package may be used in place or
may be installed into your Python site-packages directory by running this script).
Python dependencies:
biopython https://pypi.python.org/pypi/biopython
bitarray https://pypi.python.org/pypi/bitarray/
libnano https://github.com/Wyss/libnano
mauve-py https://github.com/Wyss/mauve-py
numpy https://pypi.python.org/pypi/numpy
openpyxl https://pypi.python.org/pypi/openpyxl
primer3-py https://github.com/benpruitt/primer3-py
six https://pypi.python.org/pypi/six
See README.md for more information.
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='mascpcr',
version='0.0.1',
license='GPLv2',
author='Ben Pruitt, Nick Conway',
author_email='benjamin.pruitt@wyss.harvard.edu',
url='https://github.com/wyss/mascpcr',
description='MASC PCR design pipeline',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)'
],
packages=['mascpcr'],
install_requires=['numpy', 'bitarray', 'biopython', 'openpyxl', 'six',
'primer3-py', 'mauve-py', 'libnano'],
test_suite='tests',
scripts=['scripts/mascpcrcli', 'scripts/mascpcrfeatidx']
)
|
Wyss/mascpcr
|
setup.py
|
Python
|
gpl-2.0
| 2,882
|
[
"Biopython"
] |
c40a20cc933d388ec570bcf49bb1d49f70a7f9c7b3507ca8a09c23e7187b47f6
|
import math
import time
import random
import struct
import numpy as np
from numba import jit
from ibex.utilities import dataIO
from ibex.utilities.constants import *
from ibex.graphs.biological.util import CreateDirectoryStructure, ExtractExample, GenerateExamplesArray, ScaleFeature
@jit(nopython=True)
def ExtractAdjacencyMatrix(segmentation):
zres, yres, xres = segmentation.shape
# create a set of neighboring label pairs, stored as tuples with the lower label first
# a True entry at (z, y, x) in a diff array means the voxel one step along that axis has a different label
xdiff = segmentation[:,:,1:] != segmentation[:,:,:-1]
ydiff = segmentation[:,1:,:] != segmentation[:,:-1,:]
zdiff = segmentation[1:,:,:] != segmentation[:-1,:,:]
adjacency_graph = set()
for iz in range(zres):
for iy in range(yres):
for ix in range(xres):
if iz < zres - 1 and zdiff[iz,iy,ix]:
adjacency_graph.add((segmentation[iz,iy,ix], segmentation[iz+1,iy,ix]))
if iy < yres - 1 and ydiff[iz,iy,ix]:
adjacency_graph.add((segmentation[iz,iy,ix], segmentation[iz,iy+1,ix]))
if ix < xres - 1 and xdiff[iz,iy,ix]:
adjacency_graph.add((segmentation[iz,iy,ix], segmentation[iz,iy,ix+1]))
# make sure that label_one is less than label_two to avoid double edges
corrected_adjacency_graph = set()
for (label_one, label_two) in adjacency_graph:
if not label_one or not label_two: continue
if label_two < label_one: corrected_adjacency_graph.add((label_two, label_one))
else: corrected_adjacency_graph.add((label_one, label_two))
return corrected_adjacency_graph
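# Editor's note (worked example, not part of the original module): for a tiny
# 1x1x3 segmentation np.array([[[1, 1, 2]]]) the only face shared by two
# different nonzero labels is the one between the voxels labelled 1 and 2, so
# ExtractAdjacencyMatrix returns {(1, 2)}. Pairs are always ordered with the
# smaller label first, and any pair touching the background label 0 is dropped.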
def BaselineGraph(prefix, segmentation, seg2gold_mapping):
# get the adjacency matrix
adjacency_graph = ExtractAdjacencyMatrix(segmentation)
positive_candidates = []
negative_candidates = []
unknown_candidates = []
for (label_one, label_two) in adjacency_graph:
gold_one = seg2gold_mapping[label_one]
gold_two = seg2gold_mapping[label_two]
if gold_one < 1 or gold_two < 1: unknown_candidates.append((label_one, label_two))
elif gold_one == gold_two: positive_candidates.append((label_one, label_two))
else: negative_candidates.append((label_one, label_two))
print 'Baseline Adjacency Graph Results'
print ' Number positive edges {}'.format(len(positive_candidates))
print ' Number negative edges {}'.format(len(negative_candidates))
print '  Number unknown edges {}'.format(len(unknown_candidates))
baseline_filename = 'edge-baselines/{}-edge-baselines.txt'.format(prefix)
with open(baseline_filename, 'w') as fd:
fd.write('Baseline Adjacency Graph Results\n')
fd.write(' Number positive edges {}\n'.format(len(positive_candidates)))
fd.write(' Number negative edges {}\n'.format(len(negative_candidates)))
fd.write('  Number unknown edges {}\n'.format(len(unknown_candidates)))
@jit(nopython=True)
def TraverseIndividualEndpoint(segmentation, center, vector, resolution, max_label, maximum_distance):
# the maximum angle is a function of how the endpoint vectors are generated
# the vectors have at best this resolution accuracy
maximum_radians = 0.3216
# save computation time by calculating cos(theta) here
cos_theta = math.cos(maximum_radians)
# decompress important variables
zpoint, ypoint, xpoint = center
zradius, yradius, xradius = (int(maximum_distance / resolution[IB_Z]), int(maximum_distance / resolution[IB_Y]), int(maximum_distance / resolution[IB_X]))
zres, yres, xres = segmentation.shape
label = segmentation[zpoint,ypoint,xpoint]
# create a set of labels to ignore
labels_to_ignore = set()
# start by ignoring all labels with the same value
labels_to_ignore.add(segmentation[zpoint,ypoint,xpoint])
# keep track of what is adjacent in this cube and which potential neighbors are already on the stack
adjacency_matrix = set()
potential_neighbors = set()
zmeans = np.zeros(max_label, dtype=np.float32)
ymeans = np.zeros(max_label, dtype=np.float32)
xmeans = np.zeros(max_label, dtype=np.float32)
counts = np.zeros(max_label, dtype=np.float32)
# iterate through the window
for iz in range(zpoint - zradius, zpoint + zradius + 1):
if iz < 0 or iz > zres - 1: continue
for iy in range(ypoint - yradius, ypoint + yradius + 1):
if iy < 0 or iy > yres - 1: continue
for ix in range(xpoint - xradius, xpoint + xradius + 1):
if ix < 0 or ix > xres - 1: continue
# get the label for this location
voxel_label = segmentation[iz,iy,ix]
# skip over extracellular/unlabeled material
if not voxel_label: continue
# update the adjacency matrix
if iz < zres - 1 and voxel_label != segmentation[iz+1,iy,ix]:
adjacency_matrix.add((voxel_label, segmentation[iz+1,iy,ix]))
# update mean affinities
if voxel_label == label or segmentation[iz+1,iy,ix] == label:
index = segmentation[iz+1,iy,ix] if voxel_label == label else voxel_label
zmeans[index] += (iz + 0.5)
ymeans[index] += iy
xmeans[index] += ix
counts[index] += 1
if iy < yres - 1 and voxel_label != segmentation[iz,iy+1,ix]:
adjacency_matrix.add((voxel_label, segmentation[iz,iy+1,ix]))
# update mean affinities
if voxel_label == label or segmentation[iz,iy+1,ix] == label:
index = segmentation[iz,iy+1,ix] if voxel_label == label else voxel_label
zmeans[index] += iz
ymeans[index] += (iy + 0.5)
xmeans[index] += ix
counts[index] += 1
if ix < xres - 1 and voxel_label != segmentation[iz,iy,ix+1]:
adjacency_matrix.add((voxel_label, segmentation[iz,iy,ix+1]))
# update mean affinities
if voxel_label == label or segmentation[iz,iy,ix+1] == label:
index = segmentation[iz,iy,ix+1] if voxel_label == label else voxel_label
zmeans[index] += iz
ymeans[index] += iy
xmeans[index] += (ix + 0.5)
counts[index] += 1
# skip points that belong to the same label
# needs to be after adjacency lookup
if voxel_label in labels_to_ignore: continue
# find the distance between the center location and this one and skip if it is too far
zdiff = resolution[IB_Z] * (iz - zpoint)
ydiff = resolution[IB_Y] * (iy - ypoint)
xdiff = resolution[IB_X] * (ix - xpoint)
distance = math.sqrt(zdiff * zdiff + ydiff * ydiff + xdiff * xdiff)
if distance > maximum_distance: continue
# get a normalized vector between this point and the center
vector_to_point = (zdiff / distance, ydiff / distance, xdiff / distance)
# get the distance between the two vectors
dot_product = vector[IB_Z] * vector_to_point[IB_Z] + vector[IB_Y] * vector_to_point[IB_Y] + vector[IB_X] * vector_to_point[IB_X]
# get the angle from the dot product
if (dot_product < cos_theta): continue
# add this angle to the list to inspect further and ignore it every other time
labels_to_ignore.add(voxel_label)
potential_neighbors.add(voxel_label)
# only include potential neighbor labels that are locally adjacent
neighbors = []
means = []
for neighbor_label in potential_neighbors:
# do not include background predictions
if not neighbor_label: continue
# make sure that the neighbor is locally adjacent and add to the set of edges
if not (neighbor_label, label) in adjacency_matrix and not (label, neighbor_label) in adjacency_matrix: continue
neighbors.append(neighbor_label)
# return the mean as integer values and continue
means.append((int(zmeans[neighbor_label] / counts[neighbor_label]), int(ymeans[neighbor_label] / counts[neighbor_label]), int(xmeans[neighbor_label] / counts[neighbor_label])))
return neighbors, means
def EndpointTraversal(prefix, segmentation, seg2gold_mapping, maximum_distance):
# get the resolution for this data
resolution = dataIO.Resolution(prefix)
# get the maximum label
max_label = np.amax(segmentation) + 1
# read in all of the skeletons
skeletons = dataIO.ReadSkeletons(prefix)
# create a set of labels to consider
edges = []
# go through every skeletons endpoints
for skeleton in skeletons:
label = skeleton.label
for ie, endpoint in enumerate(skeleton.endpoints):
# get the (x, y, z) location
center = (endpoint.iz, endpoint.iy, endpoint.ix)
vector = endpoint.vector
# do not consider null vectors (the sums are all 0 or 1)
if vector[IB_Z] * vector[IB_Z] + vector[IB_Y] * vector[IB_Y] + vector[IB_X] * vector[IB_X] < 0.5: continue
neighbors, means = TraverseIndividualEndpoint(segmentation, center, vector, resolution, max_label, maximum_distance)
for iv, neighbor_label in enumerate(neighbors):
(zpoint, ypoint, xpoint) = means[iv]
# append this to this list of edges
edges.append((zpoint, ypoint, xpoint, label, neighbor_label, ie))
return edges
def GenerateEdges(prefix, segmentation, seg2gold_mapping, subset, network_radius=600, maximum_distance=500):
# possible widths for the neural network
widths = [(18, 52, 52)]#[(18, 52, 52), (20, 60, 60), (22, 68, 68), (24, 76, 76)]
# create the directory structure to save the features in
# forward is needed for training and validation data that is cropped
CreateDirectoryStructure(widths, network_radius, ['training', 'validation', 'testing', 'forward'], 'edges')
# get the size of the data
zres, yres, xres = segmentation.shape
# make sure the subset is one of three categories
assert (subset == 'training' or subset == 'validation' or subset == 'testing')
# crop the subset if it overlaps with testing data
((cropped_zmin, cropped_zmax), (cropped_ymin, cropped_ymax), (cropped_xmin, cropped_xmax)) = dataIO.CroppingBox(prefix)
# call the function to actually generate the edges
edges = EndpointTraversal(prefix, segmentation, seg2gold_mapping, maximum_distance)
# create list for all relevant examples
positive_examples = []
negative_examples = []
unknown_examples = []
forward_positive_examples = []
forward_negative_examples = []
forward_unknown_examples = []
for edge in edges:
zpoint, ypoint, xpoint = (edge[IB_Z], edge[IB_Y], edge[IB_X])
label_one, label_two = edge[3], edge[4]
# if the center point falls outside the cropped box, do not include it in training or validation
forward = False
# however, it is still allowed for forward inference
if (zpoint < cropped_zmin or cropped_zmax <= zpoint): forward = True
if (ypoint < cropped_ymin or cropped_ymax <= ypoint): forward = True
if (xpoint < cropped_xmin or cropped_xmax <= xpoint): forward = True
# see if these two segments belong to the same neuron
gold_one = seg2gold_mapping[label_one]
gold_two = seg2gold_mapping[label_two]
# create lists of locations where these point occur
if forward:
if gold_one < 1 or gold_two < 1:
forward_unknown_examples.append(edge)
elif gold_one == gold_two:
forward_positive_examples.append(edge)
else:
forward_negative_examples.append(edge)
else:
if gold_one < 1 or gold_two < 1:
unknown_examples.append(edge)
elif gold_one == gold_two:
positive_examples.append(edge)
else:
negative_examples.append(edge)
print 'No. Positive Edges: {}'.format(len(positive_examples))
print 'No. Negative Edges: {}'.format(len(negative_examples))
print 'No. Unknown Edges: {}'.format(len(unknown_examples))
for width in widths:
parent_directory = 'features/biological/edges-{}nm-{}x{}x{}'.format(network_radius, width[IB_Z], width[IB_Y], width[IB_X])
if len(positive_examples):
# save the examples
positive_filename = '{}/{}/positives/{}.examples'.format(parent_directory, subset, prefix)
with open(positive_filename, 'wb') as fd:
fd.write(struct.pack('q', len(positive_examples)))
for ie, example in enumerate(positive_examples):
fd.write(struct.pack('qqqqqq', example[0], example[1], example[2], example[3], example[4], example[5]))
# create new examples array to remove last element
examples = []
for example in positive_examples:
examples.append(example[0:5])
positive_examples_array = GenerateExamplesArray(prefix, segmentation, examples, width, network_radius)
dataIO.WriteH5File(positive_examples_array, '{}/{}/positives/{}-examples.h5'.format(parent_directory, subset, prefix), 'main', compression=True)
del positive_examples_array
if len(negative_examples):
# save the examples
negative_filename = '{}/{}/negatives/{}.examples'.format(parent_directory, subset, prefix)
with open(negative_filename, 'wb') as fd:
fd.write(struct.pack('q', len(negative_examples)))
for example in negative_examples:
fd.write(struct.pack('qqqqqq', example[0], example[1], example[2], example[3], example[4], example[5]))
# create new examples array to remove last element
examples = []
for example in negative_examples:
examples.append(example[0:5])
negative_examples_array = GenerateExamplesArray(prefix, segmentation, examples, width, network_radius)
dataIO.WriteH5File(negative_examples_array, '{}/{}/negatives/{}-examples.h5'.format(parent_directory, subset, prefix), 'main', compression=True)
del negative_examples_array
if len(unknown_examples):
# save the examples
unknown_filename = '{}/{}/unknowns/{}.examples'.format(parent_directory, subset, prefix)
with open(unknown_filename, 'wb') as fd:
fd.write(struct.pack('q', len(unknown_examples)))
for example in unknown_examples:
fd.write(struct.pack('qqqqqq', example[0], example[1], example[2], example[3], example[4], example[5]))
# create new examples array to remove last element
examples = []
for example in unknown_examples:
examples.append(example[0:5])
unknown_examples_array = GenerateExamplesArray(prefix, segmentation, examples, width, network_radius)
dataIO.WriteH5File(unknown_examples_array, '{}/{}/unknowns/{}-examples.h5'.format(parent_directory, subset, prefix), 'main', compression=True)
del unknown_examples_array
if len(forward_positive_examples):
# save the examples
forward_positive_filename = '{}/forward/positives/{}.examples'.format(parent_directory, prefix)
with open(forward_positive_filename, 'wb') as fd:
fd.write(struct.pack('q', len(forward_positive_examples)))
for example in forward_positive_examples:
fd.write(struct.pack('qqqqqq', example[0], example[1], example[2], example[3], example[4], example[5]))
# create new examples array to remove last element
examples = []
for example in forward_positive_examples:
examples.append(example[0:5])
forward_positive_examples_array = GenerateExamplesArray(prefix, segmentation, examples, width, network_radius)
dataIO.WriteH5File(forward_positive_examples_array, '{}/forward/positives/{}-examples.h5'.format(parent_directory, prefix), 'main', compression=True)
del forward_positive_examples_array
if len(forward_negative_examples):
# save the examples
forward_negative_filename = '{}/forward/negatives/{}.examples'.format(parent_directory, prefix)
with open(forward_negative_filename, 'wb') as fd:
fd.write(struct.pack('q', len(forward_negative_examples)))
for example in forward_negative_examples:
fd.write(struct.pack('qqqqqq', example[0], example[1], example[2], example[3], example[4], example[5]))
# create new examples array to remove last element
examples = []
for example in forward_negative_examples:
examples.append(example[0:5])
forward_negative_examples_array = GenerateExamplesArray(prefix, segmentation, examples, width, network_radius)
dataIO.WriteH5File(forward_negative_examples_array, '{}/forward/negatives/{}-examples.h5'.format(parent_directory, prefix), 'main', compression=True)
del forward_negative_examples_array
if len(forward_unknown_examples):
# save the examples
forward_unknown_filename = '{}/forward/unknowns/{}.examples'.format(parent_directory, prefix)
with open(forward_unknown_filename, 'wb') as fd:
fd.write(struct.pack('q', len(forward_unknown_examples)))
for example in forward_unknown_examples:
fd.write(struct.pack('qqqqqq', example[0], example[1], example[2], example[3], example[4], example[5]))
# create new examples array to remove last element
examples = []
for example in forward_unknown_examples:
examples.append(example[0:5])
forward_unknown_examples_array = GenerateExamplesArray(prefix, segmentation, examples, width, network_radius)
dataIO.WriteH5File(forward_unknown_examples_array, '{}/forward/unknowns/{}-examples.h5'.format(parent_directory, prefix), 'main', compression=True)
del forward_unknown_examples_array
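# ----------------------------------------------------------------------
# Editor's sketch (not part of the original module): reading back one of the
# .examples files written by GenerateEdges above. The filename is up to the
# caller; the binary layout (a signed 64-bit count followed by six signed
# 64-bit integers per example) mirrors the struct.pack calls used when writing.
def ReadExamples(filename):
    examples = []
    with open(filename, 'rb') as fd:
        nexamples, = struct.unpack('q', fd.read(8))
        for _ in range(nexamples):
            # (zpoint, ypoint, xpoint, label_one, label_two, endpoint index)
            examples.append(struct.unpack('qqqqqq', fd.read(48)))
    return examples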
|
bmatejek/ibex
|
graphs/biological/edge_generation.py
|
Python
|
mit
| 18,935
|
[
"NEURON"
] |
16619ca7daa4ca72108e8b671e932ab153e385c1fb780c1d17452d670f5c7178
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
##from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
##import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/384')
from data_384 import Fmat_original
# Scaling function
def scaling(mat):
Fvec_a = mat[0:121,0:]
Fvec_b = mat[121:242,0:]
Fvec_c = mat[242:363,0:]
# With Scaling
max_a = np.max(abs(Fvec_a))
min_a = np.min(abs(Fvec_a))
mean_a = np.mean(Fvec_a)
std_a = np.std(Fvec_a)
#Fvec_a = (Fvec_a)/max_a
#Fvec_a = (Fvec_a-mean_a)
#Fvec_a = (Fvec_a-mean_a)/max_a
Fvec_a = (Fvec_a-mean_a)/std_a
# With Scaling
max_b = np.max(abs(Fvec_b))
min_b = np.min(abs(Fvec_b))
mean_b = np.mean(Fvec_b)
std_b = np.std(Fvec_b)
#Fvec_b = (Fvec_b)/max_b
#Fvec_b = (Fvec_b-mean_b)
#Fvec_b = (Fvec_b-mean_b)/max_b
#Fvec_b = (Fvec_b-mean_b)/std_b
# With Scaling
max_c = np.max(abs(Fvec_c))
min_c = np.min(abs(Fvec_c))
mean_c = np.mean(Fvec_c)
std_c = np.std(Fvec_c)
#Fvec_c = (Fvec_c)/max_c
#Fvec_c = (Fvec_c-mean_c)
#Fvec_c = (Fvec_c-mean_c)/max_c
Fvec_c = (Fvec_c-mean_c)/std_c
#Fvec_c = Fvec_c*np.max((max_a,max_b))/max_c
Fvec = np.row_stack([Fvec_a,Fvec_b,Fvec_c])
n_Fvec, m_Fvec = np.shape(Fvec)
#print 'Feature_Vector_Shape:',n_Fvec, m_Fvec
return Fvec
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
index = 0
m,n = np.shape(fvec1)
#print m,n
mu_1 = np.zeros((10,1))
mu_2 = np.zeros((10,1))
cov = np.zeros((10,2,2))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
mu_1[index] = np.mean(temp_fvec1)
mu_2[index] = np.mean(temp_fvec2)
cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
if index == 0:
print 'mean = ', mu_2[index]
print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
print cov[index,:,:]
print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
print scp.std(temp_fvec2)
index = index+1
return mu_1,mu_2,cov
if __name__ == '__main__':
Fmat = Fmat_original
# Scaling wrt all data
Fmat_rf = scaling(Fmat[:,0:35])
Fmat_rm = scaling(Fmat[:,35:70])
Fmat_sf = scaling(Fmat[:,70:105])
Fmat_sm = scaling(Fmat[:,105:140])
Fmat = np.matrix(np.column_stack((Fmat_rf,Fmat_rm,Fmat_sf,Fmat_sm)))
# Checking the Data-Matrix
#print np.shape(Fmat[0])
m_tot, n_tot = np.shape(Fmat)
#print " "
print 'Total_Matrix_Shape:',m_tot,n_tot
mu_rf_force,mu_rf_motion,cov_rf = feature_to_mu_cov(Fmat[0:121,0:35],Fmat[242:363,0:35])
mu_rm_force,mu_rm_motion,cov_rm = feature_to_mu_cov(Fmat[0:121,35:70],Fmat[242:363,35:70])
mu_sf_force,mu_sf_motion,cov_sf = feature_to_mu_cov(Fmat[0:121,70:105],Fmat[242:363,70:105])
mu_sm_force,mu_sm_motion,cov_sm = feature_to_mu_cov(Fmat[0:121,105:140],Fmat[242:363,105:140])
print cov_rf
# HMM - Implementation:
# 10 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
# Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities obtained as an upper-diagonal matrix (to be trained using Baum-Welch)
# A new object is classified according to whichever model it represents most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.20, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
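# Editor's note (sketch, not part of the original script): each row of A is a
# probability distribution over successor states, so every row should sum to
# 1.0. A quick sanity check using the numpy import above:
#     assert np.allclose(np.array(A).sum(axis=1), 1.0)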
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = [0.0]*10
B_rm = [0.0]*10
B_sf = [0.0]*10
B_sm = [0.0]*10
for num_states in range(10):
B_rf[num_states] = [[mu_rf_force[num_states][0],mu_rf_motion[num_states][0]],[cov_rf[num_states][0][0],cov_rf[num_states][0][1],cov_rf[num_states][1][0],cov_rf[num_states][1][1]]]
B_rm[num_states] = [[mu_rm_force[num_states][0],mu_rm_motion[num_states][0]],[cov_rm[num_states][0][0],cov_rm[num_states][0][1],cov_rm[num_states][1][0],cov_rm[num_states][1][1]]]
B_sf[num_states] = [[mu_sf_force[num_states][0],mu_sf_motion[num_states][0]],[cov_sf[num_states][0][0],cov_sf[num_states][0][1],cov_sf[num_states][1][0],cov_sf[num_states][1][1]]]
B_sm[num_states] = [[mu_sm_force[num_states][0],mu_sm_motion[num_states][0]],[cov_sm[num_states][0][0],cov_sm[num_states][0][1],cov_sm[num_states][1][0],cov_sm[num_states][1][1]]]
print cov_sm[num_states][0][0],cov_sm[num_states][0][1],cov_sm[num_states][1][0],cov_sm[num_states][1][1]
print "----"
#print B_sm
#print mu_sm_motion
# pi - initial probabilities per state
pi = [0.1] * 10
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm, pi) # Will be Trained
# For Training
trial_number = 1
rf_final = np.matrix(np.zeros((28,1)))
rm_final = np.matrix(np.zeros((28,1)))
sf_final = np.matrix(np.zeros((28,1)))
sm_final = np.matrix(np.zeros((28,1)))
#total_seq = np.matrix(np.concatenate((np.array(Fmat[0:121,:]),np.array(Fmat[242:363,:])),axis=0))
total_seq = np.zeros((242,140))
temp_seq1 = Fmat[0:121,:]
temp_seq2 = Fmat[242:363,:]
i = 0
j = 0
while i < 242:
total_seq[i] = temp_seq1[j]
total_seq[i+1] = temp_seq2[j]
j=j+1
i=i+2
m_total, n_total = np.shape(total_seq)
print 'Total_Sequence_Shape:', m_total, n_total
while (trial_number < 6):
# For Training
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[:,1:5]
total_seq_rm = total_seq[:,36:40]
total_seq_sf = total_seq[:,71:75]
total_seq_sm = total_seq[:,106:110]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+1:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+36:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+71:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+106:j+110]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = np.column_stack((total_seq[:,0],total_seq[:,2:5]))
total_seq_rm = np.column_stack((total_seq[:,35],total_seq[:,37:40]))
total_seq_sf = np.column_stack((total_seq[:,70],total_seq[:,72:75]))
total_seq_sm = np.column_stack((total_seq[:,105],total_seq[:,107:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+0],total_seq[:,j+2:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35],total_seq[:,j+37:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70],total_seq[:,j+72:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105],total_seq[:,j+107:j+110]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = np.column_stack((total_seq[:,0:2],total_seq[:,3:5]))
total_seq_rm = np.column_stack((total_seq[:,35:37],total_seq[:,38:40]))
total_seq_sf = np.column_stack((total_seq[:,70:72],total_seq[:,73:75]))
total_seq_sm = np.column_stack((total_seq[:,105:107],total_seq[:,108:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+0:j+2],total_seq[:,j+3:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35:j+37],total_seq[:,j+38:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70:j+72],total_seq[:,j+73:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105:j+107],total_seq[:,j+108:j+110]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = np.column_stack((total_seq[:,0:3],total_seq[:,4:5]))
total_seq_rm = np.column_stack((total_seq[:,35:38],total_seq[:,39:40]))
total_seq_sf = np.column_stack((total_seq[:,70:73],total_seq[:,74:75]))
total_seq_sm = np.column_stack((total_seq[:,105:108],total_seq[:,109:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+0:j+3],total_seq[:,j+4:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35:j+38],total_seq[:,j+39:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70:j+73],total_seq[:,j+74:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105:j+108],total_seq[:,j+109:j+110]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[:,0:4]
total_seq_rm = total_seq[:,35:39]
total_seq_sf = total_seq[:,70:74]
total_seq_sm = total_seq[:,105:109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+0:j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35:j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70:j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105:j+109]))
j = j+5
train_seq_rf = (np.array(total_seq_rf).T).tolist()
train_seq_rm = (np.array(total_seq_rm).T).tolist()
train_seq_sf = (np.array(total_seq_sf).T).tolist()
train_seq_sm = (np.array(total_seq_sm).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[:,0]
total_seq_rm = total_seq[:,35]
total_seq_sf = total_seq[:,70]
total_seq_sm = total_seq[:,105]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = total_seq[:,1]
total_seq_rm = total_seq[:,36]
total_seq_sf = total_seq[:,71]
total_seq_sm = total_seq[:,106]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+1]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+36]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+71]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+106]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = total_seq[:,2]
total_seq_rm = total_seq[:,37]
total_seq_sf = total_seq[:,72]
total_seq_sm = total_seq[:,107]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+2]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+37]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+72]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+107]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = total_seq[:,3]
total_seq_rm = total_seq[:,38]
total_seq_sf = total_seq[:,73]
total_seq_sm = total_seq[:,108]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+3]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+38]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+73]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+108]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[:,4]
total_seq_rm = total_seq[:,39]
total_seq_sf = total_seq[:,74]
total_seq_sm = total_seq[:,109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+109]))
j = j+5
total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[:,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_rf_obj = model_rf.viterbi(final_ts_obj)
path_rm_obj = model_rm.viterbi(final_ts_obj)
path_sf_obj = model_sf.viterbi(final_ts_obj)
path_sm_obj = model_sm.viterbi(final_ts_obj)
obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
if obj == path_rf_obj[1]:
rf[0,k] = 1
elif obj == path_rm_obj[1]:
rm[0,k] = 1
elif obj == path_sf_obj[1]:
sf[0,k] = 1
else:
sm[0,k] = 1
k = k+1
#print rf.T
rf_final = rf_final + rf.T
rm_final = rm_final + rm.T
sf_final = sf_final + sf.T
sm_final = sm_final + sm.T
trial_number = trial_number + 1
#print rf_final
#print rm_final
#print sf_final
#print sm_final
# Confusion Matrix
cmat = np.zeros((4,4))
arrsum_rf = np.zeros((4,1))
arrsum_rm = np.zeros((4,1))
arrsum_sf = np.zeros((4,1))
arrsum_sm = np.zeros((4,1))
k = 7
i = 0
while (k < 29):
arrsum_rf[i] = np.sum(rf_final[k-7:k,0])
arrsum_rm[i] = np.sum(rm_final[k-7:k,0])
arrsum_sf[i] = np.sum(sf_final[k-7:k,0])
arrsum_sm[i] = np.sum(sm_final[k-7:k,0])
i = i+1
k = k+7
i=0
while (i < 4):
j=0
while (j < 4):
if (i == 0):
cmat[i][j] = arrsum_rf[j]
elif (i == 1):
cmat[i][j] = arrsum_rm[j]
elif (i == 2):
cmat[i][j] = arrsum_sf[j]
else:
cmat[i][j] = arrsum_sm[j]
j = j+1
i = i+1
#print cmat
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels], cmap='gray_r')
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j],color='k')
if cmat[i][j] > 24:
pp.text(j+0.5,3.5-i,cmat[i][j],color='w')
j = j+1
i = i+1
pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/multivariate_gaussian_emissions/hmm_crossvalidation_force_motion_10_states_scaled_wrt_all_data.py
|
Python
|
mit
| 18,213
|
[
"Gaussian",
"Mayavi"
] |
d6ccae5c3e80101f44545a18fabb645024215f12429411dc596b6c9c5527798f
|
import sys
import vtk
import ca_smoothing
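# Editor's sketch (not part of the original script): the pipeline below reads
# an input STL path from sys.argv[1] and writes a PLY file to sys.argv[2], so
# fail early with a usage message if either argument is missing.
if len(sys.argv) != 3:
    sys.exit("usage: python test_python_wrapper.py <input.stl> <output.ply>")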
stl = vtk.vtkSTLReader()
stl.SetFileName(sys.argv[1])
stl.Update()
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(stl.GetOutputPort())
normals.ComputeCellNormalsOn()
normals.Update()
clean = vtk.vtkCleanPolyData()
clean.SetInputConnection(normals.GetOutputPort())
clean.Update()
pd = clean.GetOutput()
pd.BuildLinks()
tpd = ca_smoothing.ca_smoothing(pd, 0.7, 3, 0.2, 10)
ply = vtk.vtkPLYWriter()
ply.SetFileName(sys.argv[2])
ply.SetInputData(tpd)
ply.Write()
|
tfmoraes/python-casmoothing
|
test_python_wrapper.py
|
Python
|
gpl-2.0
| 523
|
[
"VTK"
] |
41bb1c2b0c6851db40cc32e73aa75ecd7228b39c751c75596c2bd9e729b247d0
|