| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12–1.05M | stringlengths 5–86 | stringlengths 4–191 | stringclasses 1 value | stringclasses 15 values | int32 12–1.05M | listlengths 1–23 | stringlengths 64–64 |
"""
Copyright (C) 2016 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import os
import socket
import subprocess
import docker
from common import common_logging_elasticsearch_httpx
# https://docker-py.readthedocs.io/en/stable/
# the following function is used on Alpine until socket.gethostbyname('host.docker.internal') resolves there
def com_docker_host_ip():
# NOTE: from inside a container this returns the default-route (gateway) IP to the host, i.e. 172.x.x.x
return \
subprocess.check_output(['ip', '-4', 'route', 'show', 'default']).decode("utf-8").split(
' ')[2]
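# Hedged usage sketch: on Alpine with the 'ip' binary available this yields the
# gateway address, e.g.
#   host_ip = com_docker_host_ip()  # -> '172.17.0.1' on the default bridge network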
class CommonDocker:
"""
Class for interfacing with docker
"""
def __init__(self):
self.cli = docker.from_env()
self.cli_api = docker.APIClient(base_url='unix://var/run/docker.sock')
def com_docker_container_list(self):
"""
List containers on host
"""
return self.cli_api.containers()
def com_docker_container_bind(self, container_name='/mkstack_server',
bind_match='/data/devices'):
for container_inst in self.com_docker_container_list():
common_logging_elasticsearch_httpx.com_es_httpx_post(message_type='info', message_text={
'container_inst': container_inst})
if container_inst['Names'][0] == container_name:
for mount_points in container_inst['Mounts']:
if mount_points['Source'].endswith(bind_match):
return mount_points['Source'].replace(bind_match, '')
def com_docker_container_id_by_name(self, container_name='/mkstack_database'):
print('id by name', flush=True)
for container_inst in self.com_docker_container_list():
common_logging_elasticsearch_httpx.com_es_httpx_post(message_type='info', message_text={
'container_inst name': container_inst})
print('con inst', container_inst, flush=True)
if container_inst['Names'][0] == container_name:
print(container_inst['Id'], flush=True)
return container_inst['Id']
def com_docker_info(self):
"""
docker info on host
"""
return self.cli.info()
def com_docker_port(self, container_id=None, mapped_port=5050):
"""
pull mapped ports for container
"""
if container_id is None:
# docker containers spun up have container id as hostname
container_id = socket.gethostname()
return self.cli_api.port(container_id, mapped_port)
def com_docker_ports_free(self):
"""
return list of ports in use by docker
"""
port_list = []
for container_inst in self.com_docker_container_list():
for port_ndx in container_inst['Ports']:
if 'PublicPort' in port_ndx: # as not all containers have open port
port_list.append(port_ndx['PublicPort'])
return port_list
def com_docker_swarm_init(self):
"""
initialize swarm on host
"""
if os.environ['SWARMIP'] == 'None':
try:
return self.cli_api.init_swarm()
except Exception:
common_logging_elasticsearch_httpx.com_es_httpx_post(message_type='critical',
message_text={'stuff':
'Must define Docker Swarm IP in the ENV file since the host has multiple IPs'})
else:
return self.cli_api.init_swarm(advertise_addr=os.environ['SWARMIP'])
def com_docker_swarm_inspect(self):
"""
swarm info on host
"""
return self.cli_api.inspect_swarm()
def com_docker_swarm_leave(self):
"""
leave current swarm
"""
return self.cli_api.leave_swarm()
def com_docker_node_list(self):
"""
List nodes in swarm
"""
return self.cli_api.nodes()
def com_docker_version(self):
"""
return docker version on host
"""
return self.cli.version()
def com_docker_volume_info(self, volume_name):
"""
return info on the specified volume
"""
return self.cli_api.inspect_volume(volume_name)
def com_docker_volume_remove(self, volume_name):
"""
remove volume from docker
"""
return self.cli_api.remove_volume(volume_name)
def com_docker_volume_list(self):
"""
list docker volumes
"""
return self.cli_api.volumes()
def com_docker_run_command(self, container_name, docker_command):
"""
run command in a running container (docker-py's exec_run lives on Container objects)
"""
return self.cli.containers.get(container_name).exec_run(cmd=docker_command)
def com_docker_run_command_via_exec(self, container_id, docker_command):
"""
run command in a container via exec
"""
return self.cli_api.exec_create(container_id, docker_command)
def com_docker_start_exec(self, exec_id, stream_output=False):
"""
start the exec setup from run command
"""
return self.cli_api.exec_start(exec_id=exec_id, detach=False, stream=stream_output)
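# Hedged sketch of the two-step exec flow above (container id and command are
# illustrative; exec_create returns a dict with an 'Id' key):
#   exec_handle = self.com_docker_run_command_via_exec('my_container', 'ls /')
#   output = self.com_docker_start_exec(exec_handle['Id'])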
# https://docker-py.readthedocs.io/en/stable/containers.html
def com_docker_run_container(self, container_data_list):
"""
Launch container (usually for slave play)
"""
return self.cli.containers.run(image=container_data_list[2],
network=container_data_list[5],
detach=container_data_list[3],
ports=container_data_list[4],
command=container_data_list[0],
volumes=container_data_list[6],
name=container_data_list[1],
environment=container_data_list[8])
# auto_remove=container_remove)
def com_docker_delete_container(self, container_image_name, container_force=True):
"""
Remove container from disk and terminate it forcefully if asked
"""
try:
# the container might not exist yet (e.g. when starting main_debug.py directly)
return self.cli_api.remove_container(container=container_image_name,
force=container_force)
except Exception:
pass
def com_docker_pause_container(self, container_image_name):
"""
pause container
"""
return self.cli_api.pause(container=container_image_name)
def com_docker_unpause_container(self, container_image_name):
"""
unpause container
"""
return self.cli_api.unpause(container=container_image_name)
def com_docker_network_create(self, network_name='mediakraken_network_backend'):
"""
create network
"""
# verify the network doesn't already exist
if len(self.com_docker_network_list(network_name)) == 0:
return self.cli.networks.create(name=network_name, driver="bridge")
def com_docker_network_list(self, network_name='mediakraken_network_backend'):
return self.cli.networks.list(network_name)
def com_docker_network_prune(self):
"""
prune network
"""
return self.cli.networks.prune()
def com_docker_run_device_scan(self, current_host_working_directory):
if current_host_working_directory is not None \
and os.path.exists(os.path.join(current_host_working_directory, 'data/devices')):
self.com_docker_delete_container('mkdevicescan')
return self.cli.containers.run(
image='mediakraken/mkdevicescan:%s' % os.environ['BRANCH'],
detach=True,
command='python3 /mediakraken/main_hardware_discover.py',
name='mkdevicescan',
network_mode='host',
volumes={os.path.join(current_host_working_directory,
'data/devices'):
{'bind': '/mediakraken/devices',
'mode': 'rw'}
},
environment={'DEBUG': os.environ['DEBUG']},
)
def com_docker_run_dosbox(self, current_user_uuid, current_host_working_directory, game_uuid):
if current_host_working_directory is not None \
and os.path.exists(os.path.join(current_host_working_directory, 'data/dosbox')):
self.com_docker_delete_container(
('mkdosboxweb' + current_user_uuid.replace('-', ''))[:30])
self.com_docker_network_create('mediakraken_network_backend')
# create user dir for dosbox, game_uuid is used to not lose installs/game saves
user_host_dir = os.path.join(current_host_working_directory, 'data/dosbox',
current_user_uuid, game_uuid)
if not os.path.exists(user_host_dir):
os.makedirs(user_host_dir)
return self.cli.containers.run(
image='mediakraken/mkdosboxweb:%s' % os.environ['BRANCH'],
detach=True,
name=('mkdosboxweb' + current_user_uuid.replace('-',
''))[
:30],
network='mediakraken_network_backend',
volumes={user_host_dir:
{'bind': '/mediakraken/dosbox',
'mode': 'rw'}
},
environment={'DEBUG': os.environ['DEBUG']},
)
def com_docker_run_elk(self, current_host_working_directory):
if current_host_working_directory is not None \
and os.path.exists(os.path.join(current_host_working_directory, 'elkdata')):
self.com_docker_delete_container('mkelk')
self.com_docker_network_create('mediakraken_network_backend')
return self.cli.containers.run(image='mediakraken/mkelk:%s' % os.environ['BRANCH'],
detach=True,
ports={"5000": 5000, "5044": 5044,
"5601": 5601, "9200": 9200},
name='mkelk',
network='mediakraken_network_backend',
volumes={
os.path.join(current_host_working_directory,
'data/elk'):
{'bind': '/var/lib/elasticsearch',
'mode': 'rw'}
},
environment={'ELASTICSEARCH_START': 1,
'LOGSTASH_START': 1,
'KIBANA_START': 1}
)
def com_docker_run_game_data(self, current_host_working_directory,
container_command='python3 /mediakraken/subprogram_metadata_games.py'):
"""
Launch container for game data load
"""
if current_host_working_directory is not None and os.path.exists(
os.path.join(current_host_working_directory, 'data/emulation')):
self.com_docker_delete_container('mkgamedata')
return self.cli.containers.run(image='mediakraken/mkgamedata:%s' % os.environ['BRANCH'],
network='mediakraken_network_backend',
command=container_command,
detach=True,
volumes={os.path.join(current_host_working_directory,
'data/emulation'):
{'bind': '/mediakraken/emulation',
'mode': 'rw'}
},
environment={'POSTGRES_DB': os.environ['POSTGRES_DB'],
'POSTGRES_USER': os.environ[
'POSTGRES_USER'],
'POSTGRES_PASSWORD': os.environ[
'POSTGRES_PASSWORD'],
'DEBUG': os.environ['DEBUG'],
},
name='mkgamedata')
def com_docker_run_cast(self, hwaccel, name_container, container_command):
"""
Launch container for cast play
"""
# docker run --name waffleboy -it --rm --net host -v /mediakraken/nfsmount:/mediakraken/mnt mediakraken/mkslave castnow
# --tomp4 --ffmpeg-acodec ac3 --ffmpeg-movflags frag_keyframe+empty_moov+faststart
# --address 10.0.0.220 --myip 10.0.0.198 '/mediakraken/mnt/DVD/Creep (2004)/Creep (2004).mkv'
if hwaccel:
image_name = 'mediakraken/mkslavenvidiadebian:%s' % os.environ['BRANCH']
else:
image_name = 'mediakraken/mktranscode:%s' % os.environ['BRANCH']
# rm - cleanup after exit
# it - interactive tty
# container_command = 'docker run -it --rm --net host -v ' \
# + '/mediakraken/nfsmount:/mediakraken/mnt ' \
# + 'mediakraken/mkslave ' + container_command)
return self.cli.containers.run(image=image_name,
network_mode='host',
command=container_command,
detach=True,
volumes={'/var/run/docker.sock':
{'bind': '/var/run/docker.sock',
'mode': 'ro'},
'/mediakraken/nfsmount':
{'bind': '/mediakraken/mnt',
'mode': 'ro'}
},
name=name_container)
# def com_docker_run_musicbrainz(self, current_host_working_directory, brainzcode):
# if current_host_working_directory is not None and os.path.exists(
# os.path.join(current_host_working_directory, 'data/mbrainz')):
# self.com_docker_delete_container('mkmusicbrainz')
# return self.cli.containers.run(image='mediakraken/mkmusicbrainz:%s' % os.environ['BRANCH'],
# detach=True,
# name='mkmusicbrainz',
# network='mediakraken_network_backend',
# ports={"5000": 5000},
# environment={'BRAINZCODE': brainzcode},
# volumes={os.path.join(current_host_working_directory,
# 'data/mbrainz/config'):
# {'bind': '/config', 'mode': 'rw'},
# os.path.join(current_host_working_directory,
# 'data/mbrainz/data'):
# {'bind': '/data', 'mode': 'rw'}})
def com_docker_run_mumble(self, current_host_working_directory):
if current_host_working_directory is not None and os.path.exists(
os.path.join(current_host_working_directory, 'data/mumble')):
self.com_docker_delete_container('mkmumble')
return self.cli.containers.run(image='mediakraken/mkmumble:%s' % os.environ['BRANCH'],
detach=True,
ports={"64738": 64738},
name='mkmumble',
volumes={os.path.join(current_host_working_directory,
'data/mumble'):
{'bind': '/etc/mumble',
'mode': 'rw'}
}
)
def com_docker_run_openldap(self, current_host_working_directory):
if current_host_working_directory is not None and os.path.exists(
os.path.join(current_host_working_directory, 'data/openldap')):
self.com_docker_delete_container('mkopenldap')
return self.cli.containers.run(image='mediakraken/mkopenldap:%s' % os.environ['BRANCH'],
detach=True,
name='mkopenldap',
ports={"389": 389, "636": 636},
volumes={os.path.join(current_host_working_directory,
'data/openldap/conf'):
{'bind': '/etc/openldap',
'mode': 'rw'},
os.path.join(current_host_working_directory,
'data/openldap/data'):
{'bind': '/var/lib/openldap/openldap-data',
'mode': 'rw'}},
network='mediakraken_network_backend')
def com_docker_run_slave(self, hwaccel, port_mapping, name_container, container_command,
ram_disk=False):
"""
Launch container for slave play
"""
if hwaccel:
image_name = 'mediakraken/mktranscodenvidiadebian:%s' % os.environ['BRANCH']
else:
image_name = 'mediakraken/mktranscode:%s' % os.environ['BRANCH']
mount_volumes = {'/var/run/docker.sock':
{'bind': '/var/run/docker.sock',
'mode': 'ro'},
'/mediakraken/nfsmount':
{'bind': '/mediakraken/mnt',
'mode': 'ro'}
}
if ram_disk:
mount_volumes['tmpfs'] = {'bind': '/mediakraken/ramdisk'}
"""
tmpfs:
- /my-run:size=10M
"""
self.com_docker_delete_container(image_name.replace('mediakraken/', ''))
return self.cli.containers.run(image=image_name,
ports=port_mapping,
network='mediakraken_network_backend',
command=container_command,
detach=True,
volumes=mount_volumes,
name=name_container)
def com_docker_run_twitch_record_user(self, twitch_user):
"""
Launch container for twitch user recording
"""
return self.cli.containers.run(image='mediakraken/mktranscode:%s' % os.environ['BRANCH'],
command='python3 check.py ' + twitch_user,
detach=True,
volumes={
'/mediakraken/nfsmount':
{'bind': '/mediakraken/mnt',
'mode': 'rw'}
},
environment={'DEBUG': os.environ['DEBUG']},
name='mktwitchrecorduser_' + twitch_user)
def com_docker_run_wireshark(self):
"""
run wireshark
"""
self.com_docker_delete_container('mkwireshark')
self.com_docker_network_create('mediakraken_network_backend')
return self.cli.containers.run(image='mediakraken/mkwireshark:%s' % os.environ['BRANCH'],
detach=True,
name='mkwireshark',
ports={"14500": 14500},
cap_add=['NET_ADMIN'],
environment={'XPRA_PW': 'wireshark'})
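if __name__ == '__main__':
    # Minimal usage sketch (assumes a local Docker daemon on /var/run/docker.sock;
    # the printed structures are whatever docker-py returns, nothing MediaKraken-specific)
    docker_inst = CommonDocker()
    print(docker_inst.com_docker_version(), flush=True)
    print(docker_inst.com_docker_ports_free(), flush=True)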
| MediaKraken/MediaKraken_Deployment | source/common/common_docker.py | Python | gpl-3.0 | 22,042 | ["Elk"] | 4587b9f18ae9e2de02e7d570971ce4bcd12f222ea712c0ec020ca876ea1215b9 |
from __future__ import unicode_literals
import sys
import comtypes
from comtypes import GUID
import ctypes
import os
from itertools import takewhile
import logging
logging.basicConfig(filename=r'D:\bogo.log', level=logging.DEBUG)
# We aren't interested in DEBUG messages from comtypes
comtypes_logger = logging.getLogger("comtypes")
comtypes_logger.setLevel(logging.WARNING)
import bogo
import utils
# The generated files are in $PYTHON\Lib\site-packages\comtypes\gen\
from comtypes.gen.BoGo import BoGo
from comtypes.gen.TSF import *
# I had to hack through Windoze's registry to find these numbers...
CLSID_TF_InputProcessorProfiles = GUID("{33C53A50-F456-4884-B049-85FD643ECFED}")
CLSID_TF_CategoryMgr = GUID("{A4B544A1-438D-4B41-9325-869523E2D6C7}")
GUID_TFCAT_TIP_KEYBOARD = GUID("{34745C63-B2F0-4784-8B67-5E12C8701A31}")
TF_ES_ASYNCDONTCARE = 0x0
TF_ES_SYNC = 0x1
TF_ES_READ = 0x2
TF_ES_READWRITE = 0x6
TF_ES_ASYNC = 0x8
WM_KEYDOWN = 0x0100
WM_KEYUP = 0x0101
WM_CHAR = 0x0102
VK_BACK = 0x08
VK_CONTROL = 0x11
VK_MENU = 0x12
VK_SPACE = 0x20
serverGUIDPointer = ctypes.pointer(GUID("{4581A23E-03EA-4614-975B-FF6206A8B840}"))
class BoGoTextService(BoGo):
_reg_threading_ = "Apartment"
_reg_progid_ = "BoGo.Server.1"
_reg_novers_progid_ = "BoGo.Server"
_reg_desc_ = "BoGo COM server"
_reg_clsctx_ = comtypes.CLSCTX_INPROC_SERVER
# _reg_typelib_ = "interfaces\\bogo.tlb"
# The _register and _unregister class methods are called by comtypes.server.register
# as custom hooks for registration and unregistration.
@classmethod
def _register(self, registrar):
registrar._unregister(BoGoTextService, force=True)
registrar._register(BoGoTextService)
# Register input profile (supported languages, description,...)
inputProcessorProfiles = comtypes.client.CreateObject(CLSID_TF_InputProcessorProfiles,
clsctx=comtypes.CLSCTX_INPROC_SERVER,
interface=ITfInputProcessorProfiles)
profileGUIDPointer = serverGUIDPointer
description = utils.text_to_ushort_array("BoGo")
# http://msdn.microsoft.com/en-us/library/windows/desktop/dd318693%28v=vs.85%29.aspx
VI_VN = 0x042A
inputProcessorProfiles.Register(serverGUIDPointer)
inputProcessorProfiles.AddLanguageProfile(
serverGUIDPointer,
VI_VN,
profileGUIDPointer,
description,
4, # The description is 4-char long
None, # We don't have icons
-1,
-1)
# Register categories (whether we do keyboard, voice,...)
categoryManager = comtypes.client.CreateObject(CLSID_TF_CategoryMgr,
clsctx=comtypes.CLSCTX_INPROC_SERVER,
interface=ITfCategoryMgr)
categoryManager.RegisterCategory(serverGUIDPointer,
ctypes.pointer(GUID_TFCAT_TIP_KEYBOARD),
serverGUIDPointer)
@classmethod
def _unregister(self, registrar):
inputProcessorProfiles = comtypes.client.CreateObject(CLSID_TF_InputProcessorProfiles,
clsctx=comtypes.CLSCTX_INPROC_SERVER,
interface=ITfInputProcessorProfiles)
hr = inputProcessorProfiles.Unregister(serverGUIDPointer)
registrar._unregister(BoGoTextService)
#
# ITfTextInputProcessor
#
def Activate(self, thread_manager, client_id):
logging.debug("Activated")
self.client_id = client_id
self.thread_manager = thread_manager
keystroke_manager = thread_manager.QueryInterface(ITfKeystrokeMgr)
keystroke_manager.AdviseKeyEventSink(client_id, self, True)
self.reset()
def Deactivate(self):
logging.debug("Deactivated")
keystroke_manager = self.thread_manager.QueryInterface(ITfKeystrokeMgr)
keystroke_manager.UnadviseKeyEventSink(self.client_id)
self.reset()
#
# ITfKeyEventSink
#
# The OnTestKey* methods are used by TSF to probe whether we will eat/handle the key
# or not. After an OnTestKey* method returns True (eaten), another OnKey* event will
# be fired. We will always return True in OnKey* and do all handling in OnTestKey*
def OnTestKeyDown(self, this, input_context, virtual_key_code, key_info, out_eaten):
logging.debug("OnTestKeyDown: %s", virtual_key_code)
# out_eaten[0] = self.we_will_eat(virtual_key_code)
self.input_context = input_context
if virtual_key_code == VK_SPACE:
self.reset()
out_eaten[0] = False
return
if virtual_key_code == VK_BACK:
# Logic copied from ibus-bogo
if self.old_string != "":
deleted_char = self.old_string[-1]
self.old_string = self.old_string[:-1]
self.raw_string = self.raw_string[:-1]
if len(self.old_string) == 0:
self.reset()
else:
index = self.raw_string.rfind(deleted_char)
self.raw_string = self.raw_string[:-2] if index < 0 else \
self.raw_string[:index] + \
self.raw_string[(index + 1):]
out_eaten[0] = False
return
if self.we_will_eat(virtual_key_code):
# FIXME: Refactor the ToAscii code to a function/method
keyboard_state = (ctypes.c_ubyte * 256)()
if ctypes.windll.user32.GetKeyboardState(keyboard_state) == 0:
ctypes.memset(keyboard_state, 0, 256)
error = ctypes.windll.kernel32.GetLastError()
logging.debug("GetKeyboardState() Error: %x", error)
scan_code = (key_info >> 16) & 0xFF
buff = ctypes.create_string_buffer(2)
output = ctypes.windll.user32.ToAscii(virtual_key_code, scan_code, keyboard_state, buff, 0)
logging.debug("ToAscii() - %s - %s", output, buff.value)
logging.debug("CTRL: %s ALT: %s", keyboard_state[VK_CONTROL], keyboard_state[VK_MENU])
def is_key_down(key_state):
return key_state & (1 << 7) != 0
if is_key_down(keyboard_state[VK_MENU]) or \
is_key_down(keyboard_state[VK_CONTROL]):
self.reset()
out_eaten[0] = False
return
new_string, raw_string = bogo.process_key(self.old_string, buff.value, self.raw_string)
same_initial_chars = list(takewhile(lambda pair: pair[0] == pair[1], zip(self.old_string, new_string)))
n_backspace = len(self.old_string) - len(same_initial_chars)
self.delete_prev_chars(n_backspace)
self.commit_text(new_string)
self.old_string = new_string
self.raw_string = raw_string
out_eaten[0] = True
else:
out_eaten[0] = False
def OnKeyDown(self, this, input_context, virtual_key_code, key_info, out_eaten):
logging.debug("OnKeyDown: %s", virtual_key_code)
out_eaten[0] = True
def OnTestKeyUp(self, this, input_context, virtual_key_code, key_info, out_eaten):
logging.debug("OnTestKeyUp: %s", virtual_key_code)
out_eaten[0] = False
def OnKeyUp(self, this, input_context, virtual_key_code, key_info, out_eaten):
logging.debug("OnKeyUp: %s", virtual_key_code)
out_eaten[0] = True
def OnPreservedKey(self, input_context, preserved_key_guid):
# This thing is used to handle custom shortcut combinations
pass
def OnSetFocus(self, we_get_focus):
logging.debug("OnSetFocus(we_get_focus=%s)", we_get_focus)
#
# ITfEditSession
#
def DoEditSession(self, edit_cookie):
TS_DEFAULT_SELECTION = -1
selection, count = self.input_context.GetSelection(edit_cookie, TS_DEFAULT_SELECTION, 1)
_range = selection.range
# FIXME:
# Manual dispatching with state like this makes me cringe.
# We should have separate CommitEditSession, DeleteEditSession, etc. classes.
if self.editing_operation == "commit":
text, length = self.text_to_commit
inserter = self.input_context.QueryInterface(ITfInsertAtSelection)
out = inserter.InsertTextAtSelection(edit_cookie, 0, text, length)
# Move the caret to the end of our newly inserted string
# _range.Collapse(edit_cookie, TF_ANCHOR_END)
# style = TF_SELECTIONSTYLE()
# style.ase = TF_AE_NONE
# style.fInterimChar = False
# new_selection = TF_SELECTION()
# new_selection.style = style
# new_selection.range = _range
# self.input_context.SetSelection(edit_cookie, 1, new_selection)
elif self.editing_operation == "delete-prev-chars":
moved_chars = _range.ShiftStart(edit_cookie, -self.delete_count, None)
logging.debug("moved_chars: %s", moved_chars)
TF_ST_CORRECTION = 1
_range.SetText(edit_cookie, TF_ST_CORRECTION, utils.text_to_ushort_array(""), 0)
#
# BoGo
#
def we_will_eat(self, virtual_key_code):
# A-Z
return virtual_key_code in range(65, 91)
def reset(self):
logging.debug("reset()")
self.new_string = ""
self.old_string = ""
self.raw_string = ""
def commit_text(self, text):
logging.debug("commit_text(%s)", text)
self.text_to_commit = utils.text_to_ushort_array(text), len(text)
self.editing_operation = "commit"
return self.input_context.RequestEditSession(self.client_id, self, TF_ES_SYNC | TF_ES_READWRITE)
def delete_prev_chars(self, count):
logging.debug("delete_prev_chars(%s)", count)
# Somehow SendMessageW still sends a backspace if count == 0 so
# we have to explicitly check it like this.
if count <= 0:
return
if self.is_in_transitory_context():
hwnd = self.input_context.GetActiveView().GetWnd()
for i in range(count):
ctypes.windll.user32.SendMessageW(hwnd, WM_KEYDOWN, VK_BACK, 1)
ctypes.windll.user32.SendMessageW(hwnd, WM_KEYUP , VK_BACK, 1)
ctypes.windll.user32.SendMessageW(hwnd, WM_CHAR , VK_BACK, 1)
else:
self.editing_operation = "delete-prev-chars"
self.delete_count = count
self.input_context.RequestEditSession(self.client_id, self, TF_ES_SYNC | TF_ES_READWRITE)
def is_in_transitory_context(self):
# http://blogs.msdn.com/b/tsfaware/archive/2007/04/25/transitory-contexts.aspx
TS_SS_TRANSITORY = 0x4
status = self.input_context.GetStatus()
return status.dwStaticFlags & TS_SS_TRANSITORY != 0
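# Illustrative sketch (our addition, not part of the server): the backspace-then-retype
# commit strategy used in OnTestKeyDown, shown standalone. Uses takewhile imported above.
def _diff_commit(old_string, new_string):
    common = len(list(takewhile(lambda pair: pair[0] == pair[1],
                                zip(old_string, new_string))))
    # erase everything after the common prefix, then type the new tail
    return len(old_string) - common, new_string[common:]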
| BoGoEngine/bogo-win | bogo_server.py | Python | gpl-3.0 | 11,014 | ["ASE"] | 9daadcd14ca3d3f497491df32862d618668f4300629e13b0fd1b135d86b3f6a0 |
"""
Functions related to 2D gaussian functions and comparing ellipticities
derived either analytically or using quadrupole moments.
:requires: NumPy
:requires: matplotlib
:author: Sami-Matias Niemi
:contact: smn2@mssl.ucl.ac.uk
:version: 0.2
"""
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np
import math, datetime, pprint
from analysis import shape
from support import logger as lg
from support import files
def Gaussian2D(x, y, sizex, sizey, sigmax, sigmay):
"""
Create a circular symmetric Gaussian centered on x, y.
:param x: x coordinate of the centre
:type x: float
:param y: y coordinate of the centre
:type y: float
:param sigmax: standard deviation of the Gaussian in x-direction
:type sigmax: float
:param sigmay: standard deviation of the Gaussian in y-direction
:type sigmay: float
:return: circular Gaussian 2D profile and x and y mesh grid
:rtype: dict
"""
#x and y coordinate vectors
Gyvect = np.arange(1, sizey + 1)
Gxvect = np.arange(1, sizex + 1)
#meshgrid
Gxmesh, Gymesh = np.meshgrid(Gxvect, Gyvect)
#normalizers
sigx = 1. / (2. * sigmax**2)
sigy = 1. / (2. * sigmay**2)
#gaussian
exponent = (sigx * (Gxmesh - x)**2 + sigy * (Gymesh - y)**2)
Gaussian = np.exp(-exponent) / (2. * math.pi * sigmax*sigmay)
output = dict(GaussianXmesh=Gxmesh, GaussianYmesh=Gymesh, Gaussian=Gaussian)
return output
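#quick sanity check (same values as in __main__ below):
#Gaussian2D(150, 150, 300, 300, 27.25, 14.15)['Gaussian'].shape == (300, 300)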
def plot3D(data):
"""
Plot a 3d image of the input data. Assumes that the input dictionary
contains X, Y, and Z.
:param data: input data including X and Y mesh and Z-values
:type data: dict
"""
fig = plt.figure(figsize=(12,12))
rect = fig.add_subplot(111, visible=False).get_position()
ax = Axes3D(fig, rect)
surf = ax.plot_surface(data['GaussianXmesh'],
data['GaussianYmesh'],
data['Gaussian'],
rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=10)
plt.savefig('gaussian.pdf')
def plotEllipticityDependency(data, ellipticity, log):
"""
Generate a simple plot: size of the Gaussian weighting function vs. derived ellipticity.
"""
x = []
y = []
for sigma in range(1, 50):
settings = dict(sigma=sigma)
sh = shape.shapeMeasurement(data, log, **settings)
results = sh.measureRefinedEllipticity()
x.append(sigma)
y.append(results['ellipticity'])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, 'bo-')
ax.plot([min(x), max(x)], [ellipticity, ellipticity], 'k--')
ax.set_xlabel(r'Gaussian Weighting $\sigma$ [arcseconds]')
ax.set_ylabel('Measured Ellipticity')
ax.set_ylim(0, 1.01)
plt.savefig('EvsSigma.pdf')
plt.close()
def ellipticityFromSigmas(sigmax, sigmay):
"""
Calculate ellipticity from standard deviations of a 2D Gaussian.
:param sigmax: standard deviation in x direction
:type sigmax: float or ndarray
:param sigmay: standard deviation in y direction
:type sigmay: float or ndarray
:return: ellipticity
:rtype: float or ndarray
"""
e = (np.float(sigmax)**2 - sigmay**2) / (sigmax**2 + sigmay**2)
return np.abs(e)
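#worked example, using the sigmas from __main__ below:
#ellipticityFromSigmas(27.25, 14.15) = (27.25**2 - 14.15**2) / (27.25**2 + 14.15**2)
#                                    = 542.34 / 942.785 ~ 0.575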
def size():
"""
:requires: sympy
"""
from sympy import Symbol
from sympy import integrate, exp, pi
x = Symbol('x')
y = Symbol('y')
mu = Symbol('mu')
sigma = Symbol('sigma')
tmpx = (x - mu)
tmpy = (y - mu)
integrand = (1/(2*pi*sigma**2)) * exp(-((tmpx**2 + tmpy**2) / (2*sigma**2) ))
res = integrate(integrand, x)
pprint.pprint(res)
def measureGaussianR2(log):
#gaussian
sigma = 2. / (2. * math.sqrt(2.*math.log(2)))
Gaussian = shape.shapeMeasurement(np.zeros((100, 100)), log).circular2DGaussian(50, 50, sigma)['Gaussian']
settings = dict(sigma=sigma, weighted=False)
sh = shape.shapeMeasurement(Gaussian, log, **settings)
results = sh.measureRefinedEllipticity()
print
print results['R2']
print
#sh.writeFITS(Gaussian, 'GaussianSmall.fits')
def testFiles():
#testing part, looks for blob?.fits and psf.fits to derive centroids and ellipticity
import pyfits as pf
import glob as g
from support import logger as lg
import sys
files = g.glob('blob?.fits')
log = lg.setUpLogger('shape.log')
log.info('Testing shape measuring class...')
for file in files:
log.info('Processing file %s' % file)
data = pf.getdata(file)
sh = shape.shapeMeasurement(data, log)
results = sh.measureRefinedEllipticity()
sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
print file
pprint.pprint(results)
print
file = 'psf1x.fits'
log.info('Processing file %s' % file)
data = pf.getdata(file)
sh = shape.shapeMeasurement(data, log)
results = sh.measureRefinedEllipticity()
sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
print file
pprint.pprint(results)
print
file = 'stamp.fits'
log.info('Processing file %s' % file)
data = pf.getdata(file)
settings = dict(sigma=10.0)
sh = shape.shapeMeasurement(data, log, **settings)
results = sh.measureRefinedEllipticity()
sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
print file
pprint.pprint(results)
print
file = 'gaussian.fits'
log.info('Processing file %s' % file)
data = pf.getdata(file)
settings = dict(sampling=0.2)
sh = shape.shapeMeasurement(data, log, **settings)
results = sh.measureRefinedEllipticity()
sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
print file
pprint.pprint(results)
print
log.info('All done\n\n')
if __name__ == '__main__':
log = lg.setUpLogger('gaussians.log')
log.info('Testing gaussians...')
xsize, ysize = 300, 300
xcen, ycen = 150, 150
sigmax = 27.25
sigmay = 14.15
#calculate ellipticity from Sigmas
e = ellipticityFromSigmas(sigmax, sigmay)
#generate a 2D gaussian with given properties...
gaussian2d = Gaussian2D(xcen, ycen, xsize, ysize, sigmax, sigmay)
#plot
plot3D(gaussian2d)
#write FITS file
files.writeFITS(gaussian2d['Gaussian'], 'gaussian.fits')
#calculate shape and printout results
settings = dict(sigma=15., weighted=False)
sh = shape.shapeMeasurement(gaussian2d['Gaussian'], log, **settings)
results = sh.measureRefinedEllipticity()
print
pprint.pprint(results)
print e, (e - results['ellipticity']) / e * 100.
#generate a plot sigma vs ellipticity for a given Gaussian
plotEllipticityDependency(gaussian2d['Gaussian'], e, log)
#measureGaussianR2
measureGaussianR2(log)
#derive FWHM - R2 relation... not really working
#size()
#test many files
testFiles()
log.info('All done\n\n')
| sniemi/EuclidVisibleInstrument | support/gaussians.py | Python | bsd-2-clause | 7,161 | ["Gaussian"] | ed5c0dc56abc30064c6cff3d9654f045cd99c9cfe942f5f1088a22436d1ce85f |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup for jsonextended."""
import io
from importlib import import_module
from setuptools import setup, find_packages
with open('requirements.txt') as handle:
requirements = handle.read().splitlines()
with open('test_requirements.txt') as handle:
test_requirements = handle.read().splitlines()
with io.open('README.md') as handle:
readme_text = handle.read()
setup(
name='jsonextended',
version=import_module('jsonextended').__version__,
description='Extending the python json package functionality',
long_description=readme_text,
long_description_content_type='text/markdown',
install_requires=requirements,
tests_require=test_requirements,
license='MIT',
author='Chris Sewell',
author_email='chrisj_sewell@hotmail.com',
url='https://github.com/chrisjsewell/jsonextended',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
keywords='json, units, parser, python',
zip_safe=True,
packages=find_packages(),
package_data={'': ['*.json', '*.crystal.out',
'*.csv', '*.keypair', '*.data']},
)
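# Typical installation from a source checkout (sketch):
#   pip install .      # regular install
#   pip install -e .   # editable install for development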
| chrisjsewell/jsonextended | setup.py | Python | mit | 2,162 | ["CRYSTAL"] | 2e126726d75069083f821e1bc3d3bc1c2a4946fb1765babc77eb06b40c13908b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import NamedTuple
from typing import Optional
from typing import Set
import src.api.global_ as gl
import src.api.symboltable.symboltable
import src.api.utils
import src.api.symboltable
import src.api.check as chk
from src import symbols
from src.ast import NodeVisitor
from src.api import errmsg
from src.api.constants import TYPE, SCOPE, CLASS, CONVENTION
from src.api.debug import __DEBUG__
from src.api.errmsg import warning_not_used
from src.api.config import OPTIONS
class ToVisit(NamedTuple):
"""Used just to signal an object to be
traversed.
"""
obj: symbols.SYMBOL
class GenericVisitor(NodeVisitor):
"""A slightly different visitor, that just traverses an AST, but does not return
a translation of it. Used to examine the AST or do transformations
"""
node_type = ToVisit
@property
def O_LEVEL(self):
return OPTIONS.optimization_level
NOP = symbols.NOP() # Return this for "erased" nodes
@staticmethod
def TYPE(type_):
"""Converts a backend type (from api.constants)
to a SymbolTYPE object (taken from the SYMBOL_TABLE).
If type_ is already a SymbolTYPE object, nothing
is done.
"""
if isinstance(type_, symbols.TYPE):
return type_
assert TYPE.is_valid(type_)
return gl.SYMBOL_TABLE.basic_types[type_]
def visit(self, node):
return super().visit(ToVisit(node))
def _visit(self, node: ToVisit):
if node.obj is None:
return None
__DEBUG__(f"Optimizer: Visiting node {str(node.obj)}", 1)
meth = getattr(self, f"visit_{node.obj.token}", self.generic_visit)
return meth(node.obj)
def generic_visit(self, node: symbols.SYMBOL):
for i, child in enumerate(node.children):
node.children[i] = yield self.visit(child)
yield node
class UniqueVisitor(GenericVisitor):
def __init__(self):
super().__init__()
self.visited = set()
def _visit(self, node: ToVisit):
if node.obj in self.visited:
return node.obj
self.visited.add(node.obj)
return super()._visit(node)
class UnreachableCodeVisitor(UniqueVisitor):
"""Visitor to optimize unreachable code (and prune it)."""
def visit_FUNCTION(self, node: symbols.FUNCTION):
if (
node.class_ == CLASS.function
and node.body.token == "BLOCK"
and (not node.body or node.body[-1].token != "RETURN")
):
# String functions must *ALWAYS* return a value.
# Put a sentinel ("dummy") return "" sentence that will be removed if other is detected
lineno = node.lineno if not node.body else node.body[-1].lineno
errmsg.warning_function_should_return_a_value(lineno, node.name, node.filename)
type_ = node.type_
if type_ is not None and type_ == self.TYPE(TYPE.string):
node.body.append(symbols.ASM("\nld hl, 0\n", lineno, node.filename, is_sentinel=True))
yield (yield self.generic_visit(node))
def visit_BLOCK(self, node):
# Remove CHKBREAK after labels
i = 0
while i < len(node) - 1:
child = node[i]
if child.token == "LABEL" and node[i + 1].token == "CHKBREAK":
node.pop(i + 1)
continue
i += 1
warning_emitted = False
i = 0
while i < len(node):
sentence = node[i]
if chk.is_ender(sentence):
j = i + 1
while j < len(node):
if chk.is_LABEL(node[j]):
break
if node[j].token == "FUNCDECL":
j += 1
continue
if node[j].token == "END" and node[j].is_sentinel: # "Sentinel" instructions can be freely removed
node.pop(j)
continue
if node[j].token == "ASM":
break # User's ASM must always be left there
if not warning_emitted and self.O_LEVEL > 0:
warning_emitted = True
errmsg.warning_unreachable_code(lineno=node[j].lineno, fname=node[j].filename)
if self.O_LEVEL < 2:
break
node.pop(j)
i += 1
if self.O_LEVEL >= 1 and chk.is_null(node):
yield self.NOP
return
yield (yield self.generic_visit(node))
class FunctionGraphVisitor(UniqueVisitor):
"""Mark FUNCALLS"""
def _get_calls_from_children(self, node):
return [
symbol
for symbol in self.filter_inorder(node, lambda x: isinstance(x, (symbols.FUNCCALL, symbols.CALL)))
if not isinstance(symbol, symbols.ARRAYACCESS)
]
def _set_children_as_accessed(self, node: symbols.SYMBOL):
parent = node.get_parent(symbols.FUNCDECL)
if parent is None: # Global scope?
for symbol in self._get_calls_from_children(node):
symbol.entry.accessed = True
def visit_FUNCCALL(self, node: symbols.FUNCCALL):
self._set_children_as_accessed(node)
yield node
def visit_CALL(self, node: symbols.CALL):
self._set_children_as_accessed(node)
yield node
def visit_FUNCDECL(self, node: symbols.FUNCDECL):
if node.entry.accessed:
for symbol in self._get_calls_from_children(node):
symbol.entry.accessed = True
yield node
def visit_GOTO(self, node: symbols.SENTENCE):
parent = node.get_parent(symbols.FUNCDECL)
if parent is None: # Global scope?
node.args[0].accessed = True
yield node
def visit_GOSUB(self, node: symbols.SENTENCE):
return self.visit_GOTO(node)
class OptimizerVisitor(UniqueVisitor):
"""Implements some optimizations"""
def visit(self, node):
if self.O_LEVEL < 1: # Optimize only if O1 or above
return node
return super().visit(node)
def visit_ADDRESS(self, node):
if node.operand.token != "ARRAYACCESS":
if not chk.is_dynamic(node.operand):
node = symbols.CONST(node, node.lineno)
elif node.operand.offset is not None: # A constant access
if node.operand.scope == SCOPE.global_: # Calculate offset if global variable
node = symbols.BINARY.make_node(
"PLUS",
symbols.UNARY("ADDRESS", node.operand.entry, node.lineno, type_=self.TYPE(gl.PTR_TYPE)),
symbols.NUMBER(node.operand.offset, lineno=node.operand.lineno, type_=self.TYPE(gl.PTR_TYPE)),
lineno=node.lineno,
func=lambda x, y: x + y,
)
yield node
def visit_BINARY(self, node: symbols.BINARY):
node = yield self.generic_visit(node) # This might convert consts to numbers if possible
if self.O_LEVEL > 1 and node.operator in ("PLUS", "MUL"):
if chk.is_number(node.left) and not chk.is_number(node.right):
node.left, node.right = node.right, node.left
node = yield self.generic_visit(node)
if node.left.token == "BINARY" and node.left.operator == node.operator and chk.is_number(node.right):
left = ll = None
if chk.is_number(node.left.right):
left = node.left.left
ll = node.left.right
elif chk.is_number(node.left.left):
left = node.left.right
ll = node.left.left
if left is not None:
right = yield symbols.BINARY.make_node(
operator=node.operator,
left=ll,
right=node.right,
lineno=node.lineno,
func=node.func,
)
node.left = left
node.right = right
if (
node.left.token == node.right.token == "BINARY"
and node.operator == node.left.operator == node.right.operator
and chk.is_number(node.left.right, node.right.right)
):
left = yield symbols.BINARY.make_node(
operator=node.operator,
left=node.left.left,
right=node.right.left,
func=node.left.func,
lineno=node.left.lineno,
)
right = yield symbols.BINARY.make_node(
operator=node.operator,
left=node.left.right,
right=node.right.right,
func=node.right.func,
lineno=node.right.lineno,
)
node = yield symbols.BINARY.make_node(
operator=node.operator, left=left, right=right, func=node.func, lineno=node.lineno
)
# Retry folding
yield symbols.BINARY.make_node(node.operator, node.left, node.right, node.lineno, node.func, node.type_)
def visit_BUILTIN(self, node):
methodname = "visit_" + node.fname
if hasattr(self, methodname):
yield (yield getattr(self, methodname)(node))
else:
yield (yield self.generic_visit(node))
def visit_CHR(self, node):
node = yield self.generic_visit(node)
if all(chk.is_static(arg.value) for arg in node.operand):
yield symbols.STRING(
"".join(chr(src.api.utils.get_final_value(x.value) & 0xFF) for x in node.operand), node.lineno
)
else:
yield node
def visit_CONST(self, node):
if chk.is_number(node.expr) or chk.is_const(node.expr):
yield node.expr
else:
yield node
def visit_FUNCCALL(self, node):
node.args = yield self.generic_visit(node.args) # Avoid infinite recursion not visiting node.entry
self._check_if_any_arg_is_an_array_and_needs_lbound_or_ubound(node.entry.params, node.args)
yield node
def visit_CALL(self, node):
node.args = yield self.generic_visit(node.args) # Avoid infinite recursion not visiting node.entry
self._check_if_any_arg_is_an_array_and_needs_lbound_or_ubound(node.entry.params, node.args)
yield node
def visit_FUNCDECL(self, node):
if self.O_LEVEL > 1 and not node.entry.accessed:
errmsg.warning_func_is_never_called(node.entry.lineno, node.entry.name, fname=node.entry.filename)
yield self.NOP
return
if self.O_LEVEL > 1 and node.params_size == node.locals_size == 0:
node.entry.convention = CONVENTION.fastcall
node.children[1] = yield ToVisit(node.entry)
yield node
def visit_LET(self, node):
lvalue = node.children[0]
if self.O_LEVEL > 1 and not lvalue.accessed:
warning_not_used(lvalue.lineno, lvalue.name, fname=lvalue.filename)
block = symbols.BLOCK(
*[
symbols.CALL(x.entry, x.args, x.lineno, lvalue.filename)
for x in self.filter_inorder(
node.children[1],
lambda x: isinstance(x, symbols.FUNCCALL),
lambda x: not isinstance(x, symbols.FUNCTION),
)
]
)
yield block
else:
yield (yield self.generic_visit(node))
def visit_LETARRAY(self, node):
lvalue = node.args[0].entry
if self.O_LEVEL > 1 and not lvalue.accessed:
warning_not_used(lvalue.lineno, lvalue.name, fname=lvalue.filename)
block = symbols.BLOCK(
*[
symbols.CALL(x.entry, x.args, x.lineno, lvalue.filename)
for x in self.filter_inorder(
node.children[1],
lambda x: isinstance(x, symbols.FUNCCALL),
lambda x: not isinstance(x, symbols.FUNCTION),
)
]
)
yield block
else:
yield (yield self.generic_visit(node))
def visit_LETSUBSTR(self, node):
if self.O_LEVEL > 1 and not node.children[0].accessed:
errmsg.warning_not_used(node.children[0].lineno, node.children[0].name)
yield self.NOP
else:
yield (yield self.generic_visit(node))
def visit_RETURN(self, node):
"""Visits only children[1], since children[0] points to
the current function being returned from (if any), and
might cause infinite recursion.
"""
if len(node.children) == 2:
node.children[1] = yield ToVisit(node.children[1])
yield node
def visit_UNARY(self, node):
if node.operator == "ADDRESS":
yield (yield self.visit_ADDRESS(node))
else:
yield (yield self.generic_visit(node))
def visit_IF(self, node):
expr_ = yield ToVisit(node.children[0])
then_ = yield ToVisit(node.children[1])
else_ = (yield ToVisit(node.children[2])) if len(node.children) == 3 else self.NOP
if self.O_LEVEL >= 1:
if chk.is_null(then_, else_):
src.api.errmsg.warning_empty_if(node.lineno)
yield self.NOP
return
block_accessed = chk.is_block_accessed(then_) or chk.is_block_accessed(else_)
if not block_accessed and chk.is_number(expr_): # constant condition
if expr_.value: # always true (then_)
yield then_
else: # always false (else_)
yield else_
return
if chk.is_null(else_) and len(node.children) == 3:
node.children.pop() # remove empty else
yield node
return
for i in range(len(node.children)):
node.children[i] = (expr_, then_, else_)[i]
yield node
def visit_WHILE(self, node):
node = yield self.generic_visit(node)
expr_ = node.children[0]
body_ = node.children[1]
if self.O_LEVEL >= 1:
if chk.is_number(expr_) and not expr_.value and not chk.is_block_accessed(body_):
yield self.NOP
return
for i, child in enumerate((expr_, body_)):
node.children[i] = child
yield node
def visit_FOR(self, node):
node = yield self.generic_visit(node)
from_ = node.children[1]
to_ = node.children[2]
step_ = node.children[3]
body_ = node.children[4]
if self.O_LEVEL > 0 and chk.is_number(from_, to_, step_) and not chk.is_block_accessed(body_):
if from_ > to_ and step_ > 0:
yield self.NOP
return
if from_ < to_ and step_ < 0:
yield self.NOP
return
yield node
# TODO: ignore unused labels
def _visit_LABEL(self, node):
if self.O_LEVEL and not node.accessed:
yield self.NOP
else:
yield node
def generic_visit(self, node: symbols.SYMBOL):
for i, child in enumerate(node.children):
node.children[i] = yield ToVisit(child)
yield node
def _check_if_any_arg_is_an_array_and_needs_lbound_or_ubound(
self, params: symbols.PARAMLIST, args: symbols.ARGLIST
):
"""Given a list of params and a list of args, traverse them to check if any arg is a byRef array parameter,
and if so, whether it's use_lbound or use_ubound flag is updated to True and if it's a local var. If so, it's
offset size has changed and must be reevaluated!
"""
for arg, param in zip(args, params):
if not param.byref or param.class_ != CLASS.array:
continue
if arg.value.lbound_used and arg.value.ubound_used:
continue
self._update_bound_status(arg.value)
def _update_bound_status(self, arg: symbols.VARARRAY):
old_lbound_used = arg.lbound_used
old_ubound_used = arg.ubound_used
for p in arg.requires:
arg.lbound_used = arg.lbound_used or p.lbound_used
arg.ubound_used = arg.ubound_used or p.ubound_used
if old_lbound_used != arg.lbound_used or old_ubound_used != arg.ubound_used:
if arg.scope == SCOPE.global_:
return
if arg.scope == SCOPE.local and not arg.byref:
arg.scopeRef.owner.locals_size = src.api.symboltable.symboltable.SymbolTable.compute_offsets(
arg.scopeRef
)
class VarDependency(NamedTuple):
parent: symbols.VAR
dependency: symbols.VAR
class VariableVisitor(GenericVisitor):
_original_variable: Optional[symbols.VAR] = None
_parent_variable = None
_visited: Set[symbols.SYMBOL] = set()
def generic_visit(self, node: symbols.SYMBOL):
if node not in VariableVisitor._visited:
VariableVisitor._visited.add(node)
for i in range(len(node.children)):
node.children[i] = yield ToVisit(node.children[i])
yield node
def has_circular_dependency(self, var_dependency: VarDependency) -> bool:
if var_dependency.dependency == VariableVisitor._original_variable:
src.api.errmsg.error(
VariableVisitor._original_variable.lineno,
"Circular dependency between '{}' and '{}'".format(
VariableVisitor._original_variable.name, var_dependency.parent
),
)
return True
return False
def get_var_dependencies(self, var_entry: symbols.VAR):
visited: Set[symbols.VAR] = set()
result = set()
def visit_var(entry):
if entry in visited:
return
visited.add(entry)
if not isinstance(entry, symbols.VAR):
for child in entry.children:
visit_var(child)
if isinstance(child, symbols.VAR):
result.add(VarDependency(parent=VariableVisitor._parent_variable, dependency=child))
return
VariableVisitor._parent_variable = entry
if entry.alias is not None:
result.add(VarDependency(parent=entry, dependency=entry.alias))
visit_var(entry.alias)
elif entry.addr is not None:
visit_var(entry.addr)
visit_var(var_entry)
return result
def visit_VARDECL(self, node: symbols.VARDECL):
"""Checks for cyclic dependencies in aliasing variables"""
VariableVisitor._visited = set()
VariableVisitor._original_variable = node.entry
for dependency in self.get_var_dependencies(node.entry):
if self.has_circular_dependency(dependency):
break
VariableVisitor._visited = set()
VariableVisitor._original_variable = VariableVisitor._parent_variable = None
yield node
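# Hedged usage sketch (assumes a parsed AST root and configured OPTIONS; the
# visitors mutate the tree in place):
#   FunctionGraphVisitor().visit(ast_root)    # mark reachable functions first
#   UnreachableCodeVisitor().visit(ast_root)  # prune dead code
#   OptimizerVisitor().visit(ast_root)        # no-op below -O1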
| boriel/zxbasic | src/api/optimize.py | Python | gpl-3.0 | 19,483 | ["VisIt"] | c9a499c3b1f8d50d7b20be872629986caf050baf2fd3f35c6de5e8bc5b4ff460 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.simulation.neuron.hocmodbuilders.hocbuilder import HocBuilder
from morphforge.simulation.neuron.hocmodbuilders.hocbuilder_cell import HocBuilder_Cell
from morphforge.simulation.neuron.hocmodbuilders.hocmodutils import HocModUtils
from morphforge.simulation.neuron.hocmodbuilders.modfilesectioned import ModFileSectioned, NeuronParameter
from morphforge.simulation.neuron.hocmodbuilders.modfilewriterbase import MM_ModFileWriterBase
__all__ = [
'HocBuilder',
'HocBuilder_Cell',
'HocModUtils',
'ModFileSectioned',
'NeuronParameter',
'MM_ModFileWriterBase',
]
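# Usage sketch: the builders are re-exported here for convenience, e.g.
#   from morphforge.simulation.neuron.hocmodbuilders import HocModUtils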
| mikehulluk/morphforge | src/morphforge/simulation/neuron/hocmodbuilders/__init__.py | Python | bsd-2-clause | 2,143 | ["NEURON"] | 757ced23f173ddee491ff94269f01b38cb316a89940cae5b501dd0c07173ec2a |
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# Copyright (c) 2009-2010 Arista Networks, Inc.
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""basic checker for Python code"""
from logilab import astng
from logilab.common.ureports import Table
from logilab.astng import are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.reporters import diff_string
from pylint.checkers import BaseChecker, EmptyReport
from pylint.checkers.utils import (
check_messages,
clobber_in_except,
is_inside_except,
safe_infer,
)
import re
# regex for class/function/variable/constant name
CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
DEFAULT_NAME_RGX = re.compile('[a-z_][a-z0-9_]{2,30}$')
# do not require a doc string on system methods
NO_REQUIRED_DOC_RGX = re.compile('__.*__')
del re
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(parent, (astng.For, astng.ListComp, astng.SetComp,
astng.DictComp, astng.GenExpr)):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _loop_exits_early(loop):
"""Returns true if a loop has a break statement in its body."""
loop_nodes = (astng.For, astng.While)
# Loop over body explicitly to avoid matching break statements
# in orelse.
for child in loop.body:
if isinstance(child, loop_nodes):
continue
for _ in child.nodes_of_class(astng.Break, skip_klass=loop_nodes):
return True
return False
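# Illustration: a break that belongs to a nested loop is skipped, so the outer
# loop's else clause would still be flagged (see W0120 below):
#
#     for item in seq:
#         while cond:
#             break        # exits the while, not the for
#     else:
#         pass             # always reached; the else is useless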
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ('module', 'class', 'method', 'function'):
try:
total = stats[node_type]
except KeyError:
raise EmptyReport()
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats['undocumented_'+node_type]
percent = (documented * 100.) / total
nice_stats[node_type]['percent_documented'] = '%.2f' % percent
except KeyError:
nice_stats[node_type]['percent_documented'] = 'NC'
try:
percent = (stats['badname_'+node_type] * 100.) / total
nice_stats[node_type]['percent_badname'] = '%.2f' % percent
except KeyError:
nice_stats[node_type]['percent_badname'] = 'NC'
lines = ('type', 'number', 'old number', 'difference',
'%documented', '%badname')
for node_type in ('module', 'class', 'method', 'function'):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = diff_string(old, new)
else:
old, diff_str = 'NC', 'NC'
lines += (node_type, str(new), str(old), diff_str,
nice_stats[node_type].get('percent_documented', '0'),
nice_stats[node_type].get('percent_badname', '0'))
sect.append(Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (isinstance(decorator, astng.Getattr) and
getattr(decorator.expr, 'name', None) == node.name):
return True
return False
class _BasicChecker(BaseChecker):
__implements__ = IASTNGChecker
name = 'basic'
class BasicErrorChecker(_BasicChecker):
msgs = {
'E0100': ('__init__ method is a generator',
'init-is-generator',
'Used when the special class method __init__ is turned into a '
'generator by a yield in its body.'),
'E0101': ('Explicit return in __init__',
'return-in-init',
'Used when the special class method __init__ has an explicit \
return value.'),
'E0102': ('%s already defined line %s',
'function-redefined',
'Used when a function / class / method is redefined.'),
'E0103': ('%r not properly in loop',
'not-in-loop',
'Used when break or continue keywords are used outside a loop.'),
'E0104': ('Return outside function',
'return-outside-function',
'Used when a "return" statement is found outside a function or '
'method.'),
'E0105': ('Yield outside function',
'yield-outside-function',
'Used when a "yield" statement is found outside a function or '
'method.'),
'E0106': ('Return with argument inside generator',
'return-arg-in-generator',
'Used when a "return" statement with an argument is found '
'outside in a generator function or method (e.g. with some '
'"yield" statements).'),
'E0107': ("Use of the non-existent %s operator",
'nonexistent-operator',
"Used when you attempt to use the C-style pre-increment or"
"pre-decrement operator -- and ++, which doesn't exist in Python."),
'E0108': ('Duplicate argument name %s in function definition',
'duplicate-argument-name',
'Duplicate argument names in function definitions are syntax'
' errors.'),
'W0120': ('Else clause on loop without a break statement',
'useless-else-on-loop',
'Loops should only have an else clause if they can exit early '
'with a break statement, otherwise the statements under else '
'should be on the same scope as the loop itself.'),
}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
@check_messages('E0102')
def visit_class(self, node):
self._check_redefinition('class', node)
@check_messages('E0100', 'E0101', 'E0102', 'E0106', 'E0108')
def visit_function(self, node):
if not redefined_by_decorator(node):
self._check_redefinition(node.is_method() and 'method' or 'function', node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(astng.Return,
skip_klass=(astng.Function, astng.Class))
if node.is_method() and node.name == '__init__':
if node.is_generator():
self.add_message('E0100', node=node)
else:
values = [r.value for r in returns]
if [v for v in values if not (v is None or
(isinstance(v, astng.Const) and v.value is None)
or (isinstance(v, astng.Name) and v.name == 'None'))]:
self.add_message('E0101', node=node)
elif node.is_generator():
# make sure we don't mix non-None returns and yields
for retnode in returns:
if isinstance(retnode.value, astng.Const) and \
retnode.value.value is not None:
self.add_message('E0106', node=node,
line=retnode.fromlineno)
args = set()
for name in node.argnames():
if name in args:
self.add_message('E0108', node=node, args=(name,))
else:
args.add(name)
@check_messages('E0104')
def visit_return(self, node):
if not isinstance(node.frame(), astng.Function):
self.add_message('E0104', node=node)
@check_messages('E0105')
def visit_yield(self, node):
if not isinstance(node.frame(), (astng.Function, astng.Lambda)):
self.add_message('E0105', node=node)
@check_messages('E0103')
def visit_continue(self, node):
self._check_in_loop(node, 'continue')
@check_messages('E0103')
def visit_break(self, node):
self._check_in_loop(node, 'break')
@check_messages('W0120')
def visit_for(self, node):
self._check_else_on_loop(node)
@check_messages('W0120')
def visit_while(self, node):
self._check_else_on_loop(node)
@check_messages('E0107')
def visit_unaryop(self, node):
"""check use of the non-existent ++ adn -- operator operator"""
if ((node.op in '+-') and
isinstance(node.operand, astng.UnaryOp) and
(node.operand.op == node.op)):
self.add_message('E0107', node=node, args=node.op*2)
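    # Illustrative sketch (not part of pylint itself): code that would trigger
    # E0107, since Python parses `++x` as two nested unary plus operations:
    #
    #     x = 1
    #     ++x    # nonexistent-operator: this is +(+x), not an increment
    #     --x    # nonexistent-operator: this is -(-x), not a decrement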
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message('W0120', node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astng.For, astng.While)):
break
_node = _node.parent
else:
self.add_message('E0103', node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
defined_self = node.parent.frame()[node.name]
if defined_self is not node and not are_exclusive(node, defined_self):
self.add_message('E0102', node=node,
args=(redeftype, defined_self.fromlineno))
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* modules / classes / functions / methods / arguments / variables name
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = IASTNGChecker
name = 'basic'
msgs = {
'W0101': ('Unreachable code',
'unreachable',
'Used when there is some code behind a "return" or "raise" \
statement, which will never be accessed.'),
'W0102': ('Dangerous default value %s as argument',
'dangerous-default-value',
'Used when a mutable value as list or dictionary is detected in \
a default value for an argument.'),
'W0104': ('Statement seems to have no effect',
'pointless-statement',
'Used when a statement doesn\'t have (or at least seems to) \
any effect.'),
'W0105': ('String statement has no effect',
'pointless-string-statement',
'Used when a string is used as a statement (which of course \
has no effect). This is a particular case of W0104 with its \
own message so you can easily disable it if you\'re using \
those strings as documentation, instead of comments.'),
'W0106': ('Expression "%s" is assigned to nothing',
'expression-not-assigned',
'Used when an expression that is not a function call is assigned\
to nothing. Probably something else was intended.'),
'W0108': ('Lambda may not be necessary',
'unnecessary-lambda',
'Used when the body of a lambda expression is a function call \
on the same argument list as the lambda itself; such lambda \
expressions are in all but a few cases replaceable with the \
function being called in the body of the lambda.'),
'W0109': ("Duplicate key %r in dictionary",
'duplicate-key',
"Used when a dictionary expression binds the same key multiple \
times."),
'W0122': ('Use of the exec statement',
'exec-statement',
'Used when you use the "exec" statement, to discourage its \
usage. That doesn\'t mean you can not use it !'),
'W0141': ('Used builtin function %r',
'bad-builtin',
                  'Used when a blacklisted builtin function is used (see the '
                  'bad-function option). Usual blacklisted functions are the ones '
                  'like map or filter, where Python now offers some cleaner '
                  'alternatives like list comprehensions.'),
'W0142': ('Used * or ** magic',
'star-args',
'Used when a function or method is called using `*args` or '
'`**kwargs` to dispatch arguments. This doesn\'t improve '
'readability and should be used with care.'),
'W0150': ("%s statement in finally block may swallow exception",
'lost-exception',
"Used when a break or a return statement is found inside the \
finally clause of a try...finally block: the exceptions raised \
in the try clause will be silently swallowed instead of being \
re-raised."),
        'W0199': ('Assert called on a 2-tuple. Did you mean \'assert x,y\'?',
'assert-on-tuple',
'A call of assert on a tuple will always evaluate to true if '
'the tuple is not empty, and will always evaluate to false if '
'it is.'),
'C0121': ('Missing required attribute "%s"', # W0103
'missing-module-attribute',
'Used when an attribute required for modules is missing.'),
}
options = (('required-attributes',
{'default' : (), 'type' : 'csv',
'metavar' : '<attributes>',
'help' : 'Required attributes for module, separated by a '
'comma'}
),
('bad-functions',
{'default' : ('map', 'filter', 'apply', 'input'),
'type' :'csv', 'metavar' : '<builtin function names>',
'help' : 'List of builtins function names that should not be '
'used, separated by a comma'}
),
)
reports = ( ('RP0101', 'Statistics by type', report_by_type_stats), )
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics
"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0,
method=0, class_=0)
def visit_module(self, node):
"""check module name, docstring and required arguments
"""
self.stats['module'] += 1
for attr in self.config.required_attributes:
if attr not in node:
self.add_message('C0121', node=node, args=attr)
def visit_class(self, node):
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats['class'] += 1
@check_messages('W0104', 'W0105')
def visit_discard(self, node):
"""check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astng.Const) and isinstance(expr.value,
str):
# treat string statement in a separated message
self.add_message('W0105', node=node)
return
# ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield (which are wrapped by a discard node in _ast XXX)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else W0104
if (isinstance(expr, (astng.Yield, astng.CallFunc)) or
(isinstance(node.parent, astng.TryExcept) and
node.parent.body == [node])):
return
if any(expr.nodes_of_class(astng.CallFunc)):
self.add_message('W0106', node=node, args=expr.as_string())
else:
self.add_message('W0104', node=node)
@check_messages('W0108')
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious
"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astng.CallFunc):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
# XXX are lambda still different with astng >= 0.18 ?
# *args and **kwargs need to be treated specially, since they
# are structured differently between the lambda and the function
# call (in the lambda they appear in the args.args list and are
# indicated as * and ** by two bits in the lambda's flags, but
# in the function call they are omitted from the args list and
# are indicated by separate attributes on the function call node).
ordinary_args = list(node.args.args)
if node.args.kwarg:
if (not call.kwargs
or not isinstance(call.kwargs, astng.Name)
or node.args.kwarg != call.kwargs.name):
return
elif call.kwargs:
return
if node.args.vararg:
if (not call.starargs
or not isinstance(call.starargs, astng.Name)
or node.args.vararg != call.starargs.name):
return
elif call.starargs:
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(call.args):
return
for i in range(len(ordinary_args)):
if not isinstance(call.args[i], astng.Name):
return
if node.args.args[i].name != call.args[i].name:
return
self.add_message('W0108', line=node.fromlineno, node=node)
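    # Illustrative sketch (not part of pylint itself): lambdas this check
    # would and would not flag:
    #
    #     f = lambda x, y: pow(x, y)      # W0108: replaceable by `pow` itself
    #     g = lambda x, y=2: pow(x, y)    # not flagged: defaults prevent the check
    #     h = lambda x: pow(x, 2)         # not flagged: argument lists differ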
def visit_function(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats[node.is_method() and 'method' or 'function'] += 1
# check for dangerous default values as arguments
for default in node.args.defaults:
try:
value = next(default.infer())
except astng.InferenceError:
continue
if (isinstance(value, astng.Instance) and
value.qname() in ('__builtin__.set', '__builtin__.dict', '__builtin__.list')):
if value is default:
msg = default.as_string()
elif type(value) is astng.Instance:
msg = '%s (%s)' % (default.as_string(), value.qname())
else:
msg = '%s (%s)' % (default.as_string(), value.as_string())
self.add_message('W0102', node=node, args=(msg,))
@check_messages('W0101', 'W0150')
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
        # Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, 'return', (astng.Function,))
@check_messages('W0101')
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@check_messages('W0101', 'W0150')
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
        # 1 - Does it have a right sibling?
self._check_unreachable(node)
        # 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, 'break', (astng.For, astng.While,))
@check_messages('W0101')
def visit_raise(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@check_messages('W0122')
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message('W0122', node=node)
@check_messages('W0141', 'W0142')
def visit_callfunc(self, node):
"""visit a CallFunc node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
if isinstance(node.func, astng.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or
name in node.root()):
if name in self.config.bad_functions:
self.add_message('W0141', node=node, args=name)
if node.starargs or node.kwargs:
scope = node.scope()
if isinstance(scope, astng.Function):
toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg),
(node.kwargs, scope.args.kwarg)) if n]
if toprocess:
for cfnode, fargname in toprocess[:]:
if getattr(cfnode, 'name', None) == fargname:
toprocess.remove((cfnode, fargname))
if not toprocess:
return # W0142 can be skipped
self.add_message('W0142', node=node.func)
@check_messages('W0199')
def visit_assert(self, node):
"""check the use of an assert statement on a tuple."""
if node.fail is None and isinstance(node.test, astng.Tuple) and \
len(node.test.elts) == 2:
self.add_message('W0199', node=node)
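    # Illustrative sketch (not part of pylint itself): the suspicious pattern is
    #
    #     assert (x, y)    # W0199: a non-empty tuple is always true
    #     assert x, y      # what was probably intended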
@check_messages('W0109')
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astng.Const):
key = k.value
if key in keys:
self.add_message('W0109', node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message('W0101', node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
        If, before reaching the try...finally block, we find a parent whose type
        is in breaker_classes, we skip the whole check."""
        # if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
        # the node could be an arbitrarily deep descendant of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
self.add_message('W0150', node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
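# Illustrative sketch (not part of pylint itself): a return that W0150 flags,
# because the finally clause swallows any exception raised in the try body:
#
#     def read_first_line(path):
#         try:
#             return open(path).readline()
#         finally:
#             return ''    # lost-exception: hides errors from the try clause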
class NameChecker(_BasicChecker):
msgs = {
'C0102': ('Black listed name "%s"',
'blacklisted-name',
'Used when the name is listed in the black list (unauthorized \
names).'),
'C0103': ('Invalid name "%s" for type %s (should match %s)',
'invalid-name',
                  'Used when the name doesn\'t match the regular expression \
                  associated with its type (constant, variable, class...).'),
}
options = (('module-rgx',
{'default' : MOD_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'module names'}
),
('const-rgx',
{'default' : CONST_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'module level names'}
),
('class-rgx',
{'default' : CLASS_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'class names'}
),
('function-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'function names'}
),
('method-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'method names'}
),
('attr-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'instance attribute names'}
),
('argument-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'argument names'}),
('variable-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'variable names'}
),
('inlinevar-rgx',
{'default' : COMP_VAR_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'list comprehension / generator expression variable \
names'}
),
# XXX use set
('good-names',
{'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Good variable names which should always be accepted,'
' separated by a comma'}
),
('bad-names',
{'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Bad variable names which should always be refused, '
'separated by a comma'}
),
)
def open(self):
self.stats = self.linter.add_stats(badname_module=0,
badname_class=0, badname_function=0,
badname_method=0, badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0)
@check_messages('C0102', 'C0103')
def visit_module(self, node):
self._check_name('module', node.name.split('.')[-1], node)
@check_messages('C0102', 'C0103')
def visit_class(self, node):
self._check_name('class', node.name, node)
for attr, anodes in node.instance_attrs.items():
self._check_name('attr', attr, anodes[0])
@check_messages('C0102', 'C0103')
def visit_function(self, node):
self._check_name(node.is_method() and 'method' or 'function',
node.name, node)
# check arguments name
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
@check_messages('C0102', 'C0103')
def visit_assname(self, node):
"""check module level assigned names"""
frame = node.frame()
ass_type = node.ass_type()
        if isinstance(ass_type, astng.Comprehension):
self._check_name('inlinevar', node.name, node)
elif isinstance(frame, astng.Module):
if isinstance(ass_type, astng.Assign) and not in_loop(ass_type):
self._check_name('const', node.name, node)
elif isinstance(ass_type, astng.ExceptHandler):
self._check_name('variable', node.name, node)
elif isinstance(frame, astng.Function):
            # variables introduced via a global statement aren't in the function locals
if node.name in frame:
self._check_name('variable', node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astng.AssName):
self._check_name('argument', arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _check_name(self, node_type, name, node):
"""check for a name using the type's regexp"""
if is_inside_except(node):
clobbering, _ = clobber_in_except(node)
if clobbering:
return
if name in self.config.good_names:
return
if name in self.config.bad_names:
self.stats['badname_' + node_type] += 1
self.add_message('C0102', node=node, args=name)
return
regexp = getattr(self.config, node_type + '_rgx')
if regexp.match(name) is None:
            type_label = {'inlinevar': 'inlined variable',
'const': 'constant',
'attr': 'attribute',
}.get(node_type, node_type)
self.add_message('C0103', node=node, args=(name, type_label, regexp.pattern))
self.stats['badname_' + node_type] += 1
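# Illustrative sketch (not part of pylint itself): assuming the stock
# upper-case CONST_NAME_RGX, module-level assignments outside any loop are
# checked as constants, so
#
#     threshold = 10    # C0103: invalid constant name
#     THRESHOLD = 10    # accepted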
class DocStringChecker(_BasicChecker):
msgs = {
'C0111': ('Missing docstring', # W0131
'missing-docstring',
                  'Used when a module, function, class or method has no docstring. \
                  Some special methods like __init__ don\'t necessarily require a \
                  docstring.'),
'C0112': ('Empty docstring', # W0132
'empty-docstring',
'Used when a module, function, class or method has an empty \
docstring (it would be too easy ;).'),
}
options = (('no-docstring-rgx',
{'default' : NO_REQUIRED_DOC_RGX,
'type' : 'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match '
'functions or classes name which do not require a '
'docstring'}
),
)
def open(self):
self.stats = self.linter.add_stats(undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0)
def visit_module(self, node):
self._check_docstring('module', node)
def visit_class(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring('class', node)
def visit_function(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = node.is_method() and 'method' or 'function'
if isinstance(node.parent.frame(), astng.Class):
overridden = False
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and \
isinstance(ancestor[node.name], astng.Function):
overridden = True
break
if not overridden:
self._check_docstring(ftype, node)
else:
self._check_docstring(ftype, node)
def _check_docstring(self, node_type, node):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
self.stats['undocumented_'+node_type] += 1
self.add_message('C0111', node=node)
elif not docstring.strip():
self.stats['undocumented_'+node_type] += 1
self.add_message('C0112', node=node)
class PassChecker(_BasicChecker):
"""check is the pass statement is really necessary"""
msgs = {'W0107': ('Unnecessary pass statement',
'unnecessary-pass',
'Used when a "pass" statement that can be avoided is '
'encountered.'),
}
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1:
self.add_message('W0107', node=node)
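# Illustrative sketch (not part of pylint itself): a pass is only needed
# when it is the sole statement in its block:
#
#     def todo():
#         pass          # necessary: the body would otherwise be empty
#
#     def done():
#         print('ok')
#         pass          # W0107: the block already has another statement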
class LambdaForComprehensionChecker(_BasicChecker):
"""check for using a lambda where a comprehension would do.
See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196>
where GvR says comprehensions would be clearer.
"""
msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension',
'deprecated-lambda',
'Used when a lambda is the first argument to "map" or '
'"filter". It could be clearer as a list '
'comprehension or generator expression.'),
}
@check_messages('W0110')
def visit_callfunc(self, node):
"""visit a CallFunc node, check if map or filter are called with a
lambda
"""
if not node.args:
return
if not isinstance(node.args[0], astng.Lambda):
return
infered = safe_infer(node.func)
if (infered
and infered.parent.name == '__builtin__'
and infered.name in ['map', 'filter']):
self.add_message('W0110', node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(LambdaForComprehensionChecker(linter))
|
tlksio/tlksio
|
env/lib/python3.4/site-packages/pylint/checkers/base.py
|
Python
|
mit
| 37,578
|
[
"VisIt"
] |
d35ca1c64609cb5653862a144df93eecb3fc1b3025dbe98e1a270f3d71d56d7e
|
import tensorflow as tf
from tensorflow.contrib import rnn as rnn_cell
import numpy as np
import io
from util.tf_utils import tf_confusion_metrics
import inspect
import util.eval as eval
class Model:
"""
    -- Copied from RNN; TODO: update to FC --
    Tensorflow graph using fully connected layers and a fully connected softmax layer for field identification
with multispectral/temporal data acquired from satellite imagery
Params
tf placeholders:
X Input data cube of dimensions [batch_size x max_observations x n_input]
y Target data Tensor of dimensions [batch_size x max_observations]
        seq_lengths  Number of observations for each batch; if observations < max_obs, data is
                     padded with zeros [batch_size]
input parameters:
n_input length of observed pixel values. [n_pixels * n_bands + n_time]
n_pixels number of observed pixels (default 3*3)
n_bands number of observed bands (default 6)
n_time number of time parameters (default 1 e.g. day of year)
n_classes number of target classes
batch_size number of batches
        max_obs      maximum number of observations; if seq_lengths < max_obs, matrices will be padded;
                     controls number of iterations in rnn layers (aka sequence length)
network specific parameters
n_layers number of rnn layers (aka depth)
learning_rate
dropout_keep_prob
logdir
Marc.Russwurm@tum.de
"""
def __init__(self, n_input=9 * 6 + 1, n_classes=20,
n_layers=2, dropout_keep_prob=.5, adam_lr=1e-3, adam_b1=0.9, adam_b2=0.999, adam_eps=1e-8,
fc_w_stddev=0.1, fc_b_offset=0.1, n_cell_per_input=1, activation_func=None, gpu=None):
# save input arguments
self.args = inspect.getargvalues(inspect.currentframe()).locals
del self.args["self"] # delete self
self.n_classes = n_classes
if activation_func is None:
activation_func = tf.nn.sigmoid
            # alternative: tf.nn.relu
self.n_neurons = n_neurons = n_cell_per_input * n_input
with tf.device(None):
with tf.variable_scope('input'):
# block of [batch_size x max_obs x n_input]
self.X = X = tf.placeholder(tf.float32, [None, n_input], name="X")
self.y = y = tf.placeholder(tf.float32, [None, n_classes], name="y")
self.batch_size = batch_size = tf.placeholder(tf.int32, name="batch_size")
with tf.name_scope('FC'):
# first fc layer: expand neuron dimensions from n_input to n_neurons
# list of fully connected weights matrices
fc_in = X
# first fc layer X:(batchsize x n_input) -> fc_in (batchsize x n_neurons)
fc_W0 = tf.Variable(tf.truncated_normal([n_input, n_neurons], stddev=fc_w_stddev), name="W0")
fc_b0 = tf.Variable(tf.constant(fc_b_offset, shape=[n_neurons]), name="b0")
h = activation_func(tf.matmul(fc_in, fc_W0) + fc_b0)
h = tf.nn.dropout(h, dropout_keep_prob)
# for all other fc layers
fc_W = []
fc_b = []
for i in range(1, n_layers):
W = tf.Variable(tf.truncated_normal([n_neurons, n_neurons], stddev=fc_w_stddev), name="W" + str(i))
b = tf.Variable(tf.constant(fc_b_offset, shape=[n_neurons]), name="b" + str(i))
h = tf.matmul(h, W) + b
# apply activation function
h = activation_func(h)
h = tf.nn.dropout(h, dropout_keep_prob)
fc_out = h
with tf.name_scope('fc_softmax'):
                # reshape outputs to a block of [batch_size x n_neurons]
softmax_in = tf.reshape(fc_out, [-1, n_neurons])
softmax_w = tf.Variable(tf.truncated_normal([n_neurons, n_classes], stddev=fc_w_stddev), name="W_softmax")
softmax_b = tf.Variable(tf.constant(fc_b_offset, shape=[n_classes]), name="b_softmax")
self.logits = logits = tf.matmul(softmax_in, softmax_w) + softmax_b
with tf.name_scope('train'):
# Define loss and optimizer
                # create mask for cross entropies in cases where seq_lengths < max_obs
# masking from http://stackoverflow.com/questions/34128104/tensorflow-creating-mask-of-varied-lengths
""" no masking needed
with tf.name_scope('mask'):
lengths_transposed = tf.expand_dims(seq_lengths, 1)
range = tf.range(0, max_obs, 1)
range_row = tf.expand_dims(range, 0)
self.mask = mask = tf.less(range_row, lengths_transposed)
"""
self.cross_entropy_matrix = cross_entropy_matrix = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
# normalize with total number of observations
self.cross_entropy = cross_entropy = tf.reduce_sum(cross_entropy_matrix) / tf.cast(batch_size,"float32")
tf.summary.scalar('cross_entropy', cross_entropy)
# grad_train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
self.train_op = tf.train.AdamOptimizer(learning_rate=adam_lr, beta1=adam_b1, beta2=adam_b2,
epsilon=adam_eps).minimize(cross_entropy)
# tf.summary.scalar('learning_rate', learning_rate)
with tf.name_scope('evaluation'):
self.probabilities = probs = tf.nn.softmax(logits, name="full_probability_matrix")
# Evaluate model
predicted = tf.argmax(logits, 1)
targets = tf.argmax(y, 1)
correct_pred = tf.equal(predicted, targets)
self.accuracy_op = accuracy = tf.reduce_sum(tf.cast(correct_pred, tf.float32)) / tf.cast(batch_size, tf.float32)
tf.summary.scalar('accuracy', accuracy)
self.probs_list = probs_list = tf.reshape(probs, (-1, n_classes))
predicted_list = tf.reshape(predicted, [-1])
targets_list = tf.reshape(targets, [-1])
one_hot_targets = tf.one_hot(targets_list, n_classes)
scores = tf.boolean_mask(probs_list, tf.cast(one_hot_targets, tf.bool))
self.scores = probs_list
self.targets = tf.reshape(y, [-1,n_classes])
# drop all values which are > seqlength
#self.scores = tf.boolean_mask(scores, mask_list)
#self.targets = tf.boolean_mask(targets_list, mask_list)
#self.obs = tf.boolean_mask(obs_list, mask_list)
"""
self.confusion_matrix = confusion_matrix = tf.contrib.metrics.confusion_matrix(
tf.boolean_mask(targets_list, mask_list),
tf.boolean_mask(predicted_list, mask_list),
num_classes=n_classes)
confusion_matrix = tf.cast(confusion_matrix, tf.uint8)
confusion_matrix = tf.expand_dims(confusion_matrix, 2)
confusion_matrix = tf.expand_dims(confusion_matrix, 0)
tf.summary.image("confusion matrix", confusion_matrix, max_outputs=3)
logits_ = tf.cast(logits, tf.uint8)
logits_ = tf.expand_dims(logits_, 3)
tf.summary.image("logits", logits_, max_outputs=1)
probs_ = tf.cast(probs*255, tf.uint8)
probs_ = tf.expand_dims(probs_, 3)
tf.summary.image("probabilities", probs_, max_outputs=1)
targets_ = tf.cast(y_, tf.uint8)
targets_ = tf.expand_dims(targets_, 3)
tf.summary.image("targets", targets_, max_outputs=1)
# tf.add_to_collection(tf.GraphKeys.SUMMARIES, cm_im_summary)
"""
            # Merge all the summaries for TensorBoard logging
self.merge_summary_op = tf.summary.merge_all()
self.init_op = tf.global_variables_initializer()
def main():
# model = Model()
test()
def unroll(x, y, seq_lengths):
"""
Reshapes and masks input and output data from
X(batchsize x n_max_obs x n_input) -> X_ (new_batchsize x n_input)
    y(batchsize x n_max_obs x n_classes) -> y_ (new_batchsize x n_classes)
    new_batchsize is variable, representing batchsize * n_max_obs - invalid_observations,
    with invalid observations being those beyond seq_length. This means that
    if at one point only 24 of a maximum of 26 images are available, X is padded with zeros
    and this masking removes the last two observations.
:return:
"""
# create mask for valid times of acquisition
    batch_size, max_seqlengths, n_input = x.shape
    ones = np.ones([batch_size, max_seqlengths])
    mask = np.arange(0, max_seqlengths) * ones < (seq_lengths * ones.T).T
new_x = x[mask]
new_y = y[mask]
return new_x, new_y
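# Hedged usage sketch (not in the original file): unrolling a padded batch.
# With batch_size=2, max_obs=3 and seq_lengths=[2, 3], only the 5 valid
# observations survive the mask:
#
#     x = np.random.rand(2, 3, 4)
#     y = np.random.rand(2, 3, 5)
#     x_, y_ = unroll(x, y, np.array([2, 3]))
#     assert x_.shape == (5, 4) and y_.shape == (5, 5)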
def test():
import os
import pickle
    n_input = 9 * 6 + 1
    batch_size = 50
    max_obs = 26
    n_classes = 38
confusion_matrix = np.zeros((n_classes, n_classes), dtype=int)
    model = Model(n_input=n_input, n_classes=n_classes, n_layers=2,
                  adam_lr=1e-3, dropout_keep_prob=0.5, n_cell_per_input=4)
savedir = "tmp"
if not os.path.exists(savedir):
os.makedirs(savedir)
# dump pickle args for loading
#pickle.dump(model.args, open(os.path.join(savedir, "args.pkl"), "wb"))
# dump human readable args
#open(os.path.join(savedir, "args.txt"), "w").write(str(model.args))
init_from = None
if init_from is not None:
args = pickle.load(open(os.path.join(init_from, "args.pkl"), "rb"))
X = np.random.rand(batch_size, max_obs, n_input)
y = np.random.rand(batch_size, max_obs, n_classes)
seq_length = np.random.randint(16, max_obs, batch_size)
with tf.Session() as sess:
sess.run([model.init_op])
        # unroll the padded data cubes into 2-D blocks of valid observations
        X_, y_ = unroll(X, y, seq_length)
        feed = {model.X: X_, model.y: y_, model.batch_size: X_.shape[0]}
# training step
for i in range(1, 30):
train_op, cross_entropy = \
sess.run([model.train_op,
model.cross_entropy], feed_dict=feed)
print("done")
if __name__ == '__main__':
main()
|
TUM-LMF/fieldRNN
|
cnn_model.py
|
Python
|
mit
| 10,879
|
[
"NEURON"
] |
a4617f51a1d26ab8d7909342c73b7d881cc3c44783954161420aea4f8b1a51f9
|
"""
/******************************************************************************
This source file is part of the Avogadro project.
Copyright 2013 Kitware, Inc.
This source code is released under the New BSD License, (the "License").
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
"""
import argparse
import json
import sys
# Some globals:
debug = False
def getOptions():
userOptions = {}
userOptions['Input File'] = {}
userOptions['Input File']['type'] = 'string'
userOptions['Input File']['default'] = ''
userOptions['Calculation'] = {}
userOptions['Calculation']['type'] = 'string'
userOptions['Calculation']['default'] = 'mg-auto'
opts = {'userOptions' : userOptions}
opts['allowCustomBaseName'] = True
return opts
def generateInputFile(opts, settings):
# Extract options:
input_file = opts['Input File']
calculation = opts['Calculation']
output = ''
output += 'read\n'
output += ' mol pqr %s\n'%input_file
output += 'end\n\n'
output += 'elec\n'
output += ' %s\n'%calculation
output += ' dime 97 97 97\n'
output += ' chgm spl0\n'
output += ' fglen 150 115 160\n'
output += ' cglen 156 121 162\n'
output += ' cgcent mol 1\n'
output += ' fgcent mol 1\n'
output += ' mol 1\n'
output += ' npbe\n'
output += ' bcfl sdh\n'
output += ' ion 1 0.150 2.0\n'
output += ' ion -1 0.150 2.0\n'
output += ' pdie 2.0\n'
output += ' sdie 78.54\n'
output += ' srfm mol\n'
output += ' srad 1.4\n'
output += ' sdens 10.0\n'
output += ' temp 298.15\n'
output += ' calcenergy total\n'
output += ' calcforce no\n'
output += ' write pot dx pot\n' # write potential output
output += 'end\n\n'
output += 'quit\n'
return output
def generateInput():
# Read options from stdin
stdinStr = sys.stdin.read()
# Parse the JSON strings
opts = json.loads(stdinStr)
# Generate the input file
inp = generateInputFile(opts['options'], opts['settings'])
# Prepare the result
result = {}
# Input file text -- will appear in the same order in the GUI as they are
# listed in the array:
files = []
files.append({'filename': 'apbs.in', 'contents': inp})
if debug:
files.append({'filename': 'debug_info', 'contents': stdinStr})
result['files'] = files
# Specify the main input file. This will be used by MoleQueue to determine
# the value of the $$inputFileName$$ and $$inputFileBaseName$$ keywords.
result['mainFile'] = 'apbs.in'
return result
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate an APBS input file.')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--print-options', action='store_true')
parser.add_argument('--generate-input', action='store_true')
parser.add_argument('--display-name', action='store_true')
args = vars(parser.parse_args())
debug = args['debug']
if args['display_name']:
print("APBS")
if args['print_options']:
print(json.dumps(getOptions()))
elif args['generate_input']:
print(json.dumps(generateInput()))
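# Hedged usage sketch (not part of the original script): Avogadro/MoleQueue
# drives this generator by piping a JSON document with 'options' and
# 'settings' keys to stdin, e.g.:
#
#     echo '{"options": {"Input File": "mol.pqr", "Calculation": "mg-auto"}, "settings": {}}' | python apbs.py --generate-input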
|
wadejong/avogadrolibs
|
avogadro/qtplugins/quantuminput/inputGenerators/apbs.py
|
Python
|
bsd-3-clause
| 3,447
|
[
"Avogadro"
] |
3f32011dedcbf1c0701aa03c633a284c7a11ca2ea3c73a3438cace24e5d40e8e
|
# coding: utf-8
from __future__ import unicode_literals
import numpy as np
import unittest
import os
from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder, \
solid_angle, contains_peroxide, RelaxationAnalyzer, VoronoiConnectivity, \
oxide_type
from pymatgen.io.vasp.inputs import Poscar
from pymatgen import Element, Structure, Lattice
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class VoronoiCoordFinderTest(PymatgenTest):
def setUp(self):
s = self.get_structure('LiFePO4')
self.finder = VoronoiCoordFinder(s, [Element("O")])
def test_get_voronoi_polyhedra(self):
self.assertEqual(len(self.finder.get_voronoi_polyhedra(0).items()), 8,
"Incorrect number of results returned for " +
"get_voronoi_polyhedra")
def test_get_coordination_number(self):
self.assertAlmostEqual(self.finder.get_coordination_number(0),
5.809265748999465, 7)
def test_get_coordinated_sites(self):
self.assertEqual(len(self.finder.get_coordinated_sites(0)), 8)
class RelaxationAnalyzerTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.Li2O'),
check_for_POTCAR=False)
s1 = p.structure
p = Poscar.from_file(os.path.join(test_dir, 'CONTCAR.Li2O'),
check_for_POTCAR=False)
s2 = p.structure
self.analyzer = RelaxationAnalyzer(s1, s2)
def test_vol_and_para_changes(self):
for k, v in self.analyzer.get_percentage_lattice_parameter_changes().items():
self.assertAlmostEqual(-0.0092040921155279731, v)
latt_change = v
vol_change = self.analyzer.get_percentage_volume_change()
self.assertAlmostEqual(-0.0273589101391,
vol_change)
        # This is a simple cubic cell, so the lattice and volume changes are
        # simply related. So let's test that.
self.assertAlmostEqual((1 + latt_change) ** 3 - 1, vol_change)
def test_get_percentage_bond_dist_changes(self):
for k, v in self.analyzer.get_percentage_bond_dist_changes().items():
for k2, v2 in v.items():
self.assertAlmostEqual(-0.009204092115527862, v2)
class VoronoiConnectivityTest(PymatgenTest):
def test_connectivity_array(self):
vc = VoronoiConnectivity(self.get_structure("LiFePO4"))
ca = vc.connectivity_array
        np.set_printoptions(threshold=np.inf, linewidth=np.inf, suppress=True)
expected = np.array([0, 1.96338392, 0, 0.04594495])
self.assertTrue(np.allclose(ca[15, :4, ca.shape[2] // 2], expected))
expected = np.array([0, 0, 0])
self.assertTrue(np.allclose(ca[1, -3:, 51], expected))
site = vc.get_sitej(27, 51)
self.assertEqual(site.specie, Element('O'))
expected = np.array([-0.29158, 0.74889, 0.95684])
self.assertTrue(np.allclose(site.frac_coords, expected))
class MiscFunctionTest(PymatgenTest):
def test_solid_angle(self):
center = [2.294508207929496, 4.4078057081404, 2.299997773791287]
coords = [[1.627286218099362, 3.081185538926995, 3.278749383217061],
[1.776793751092763, 2.93741167455471, 3.058701096568852],
[3.318412187495734, 2.997331084033472, 2.022167590167672],
[3.874524708023352, 4.425301459451914, 2.771990305592935],
[2.055778446743566, 4.437449313863041, 4.061046832034642]]
self.assertAlmostEqual(solid_angle(center, coords), 1.83570965938, 7,
"Wrong result returned by solid_angle")
def test_contains_peroxide(self):
for f in ['LiFePO4', 'NaFePO4', 'Li3V2(PO4)3', 'Li2O']:
self.assertFalse(contains_peroxide(self.get_structure(f)))
for f in ['Li2O2', "K2O2"]:
self.assertTrue(contains_peroxide(self.get_structure(f)))
def test_oxide_type(self):
el_li = Element("Li")
el_o = Element("O")
latt = Lattice([[3.985034, 0.0, 0.0],
[0.0, 4.881506, 0.0],
[0.0, 0.0, 2.959824]])
elts = [el_li, el_li, el_o, el_o, el_o, el_o]
coords = list()
coords.append([0.500000, 0.500000, 0.500000])
coords.append([0.0, 0.0, 0.0])
coords.append([0.632568, 0.085090, 0.500000])
coords.append([0.367432, 0.914910, 0.500000])
coords.append([0.132568, 0.414910, 0.000000])
coords.append([0.867432, 0.585090, 0.000000])
struct = Structure(latt, elts, coords)
self.assertEqual(oxide_type(struct, 1.1), "superoxide")
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_o, el_o, el_o]
latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911, 133.847504, 102.228244, 95.477342)
coords = [[0.513004, 0.513004, 1.000000],
[0.017616, 0.017616, 0.000000],
[0.649993, 0.874790, 0.775203],
[0.099587, 0.874790, 0.224797]]
struct = Structure(latt, elts, coords)
self.assertEqual(oxide_type(struct, 1.1), "ozonide")
latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
coords = [[0.666656, 0.666705, 0.750001],
[0.333342, 0.333378, 0.250001],
[0.000001, 0.000041, 0.500001],
[0.000001, 0.000021, 0.000001],
[0.333347, 0.333332, 0.649191],
[0.333322, 0.333353, 0.850803],
[0.666666, 0.666686, 0.350813],
[0.666665, 0.666684, 0.149189]]
struct = Structure(latt, elts, coords)
self.assertEqual(oxide_type(struct, 1.1), "peroxide")
el_li = Element("Li")
el_o = Element("O")
el_h = Element("H")
latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000, 90.000000, 90.000000)
elts = [el_h, el_h, el_li, el_li, el_o, el_o]
coords = [[0.000000, 0.500000, 0.413969],
[0.500000, 0.000000, 0.586031],
[0.000000, 0.000000, 0.000000],
[0.500000, 0.500000, 0.000000],
[0.000000, 0.500000, 0.192672],
[0.500000, 0.000000, 0.807328]]
struct = Structure(latt, elts, coords)
self.assertEqual(oxide_type(struct, 1.1), "hydroxide")
el_li = Element("Li")
el_n = Element("N")
el_h = Element("H")
latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000, 90.000000, 90.000000)
elts = [el_h, el_h, el_li, el_li, el_n, el_n]
coords = [[0.000000, 0.500000, 0.413969],
[0.500000, 0.000000, 0.586031],
[0.000000, 0.000000, 0.000000],
[0.500000, 0.500000, 0.000000],
[0.000000, 0.500000, 0.192672],
[0.500000, 0.000000, 0.807328]]
struct = Structure(latt, elts, coords)
self.assertEqual(oxide_type(struct, 1.1), "None")
el_o = Element("O")
latt = Lattice.from_parameters(4.389828, 5.369789, 5.369789, 70.786622, 69.244828, 69.244828)
elts = [el_o, el_o, el_o, el_o, el_o, el_o, el_o, el_o]
coords = [[0.844609, 0.273459, 0.786089],
[0.155391, 0.213911, 0.726541],
[0.155391, 0.726541, 0.213911],
[0.844609, 0.786089, 0.273459],
[0.821680, 0.207748, 0.207748],
[0.178320, 0.792252, 0.792252],
[0.132641, 0.148222, 0.148222],
[0.867359, 0.851778, 0.851778]]
struct = Structure(latt, elts, coords)
self.assertEqual(oxide_type(struct, 1.1), "None")
if __name__ == '__main__':
unittest.main()
|
rousseab/pymatgen
|
pymatgen/analysis/tests/test_structure_analyzer.py
|
Python
|
mit
| 8,139
|
[
"VASP",
"pymatgen"
] |
d9d304ad1e3881d340a74a260a07a6414b75211694120d41760192e96b52ed92
|
""" VOMS2CSSyncronizer is a helper class containing the logic for synchronization
of the VOMS user data with the DIRAC Registry
"""
__RCSID__ = "$Id$"
from collections import defaultdict
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Security.VOMSService import VOMSService
from DIRAC.Core.Utilities.List import fromChar
from DIRAC.Core.Utilities.PrettyPrint import printTable
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOOption, getVOMSRoleGroupMapping, \
getUsersInVO, getAllUsers, getUserOption
def _getUserNameFromMail(mail):
""" Utility to construct a reasonable user name from the user mail address
:param str mail: e-mail address
:return str: user name
"""
mailName = mail.split('@')[0].lower()
if '.' in mailName:
# Most likely the mail contains the full user name
names = mailName.split('.')
name = names[0][0] + names[-1].lower()
return name
return mailName
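# Hedged examples (not in the original module) of the heuristic above:
#
#     _getUserNameFromMail('John.Doe@cern.ch')  # -> 'jdoe'
#     _getUserNameFromMail('jdoe@cern.ch')      # -> 'jdoe'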
def _getUserNameFromDN(dn, vo):
""" Utility to construct a reasonable user name from the user DN
:param str dn: user DN
:return str: user name
"""
shortVO = vo
if '.' in vo:
vos = vo.split('.')
if vos[0] == 'vo':
vos = vos[1:]
if len(vos[-1]) == 2 or vos[-1] == 'org':
vos = vos[:1]
shortVO = '.'.join(vos)
# Weird case of just a name as DN !
if '/' not in dn and 'CN=' not in dn:
dn = 'CN=' + dn
entries = dn.split('/')
entries.reverse()
for entry in entries:
if entry:
# Weird case of no field name !
if '=' not in entry:
key, value = "CN", entry
else:
key, value = entry.split('=')
if key.upper() == 'CN':
ind = value.find("(")
# Strip of possible words in parenthesis in the name
if ind != -1:
value = value[:ind]
names = value.split()
if len(names) == 1:
nname = names[0].lower()
if '.' in nname:
names = nname.split('.')
nname = (names[0][0] + names[-1]).lower()
if '-' in nname:
names = nname.split('-')
nname = (names[0][0] + names[-1]).lower()
return nname
else:
robot = False
if names[0].lower().startswith("robot"):
names.pop(0)
robot = True
for name in list(names):
if name[0].isdigit() or "@" in name:
names.pop(names.index(name))
if robot:
nname = "robot-%s-%s" % (names[-1].lower(), shortVO)
else:
nname = (names[0][0] + names[-1]).lower()
if '.' in nname:
names = nname.split('.')
nname = (names[0][0] + names[-1]).lower()
return nname
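# Hedged example (not in the original module) of the DN heuristic above:
#
#     _getUserNameFromDN('/DC=ch/DC=cern/OU=Users/CN=John Doe', 'biomed')
#     # -> 'jdoe': the reversed DN is scanned for a CN field and the first
#     #    initial is glued to the last name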
def _getUserNameFromSurname(name, surname):
""" Construct a reasonable userName from the user name and surname
:param str name: user name
:param str surname: user surname
:return str: constructed user name
"""
names = name.split()
initials = ""
for nn in names:
initials += nn[0]
surnames = surname.split()
result = initials + surnames[-1]
    if len(result) >= 12:
        return result[:11].lower()
    return result.lower()
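# Hedged example (not in the original module):
#
#     _getUserNameFromSurname('John Paul', 'Doe')  # -> 'jpdoe'
#     # initials of all first names plus the last surname, lower-cased and
#     # truncated to 11 characters when longer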
class VOMS2CSSynchronizer(object):
def __init__(self, vo, autoModifyUsers=True, autoAddUsers=True,
autoDeleteUsers=False, autoLiftSuspendedStatus=False):
""" VOMS2CSSynchronizer class constructor
:param str vo: VO to be synced
        :param bool autoModifyUsers: flag to automatically modify user data in CS
        :param bool autoAddUsers: flag to automatically add new users to CS
        :param bool autoDeleteUsers: flag to automatically delete users from CS if no longer in VOMS
        :param bool autoLiftSuspendedStatus: flag to automatically remove Suspended status in CS
:return: None
"""
self.log = gLogger.getSubLogger("VOMS2CSSynchronizer")
self.csapi = CSAPI()
self.vo = vo
self.vomsVOName = getVOOption(vo, "VOMSName", "")
if not self.vomsVOName:
raise Exception("VOMS name not defined for VO %s" % vo)
self.adminMsgs = {'Errors': [], 'Info': []}
self.vomsUserDict = {}
self.autoModifyUsers = autoModifyUsers
self.autoAddUsers = autoAddUsers
self.autoDeleteUsers = autoDeleteUsers
self.autoLiftSuspendedStatus = autoLiftSuspendedStatus
self.voChanged = False
def syncCSWithVOMS(self):
""" Performs the synchronization of the DIRAC registry with the VOMS data. The resulting
CSAPI object containing modifications is returned as part of the output dictionary.
Those changes can be applied by the caller depending on the mode (dry or a real run)
:return: S_OK with a dictionary containing the results of the synchronization operation
"""
resultDict = defaultdict(list)
# Get DIRAC group vs VOMS Role Mappings
result = getVOMSRoleGroupMapping(self.vo)
if not result['OK']:
return result
vomsDIRACMapping = result['Value']['VOMSDIRAC']
diracVOMSMapping = result['Value']['DIRACVOMS']
noVOMSGroups = result['Value']['NoVOMS']
noSyncVOMSGroups = result['Value']['NoSyncVOMS']
vomsSrv = VOMSService(self.vo)
# Get VOMS users
result = vomsSrv.getUsers()
if not result['OK']:
self.log.error('Could not retrieve user information from VOMS', result['Message'])
return result
self.vomsUserDict = result['Value']
message = "There are %s user entries in VOMS for VO %s" % (len(self.vomsUserDict), self.vomsVOName)
self.adminMsgs['Info'].append(message)
self.log.info('VOMS user entries', message)
self.log.debug(self.vomsUserDict)
# Get DIRAC users
    result = self.getVOUserData()
if not result['OK']:
return result
diracUserDict = result['Value']
self.adminMsgs['Info'].append("There are %s registered users in DIRAC for VO %s" % (len(diracUserDict), self.vo))
self.log.info("Users already registered",
": there are %s registered users in DIRAC VO %s" % (len(diracUserDict), self.vo))
# Find new and obsoleted user DNs
existingDNs = []
obsoletedDNs = []
newDNs = []
for user in diracUserDict:
dn = diracUserDict[user]['DN']
# We can have users with more than one DN registered
dnList = fromChar(dn)
existingDNs.extend(dnList)
for dn in dnList:
if dn not in self.vomsUserDict:
obsoletedDNs.append(dn)
for dn in self.vomsUserDict:
if dn not in existingDNs:
newDNs.append(dn)
allDiracUsers = getAllUsers()
nonVOUserDict = {}
nonVOUsers = list(set(allDiracUsers) - set(diracUserDict))
if nonVOUsers:
result = self.csapi.describeUsers(nonVOUsers)
if not result['OK']:
self.log.error('Could not retrieve CS User description')
return result
nonVOUserDict = result['Value']
# Process users
defaultVOGroup = getVOOption(self.vo, "DefaultGroup", "%s_user" % self.vo)
# If a user is (previously put by hand) in a "QuarantineGroup",
# then the default group will be ignored.
# So, this option is only considered for the case of existing users.
quarantineVOGroup = getVOOption(self.vo, "QuarantineGroup")
newAddedUserDict = {}
for dn in self.vomsUserDict:
newDNForExistingUser = ''
diracName = ''
if dn in existingDNs:
for user in diracUserDict:
if dn == diracUserDict[user]['DN']:
diracName = user
if dn in newDNs:
# Find if the DN is already registered in the DIRAC CS
for user in nonVOUserDict:
if dn == nonVOUserDict[user]['DN']:
diracName = user
# Check the nickName in the same VO to see if the user is already registered
# with another DN
nickName = self.vomsUserDict[dn].get('nickname')
if nickName in diracUserDict or nickName in newAddedUserDict:
diracName = nickName
# This is a flag for adding the new DN to an already existing user
newDNForExistingUser = dn
# We have a real new user
if not diracName:
if nickName:
newDiracName = nickName
else:
newDiracName = self.getUserName(dn)
# Do not consider users with Suspended status in VOMS
if self.vomsUserDict[dn]['suspended'] or self.vomsUserDict[dn]['certSuspended']:
resultDict["SuspendedUsers"].append(newDiracName)
continue
# If the chosen user name exists already, append a distinguishing suffix
ind = 1
trialName = newDiracName
while newDiracName in allDiracUsers:
# We have a user with the same name but with a different DN
newDiracName = "%s_%d" % (trialName, ind)
ind += 1
# We now have everything to add the new user
userDict = {"DN": dn, "CA": self.vomsUserDict[dn]['CA'], "Email": self.vomsUserDict[dn]['mail']}
groupsWithRole = []
for role in self.vomsUserDict[dn]['Roles']:
groupList = vomsDIRACMapping.get(role, [])
for group in groupList:
if group not in noSyncVOMSGroups:
groupsWithRole.append(group)
userDict['Groups'] = list(set(groupsWithRole + [defaultVOGroup]))
message = "\n Added new user %s:\n" % newDiracName
for key in userDict:
message += " %s: %s\n" % (key, str(userDict[key]))
self.adminMsgs['Info'].append(message)
self.voChanged = True
if self.autoAddUsers:
self.log.info("Adding new user %s: %s" % (newDiracName, str(userDict)))
result = self.csapi.modifyUser(newDiracName, userDict, createIfNonExistant=True)
if not result['OK']:
self.log.warn('Failed adding new user %s' % newDiracName)
resultDict['NewUsers'].append(newDiracName)
newAddedUserDict[newDiracName] = userDict
continue
# We have an already existing user
modified = False
suspendedInVOMS = self.vomsUserDict[dn]['suspended'] or self.vomsUserDict[dn]['certSuspended']
suspendedVOList = getUserOption(diracName, 'Suspended', [])
userDict = {"DN": dn,
"CA": self.vomsUserDict[dn]['CA'],
"Email": self.vomsUserDict[dn].get('mail', self.vomsUserDict[dn].get('emailAddress'))}
# Set Suspended status for the user for this particular VO
if suspendedInVOMS and self.vo not in suspendedVOList:
suspendedVOList.append(self.vo)
userDict['Suspended'] = ','.join(suspendedVOList)
modified = True
# Remove the lifted Suspended status
if not suspendedInVOMS and self.vo in suspendedVOList and self.autoLiftSuspendedStatus:
newList = []
for vo in suspendedVOList:
if vo != self.vo:
newList.append(vo)
if not newList:
newList = ["None"]
userDict['Suspended'] = ','.join(newList)
modified = True
if newDNForExistingUser:
userDict['DN'] = ','.join([dn, diracUserDict.get(diracName, newAddedUserDict.get(diracName))['DN']])
modified = True
existingGroups = diracUserDict.get(diracName, {}).get('Groups', [])
nonVOGroups = list(set(existingGroups) - set(diracVOMSMapping))
groupsWithRole = []
for role in self.vomsUserDict[dn]['Roles']:
groupList = vomsDIRACMapping.get(role, [])
for group in groupList:
if group not in noSyncVOMSGroups:
groupsWithRole.append(group)
keepGroups = nonVOGroups + groupsWithRole
if not quarantineVOGroup or quarantineVOGroup not in existingGroups:
keepGroups += [defaultVOGroup]
for group in existingGroups:
if group in nonVOGroups:
continue
role = diracVOMSMapping.get(group, '')
# Among already existing groups for the user keep those without a special VOMS Role
# because this membership is done by hand in the CS
if "Role" not in role:
keepGroups.append(group)
# Keep existing groups with no VOMS attribute if any
if group in noVOMSGroups:
keepGroups.append(group)
        # Keep groups for which synchronization with VOMS is forbidden
if group in noSyncVOMSGroups:
keepGroups.append(group)
userDict['Groups'] = list(set(keepGroups))
# Merge together groups for the same user but different DNs
if diracName in newAddedUserDict:
otherGroups = newAddedUserDict[diracName].get('Groups', [])
userDict['Groups'] = list(set(keepGroups + otherGroups))
modified = True
# Check if something changed before asking CSAPI to modify
if diracName in diracUserDict:
message = "\n Modified user %s:\n" % diracName
modMsg = ''
for key in userDict:
if key == "Groups":
addedGroups = set(userDict[key]) - set(diracUserDict.get(diracName, {}).get(key, []))
removedGroups = set(diracUserDict.get(diracName, {}).get(key, [])) - set(userDict[key])
if addedGroups:
modMsg += " Added to group(s) %s\n" % ','.join(addedGroups)
if removedGroups:
modMsg += " Removed from group(s) %s\n" % ','.join(removedGroups)
elif key == "Suspended":
if userDict['Suspended'] == "None":
modMsg += " Suspended status removed\n"
else:
modMsg += " User Suspended in VOs: %s\n" % userDict['Suspended']
else:
oldValue = str(diracUserDict.get(diracName, {}).get(key, ''))
if str(userDict[key]) != oldValue:
modMsg += " %s: %s -> %s\n" % (key, oldValue, str(userDict[key]))
if modMsg:
self.adminMsgs['Info'].append(message + modMsg)
modified = True
if self.autoModifyUsers and modified:
result = self.csapi.modifyUser(diracName, userDict)
if result['OK'] and result['Value']:
self.log.info("Modified user %s: %s" % (diracName, str(userDict)))
self.voChanged = True
resultDict['ModifiedUsers'].append(diracName)
# Check if there are potentially obsoleted users
oldUsers = set()
for user in diracUserDict:
dnSet = set(fromChar(diracUserDict[user]['DN']))
if not dnSet.intersection(set(self.vomsUserDict)) and user not in nonVOUserDict:
for group in diracUserDict[user]['Groups']:
if group not in noVOMSGroups:
oldUsers.add(user)
# Check for obsoleted DNs
for user in diracUserDict:
dnSet = set(fromChar(diracUserDict[user]['DN']))
for dn in dnSet:
if dn in obsoletedDNs and user not in oldUsers:
self.log.verbose("Modified user %s: dropped DN %s" % (user, dn))
if self.autoModifyUsers:
userDict = diracUserDict[user]
modDNSet = dnSet - set([dn])
if modDNSet:
userDict['DN'] = ','.join(modDNSet)
result = self.csapi.modifyUser(user, userDict)
if result['OK'] and result['Value']:
self.log.info("Modified user %s: dropped DN %s" % (user, dn))
self.adminMsgs['Info'].append("Modified user %s: dropped DN %s" % (user, dn))
self.voChanged = True
                resultDict['ModifiedUsers'].append(user)
else:
oldUsers.add(user)
if oldUsers:
self.voChanged = True
if self.autoDeleteUsers:
self.log.info('The following users will be deleted: %s' % str(oldUsers))
result = self.csapi.deleteUsers(oldUsers)
if result['OK']:
self.adminMsgs['Info'].append('The following users are deleted from CS:\n %s\n' % str(oldUsers))
resultDict['DeletedUsers'] = oldUsers
else:
self.adminMsgs['Errors'].append('Error in deleting users from CS:\n %s' % str(oldUsers))
self.log.error('Error while user deletion from CS', result)
else:
      self.adminMsgs['Info'].append('The following users are to be checked for deletion:\n\t%s' %
                                    "\n\t".join(sorted(oldUsers)))
      self.log.info('The following users are to be checked for deletion: %s' % str(oldUsers))
resultDict['CSAPI'] = self.csapi
resultDict['AdminMessages'] = self.adminMsgs
resultDict['VOChanged'] = self.voChanged
return S_OK(resultDict)
def getVOUserData(self, refreshFlag=False):
""" Get a report for users of a given VO
:param bool refreshFlag: flag to indicate that the configuration must be refreshed
before looking up user data
:return: S_OK/S_ERROR, Value = user description dictionary
"""
if refreshFlag:
gConfig.forceRefresh()
# Get DIRAC users
diracUsers = getUsersInVO(self.vo)
if not diracUsers:
return S_ERROR("No VO users found for %s" % self.vo)
if refreshFlag:
result = self.csapi.downloadCSData()
if not result['OK']:
return result
result = self.csapi.describeUsers(diracUsers)
if not result['OK']:
self.log.error('Could not retrieve CS User description')
return result
def getVOUserReport(self):
""" Get a report string with the current status of the DIRAC Registry for the
Virtual Organization
:return: S_OK with the report string as Value
"""
result = self.getVOUserData(refreshFlag=True)
if not result['OK']:
return result
userDict = result['Value']
# Get DIRAC group vs VOMS Role Mappings
result = getVOMSRoleGroupMapping(self.vo)
if not result['OK']:
return result
diracVOMSMapping = result['Value']['DIRACVOMS']
records = []
groupDict = defaultdict(int)
multiDNUsers = {}
suspendedUsers = []
for user in userDict:
for group in userDict[user]['Groups']:
groupDict[group] += 1
dnList = fromChar(userDict[user]['DN'])
if len(dnList) > 1:
multiDNUsers[user] = dnList
if userDict[user].get('Status', 'Active') == 'Suspended':
suspendedUsers.append(user)
for group in diracVOMSMapping:
records.append((group, str(groupDict[group]), diracVOMSMapping.get(group, '')))
fields = ['Group', 'Number of users', 'VOMS Role']
output = printTable(fields, records, sortField='Group', printOut=False, numbering=False)
if multiDNUsers:
output += '\nUsers with multiple DNs:\n'
for user in multiDNUsers:
output += ' %s:\n' % user
for dn in multiDNUsers[user]:
output += ' %s\n' % dn
if suspendedUsers:
output += '\n%d suspended users:\n' % len(suspendedUsers)
output += ' %s' % ','.join(suspendedUsers)
return S_OK(output)
def getUserName(self, dn):
""" Utility to construct user name
:param str dn: user DN
:return str: user name
"""
name = self.vomsUserDict[dn].get('name')
surname = self.vomsUserDict[dn].get('surname')
if name and surname:
surnameName = _getUserNameFromSurname(name, surname)
return surnameName
dnName = _getUserNameFromDN(dn, self.vo)
# If robot, take the dn based name
if dnName.startswith('robot'):
return dnName
# Is mailName reasonable ?
mail = self.vomsUserDict[dn]['mail']
if mail:
mailName = _getUserNameFromMail(mail)
if len(mailName) > 5 and mailName.isalpha():
return mailName
# dnName too long
if len(dnName) >= 12:
dnName = dnName[:11]
    # Maybe the mail name is still more reasonable
if mail and len(dnName) < len(mailName) and mailName.isalpha():
return mailName
return dnName
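# Hedged usage sketch (not in the original module): a dry run of the
# synchronizer; applying the accumulated CS changes is left to the caller
# via the returned CSAPI object.
#
#     syncer = VOMS2CSSynchronizer('biomed', autoAddUsers=True)
#     result = syncer.syncCSWithVOMS()
#     if result['OK']:
#         print(result['Value']['AdminMessages'])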
|
chaen/DIRAC
|
ConfigurationSystem/Client/VOMS2CSSynchronizer.py
|
Python
|
gpl-3.0
| 19,777
|
[
"DIRAC"
] |
99a1bd8c15c18ccaa91e9e13e0b475073978f2ddbc58c5388666aaf0cb517559
|
import datetime
import json
from flask import Flask, g, render_template, flash, redirect, url_for, request, jsonify, abort, send_from_directory, session
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from flask_babel import Babel, get_locale, get_timezone, refresh, lazy_gettext, gettext
from flask_babel import to_user_timezone
from sqlalchemy import func
from sqlalchemy.orm import exc
from . import helper
import pytz
import os
import rfk.helper
from rfk.helper import now, get_secret_key
from rfk.site.helper import paginate_query, Pagination, permission_required
import rfk.database
from rfk.database.base import User, Anonymous, News
from rfk.database.donations import Donation
from rfk.database.streaming import Stream, Listener, Relay
from rfk.site.forms.login import login_form, register_form
from rfk.site.forms.settings import SettingsForm
from rfk.exc.base import UserNameTakenException, UserNotFoundException, InvalidUsernameException
from rfk.database.track import Track
from rfk.database.show import Show, UserShow
from rfk.database.stats import Statistic
from collections import OrderedDict
app = Flask(__name__, instance_relative_config=True)
app.config['DEBUG'] = True
app.config['BABEL_DEFAULT_TIMEZONE'] = 'Europe/Berlin'
app.config['BABEL_DEFAULT_LOCALE'] = 'de'
app.config['BABEL_LOCALE_PATH'] = 'de'
app.secret_key = get_secret_key()
locales = {'de': {'name': 'Bernd',
'img': '/static/img/cb/de.png',
'datetime_format': 'DD.MM.YYYY HH:mm'},
'en': {'name': 'English',
'img': '/static/img/cb/gb.png',
'datetime_format': 'MM/DD/YYYY HH:mm'}}
def get_datetime_format():
try:
return locales[str(get_locale())]['datetime_format']
except KeyError:
return locales['de']['datetime_format']
# Register Jinja2 filters
app.jinja_env.filters['bbcode'] = helper.bbcode
app.jinja_env.filters['timedelta'] = helper.timedelta
app.jinja_env.filters['naturaltime'] = helper.naturaltime
app.jinja_env.filters['naturaldelta'] = helper.naturaldelta
app.jinja_env.filters['countryball'] = rfk.helper.iso_country_to_countryball
app.jinja_env.filters['countryname'] = rfk.helper.iso_country_to_countryname
# Register Jinja2 globals
app.jinja_env.globals['piwik_url'] = helper.piwik_url()
app.jinja_env.globals['now_playing'] = helper.now_playing
app.jinja_env.globals['get_disco'] = helper.disco
app.jinja_env.globals['url_for_other_page'] = helper.url_for_other_page
babel = Babel(app)
@app.teardown_request
def shutdown_session(exception=None):
rfk.database.session.rollback()
rfk.database.session.remove()
@babel.localeselector
def babel_localeselector():
if hasattr(g, 'current_locale'):
return g.current_locale
elif request.cookies.get('locale') is not None:
return request.cookies.get('locale')
elif current_user is not None:
return current_user.get_locale()
return request.accept_languages.best_match(locales.keys())
@babel.timezoneselector
def babel_timezoneselector():
if hasattr(g, 'current_timezone'):
return g.current_timezone
elif request.cookies.get('timezone') is not None:
return request.cookies.get('timezone')
elif current_user is not None:
return current_user.get_timezone()
return 'Europe/Berlin'
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.anonymous_user = Anonymous
login_manager.login_view = "login"
login_manager.login_message = gettext("Please log in to access this page.")
#login_manager.refresh_view = "reauth"
@login_manager.user_loader
def load_user(userid):
return User.get_user(id=int(userid))
from . import user
app.register_blueprint(user.user, url_prefix='/user')
from . import show
app.register_blueprint(show.show)
from . import admin
app.register_blueprint(admin.admin, url_prefix='/admin')
from . import listen
app.register_blueprint(listen.listen, url_prefix='/listen')
from rfk.api import api
app.register_blueprint(api, url_prefix='/api')
from rfk.feeds import feeds
app.register_blueprint(feeds, url_prefix='/feeds')
from . import streaming
app.register_blueprint(streaming.streaming, url_prefix='/')
from . import backend
app.register_blueprint(backend.backend, url_prefix='/backend')
from . import donation
app.register_blueprint(donation.donation, url_prefix='/donations')
def after_this_request(f):
if not hasattr(g, 'after_request_callbacks'):
g.after_request_callbacks = []
g.after_request_callbacks.append(f)
return f
@app.after_request
def call_after_request_callbacks(response):
for callback in getattr(g, 'after_request_callbacks', ()):
response = callback(response)
return response
@app.before_request
def before_request():
if request.method == 'GET':
if request.args.get('lang') is not None and request.args.get('lang') != '':
current_user.locale = request.args.get('lang')
g.current_locale = request.args.get('lang')
@after_this_request
def remember_locale(response):
response.set_cookie('locale', current_user.locale,
expires=datetime.datetime.utcnow() + datetime.timedelta(days=365))
return response
if request.args.get('tz') is not None and \
request.args.get('tz') in pytz.common_timezones:
current_user.timezone = request.args.get('tz')
g.current_timezone = request.args.get('tz')
@after_this_request
def remember_timezone(response):
response.set_cookie('timezone', current_user.timezone)
return response
refresh()
request.current_locale = get_locale()
request.current_timezone = str(get_timezone())
@app.before_request
def make_menu():
request.menu = OrderedDict()
entries = [['index', gettext('Home')], ['listeners', gettext('Listeners')]]
if current_user.is_authenticated():
entries.append(['history', gettext('History')])
for entry in entries:
request.menu['app.' + entry[0]] = {'name': entry[1],
'url': url_for(entry[0]), 'active': (entry[0] == request.endpoint)}
for bpname in app.blueprints.keys():
try:
menu = app.blueprints[bpname].create_menu(request.endpoint)
if menu:
request.menu[bpname] = menu
except AttributeError:
pass
@app.errorhandler(404)
def page_not_found(e):
return render_template('error/404.html', TITLE='404'), 404
@app.route('/')
def index():
news = News.query.order_by(News.time.desc()).all()
streams = Stream.query.all()
return render_template('index.html', TITLE=gettext('Index'), news=news, streams=streams)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = login_form(request.form)
if request.method == 'POST' and form.validate():
username = form.username.data
try:
user = User.get_user(username=username)
if user and user.check_password(password=form.password.data):
user.authenticated = True
remember = form.remember.data
if login_user(user, remember=remember):
if not user.last_login:
flash(gettext('<strong>Tip:</strong> It appears that this is your first login, if you need any help please visit our help section by clicking <a href="/help">here</a>.'), 'info')
user.last_login = now()
loc = rfk.helper.get_location(request.remote_addr)
if 'country_code' in loc and loc['country_code'] is not None:
user.country = loc['country_code']
rfk.database.session.commit()
                    flash(gettext('Login successful. Welcome %s!') % user.username, 'success')
return redirect(request.args.get('next') or url_for('index'))
else:
form.username.errors.append(gettext('There was an error while logging you in.'))
#flash('There was an error while logging you in.', 'error')
else:
form.username.errors.append(gettext('Invalid User or Password.'))
#flash('Invalid username or password.')
except UserNotFoundException:
form.username.errors.append(gettext('Invalid User or Password.'))
#flash('Invalid username or password.')
return render_template('login.html', form=form, TITLE=gettext('Login'))
@app.route('/logout')
@login_required
def logout():
logout_user()
flash(gettext('Logout successful. See you soon!'), 'success')
return redirect(url_for('index'))
@app.route("/register", methods=["GET", "POST"])
def register():
form = register_form(request.form)
if request.method == "POST" and form.validate():
try:
user = User.add_user(form.username.data, form.password.data)
if form.email.data:
user.mail = form.email.data
rfk.database.session.commit()
flash(gettext('Registration successful. You can now login!'), 'success')
return redirect(url_for("login"))
except UserNameTakenException:
form.username.errors.append(gettext('Username already taken!'))
except InvalidUsernameException:
form.username.errors.append(gettext('Username invalid!'))
return render_template("register.html", form=form, TITLE=gettext('Register'))
@app.route('/settings', methods=['get', 'post'])
@login_required
def settings():
form = SettingsForm(request.form,
username=current_user.username,
email=current_user.mail,
show_def_name=current_user.get_setting(code='show_def_name'),
show_def_desc=current_user.get_setting(code='show_def_desc'),
show_def_tags=current_user.get_setting(code='show_def_tags'),
show_def_logo=current_user.get_setting(code='show_def_logo'),
use_icy=current_user.get_setting(code='use_icy'))
if request.method == "POST" and form.validate():
if current_user.check_password(password=form.old_password.data):
if form.new_password.data:
current_user.password = User.make_password(form.new_password.data)
current_user.mail = form.email.data
current_user.set_setting(code='show_def_name', value=form.show_def_name.data)
current_user.set_setting(code='show_def_desc', value=form.show_def_desc.data)
current_user.set_setting(code='show_def_tags', value=form.show_def_tags.data)
current_user.set_setting(code='show_def_logo', value=form.show_def_logo.data)
current_user.set_setting(code='use_icy', value=form.use_icy.data)
rfk.database.session.commit()
flash(gettext('Settings successfully updated.'), 'success')
return redirect(url_for('settings'))
else:
form.old_password.errors.append(gettext('Wrong password.'))
return render_template('settings.html', form=form, TITLE=gettext('Settings'),
imgur={'client': rfk.CONFIG.get('site', 'imgur-client')})
@app.route('/irc')
def irc():
return render_template('irc.html', TITLE='IRC')
@app.route('/help')
def help():
return render_template('help.html', TITLE='Help')
@app.route('/history/', defaults={'page': 1})
@app.route('/history/page/<int:page>')
@login_required
def history(page):
per_page = 25
(tracks, total_count) = paginate_query(Track.query.join(Show).join(UserShow).order_by(Track.end.desc()), page=page, per_page=per_page)
pagination = Pagination(page, per_page, total_count)
return render_template('history.html', tracks=tracks, pagination=pagination, TITLE=gettext('History'))
@app.route('/donations')
def donations():
return redirect(url_for("donation.list"))
#@app.route('/stats')
def stats():
#>>> session.query(Listener).filter(Listener.useragent.like('%iTunes%')).count()
countries = rfk.database.session.query(Listener.country, func.count('*')).group_by(Listener.country).filter(Listener.country != None, Listener.country != 'DE').all()
return render_template('stats.html', countries=json.dumps(countries), TITLE=gettext('Stats'))
@app.route('/listeners')
def listeners():
# check if current_user is logged in and if user is streaming or if user is admin
if not current_user.is_anonymous():
is_streaming = UserShow.query.join(User).filter(UserShow.status == UserShow.STATUS.STREAMING, UserShow.user == current_user).first()
if is_streaming or current_user.has_permission('admin'):
show_listener_list = True
else:
show_listener_list = False
else:
show_listener_list = False
# get current bandwidth of all active relays
total_bandwidth = 0
relays = Relay.query.filter(Relay.status == Relay.STATUS.ONLINE).all()
active_relays = len(relays)
for relay in relays:
total_bandwidth += relay.usage
    total_bandwidth *= 128  # convert kbit/s to bytes/s (1024 / 8 = 128)
# get all current listeners
current_listener = Listener.get_current_listeners()
# generate per country stats
per_country = {}
for listener in current_listener:
country = listener.country
try:
per_country[country]['count'] += 1
except KeyError:
per_country[country] = {'count': 1}
per_country[country]['ball'] = country
per_country = sorted(per_country.iteritems(), key=lambda (k, v): v['count'], reverse=True)
# get recent listener count to calculate a trend
try:
stats_total = Statistic.query.filter(Statistic.identifier == 'lst-total').one()
stats = stats_total.get(start=now() - datetime.timedelta(minutes=5), stop=now())
except exc.NoResultFound:
stats = None
if stats and stats.count() > 0:
listener_sum = 0
for stat in stats:
listener_sum += stat.value
average_listeners = listener_sum / stats.count()
else:
average_listeners = len(current_listener)
return render_template('listenergraph.html', TITLE=gettext('Listeners'), show_listener_list=show_listener_list,
listeners=current_listener, per_country=per_country, total_bandwidth=total_bandwidth,
active_relays=active_relays, average_listeners=average_listeners)
@app.route('/player5')
@app.route('/player')
def html5player():
'''legacy url'''
return redirect(url_for("listen.html5_player"))
@app.route('/api/')
@app.route('/api/index.php')
def api_legacy():
'''lazy people...'''
apikey = request.args.get("apikey")
if apikey != '86c6c5162aa6845906cff55320ea8608991358c3':
return ''
#ltid=0&w=track%2Clistener%2Cdj%2Cshow%2Cnextshows,
ret = {}
listeners = Listener.query.filter(Listener.disconnect == None).all()
tmp = {}
for listener in listeners:
if listener.stream_relay.stream.code in tmp:
tmp[listener.stream_relay.stream.code]['c'] += 1
else:
tmp[listener.stream_relay.stream.code] = {'c': 1,
'name': listener.stream_relay.stream.code,
'description': listener.stream_relay.stream.name}
ret['listener'] = tmp.values()
currtrack = Track.current_track()
    ltid = request.args.get("ltid", type=int)
if currtrack and ltid != currtrack.track:
ret['trackid'] = currtrack.track
ret['title'] = currtrack.title.name
ret['artist'] = currtrack.title.artist.name
show = Show.get_active_show()
if show:
user = show.get_active_user()
ret['dj'] = user.username
ret['djid'] = user.user
ret['status'] = 'STREAMING'
ret['showbegin'] = int(to_user_timezone(show.begin).strftime("%s"))
if show.end:
ret['showend'] = int(to_user_timezone(show.end).strftime("%s"))
else:
ret['showend'] = None
ret['showtype'] = 'PLANNED'
ret['showname'] = show.name
ret['showdescription'] = show.description
ret['showid'] = show.show
ret['showthread'] = None
ret['showdj'] = user.username
ret['showdjid'] = user.user
ret['shows'] = []
if show and show.end:
filter_begin = show.end
else:
filter_begin = now()
nextshow = Show.query.filter(Show.begin >= filter_begin).order_by(Show.begin.asc()).first()
if nextshow:
arr = {}
arr['showbegin'] = int(to_user_timezone(nextshow.begin).strftime("%s"))
if nextshow.end:
arr['showend'] = int(to_user_timezone(nextshow.end).strftime("%s"))
else:
arr['showend'] = None
arr['showtype'] = 'PLANNED'
arr['showname'] = nextshow.name
arr['showdescription'] = nextshow.description
arr['showid'] = nextshow.show
arr['showdj'] = nextshow.users[0].user.username
arr['showdjid'] = nextshow.users[0].user.user
arr['showthread'] = None
ret['shows'].append(arr)
return jsonify(ret)
@app.route('/robots.txt')
@app.route('/favicon.ico')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
|
buckket/weltklang
|
lib/rfk/site/__init__.py
|
Python
|
bsd-3-clause
| 17,628
|
[
"VisIt"
] |
0ad7b8a5c207662143b2f6199928f35ee9e993398d3527249935e275e593e434
|
''' Use this file to hand-register multiple depth cameras with the 3D visualizer
Procedure:
1) Modify the script below for your files
2) After adding points, click the mayavi button in the window and add a Transformation to the scene. Drag the second point set onto the transformation.
3) Manually match the two scenes
4) Click the red "Start/Stop Script Recording" button. Transform a tiny bit so that you see the transformation matrix
5) Write down/save the transformation matrix
'''
from pyKinectTools.utils.DepthUtils import *
from scipy.misc import imread
from mayavi import mlab
from mayavi.api import Engine
import numpy as np
import cPickle as pickle
''' ---- Transforms ---
ICUDec2012 data:
#3->2: transform_data.transform.matrix.__setstate__({'elements': [0.9553782053802112, -0.09691967661345026, 0.27903236545178867, -392.81878278215254, 0.09283849668727677, 0.9952919671849423, 0.02783726980083738, 231.6724797545669, -0.2804166511056782, -0.0006901755293638524, 0.9598781305147085, -118.84124965680712, 0.0, 0.0, 0.0, 1.0]})
#1->2: transform_data2.transform.matrix.__setstate__({'elements': [-0.8531195226064485, -0.08215320378328564, 0.5152066878990207, 761.2299809410998, 0.3177589268248827, 0.7014041249433673, 0.6380137286418792, 1427.5420972165339, -0.4137829679564377, 0.7080134918351199, -0.5722766383564786, -3399.696025885259, 0.0, 0.0, 0.0, 1.0]})
Office 23Feb2013
top->bottom view
#1->2 [0.9955555989899513, 0.03914715069837748, 0.08565366257179756, 240.34720254711863,
-0.08535684788048972, 0.7593599156829527, 0.6450478485925556, 1305.7428154935583,
-0.039790172651939335, -0.6494921239292868, 0.759326493093817, -237.20556423494145,
0.0, 0.0, 0.0, 1.0]
'''
'''
ICUDec2012
base_dir1 = '/media/Data/ICU_Dec2012/ICU_Dec2012_r40_c1/depth/356/14/1/'
base_dir23 = '/media/Data/ICU_Dec2012/ICU_Dec2012_r40_c2/depth/356/14/0/'
depthFile1 = base_dir1+'device_1/'+'depth_356_14_1_55_00_95.png'
depthFile2 = base_dir23+'device_1/'+'depth_356_14_0_10_01_44.png'
depthFile3 = base_dir23+'device_2/'+'depth_356_14_0_10_00_48.png'
'''
'''
# Office_25Feb2013
base_dir = '/Users/colin/Data/Office_25Feb2013/depth/56/17/31/'
depthFile1 = base_dir+'device_1/'+'depth_56_17_31_0_00_506677.png'
depthFile2 = base_dir+'device_2/'+'depth_56_17_31_0_00_510469.png'
'''
'''
# CIRL_28Feb2013
base_dir = '/Users/colin/Data/CIRL_28Feb2013/depth/59/13/42/'
depthFile1 = base_dir+'device_1/'+'depth_59_13_42_0_00_364016.png'
depthFile2 = base_dir+'device_2/'+'depth_59_13_42_0_00_645072.png'
result:
T = np.array([[0.857551855717905, 0.11935353392976167, 0.5003594195108932, -1053.586999301418],
[0.1430128492517155, 0.8790419590510106, -0.45478847740098743, 1081.8626448851123],
[-0.4941175363248235, 0.4615625289885183, 0.736754974282534, 1295.7083313896273],
[0.0, 0.0, 0.0, 1.0]])
'''
'''
# JHU CIRL pod
base_dir = '/Users/colin/Data/JHU_RGBD_Pose/CIRL_P1/depth/100/11/15/'
depthFile1 = base_dir+'device_1/'+'depth_100_11_15_59_13_725918.png'
depthFile2 = base_dir+'device_2/'+'depth_100_11_15_59_13_395133.png'
result:
T = np.array([[0.857551855717905, 0.11935353392976167, 0.5003594195108932, -1053.586999301418],
[0.1430128492517155, 0.8790419590510106, -0.45478847740098743, 1081.8626448851123],
[-0.4941175363248235, 0.4615625289885183, 0.736754974282534, 1295.7083313896273],
[0.0, 0.0, 0.0, 1.0]])
'''
''' --------------------Main setup-------------------------------- '''
depthIm1 = imread(depthFile1)
depthIm2 = imread(depthFile2)
pts1 = depthIm2XYZ(depthIm1).astype(np.int)
pts2 = depthIm2XYZ(depthIm2).astype(np.int)
engine = Engine()
engine.start()
figure = mlab.figure(1, bgcolor=(0,0,0), fgcolor=(1,1,1))
mlab.clf()
figure.scene.disable_render = True
interval = 40 # Don't show all points (otherwise it's slow!)
pts = np.array([x for x in pts1 if x[2] > -93500])
pts = np.vstack([[0,0,1], pts])
ptsViz1 = mlab.points3d(pts[::interval,0], pts[::interval,1], pts[::interval,2], 2.-(np.minimum(pts[::interval,2], 5000)/float((-pts[:,2]).max()))/1000., scale_factor=30., colormap='summer')
pts = np.array([x for x in pts2 if x[2] > -93500])
pts = np.vstack([[0,0,1], pts])
ptsViz2 = mlab.points3d(pts[::interval,0], pts[::interval,1], pts[::interval,2], 2.-(np.minimum(pts[::interval,2], 5000)/float((-pts[:,2]).max()))/1000., scale_factor=30., colormap='Blues')
# Copy description and transform here as 4x4 matrix
# e.g.
filename = 'Registration.dat'
description = "JHU CIRL Pod Bottom (good) cam to top (bad) cam"
T = np.array([[0.857551855717905, 0.11935353392976167, 0.5003594195108932, -1053.586999301418],
[0.1430128492517155, 0.8790419590510106, -0.45478847740098743, 1081.8626448851123],
[-0.4941175363248235, 0.4615625289885183, 0.736754974282534, 1295.7083313896273],
[0.0, 0.0, 0.0, 1.0]])
# save
pickle.dump({'description':description, 'transform':T}, open(filename, 'wb'))
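# Usage sketch (assumes `pts1` is the (N, 3) point array built above): load the
# saved registration and map camera-1 points into the camera-2 frame by
# applying the 4x4 transform in homogeneous coordinates.
# reg = pickle.load(open(filename, 'rb'))
# T_loaded = np.asarray(reg['transform'])
# pts1_h = np.hstack([pts1[:, :3], np.ones((len(pts1), 1))])  # N x 4 homogeneous
# pts1_in_cam2 = pts1_h.dot(T_loaded.T)[:, :3]  # back to N x 3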
''' --------------------------------------------------------- '''
''' ------------------- Fine Tuning ------------------------- '''
''' --------------------------------------------------------- '''
from mayavi.filters.transform_data import TransformData
depthIm1 = imread(depthFile1)
depthIm2 = imread(depthFile2)
depthIm3 = imread(depthFile3)
''' Put all in the frame of 2 '''
pts1 = depthIm2XYZ(depthIm1)#.astype(np.int)
pts2 = depthIm2XYZ(depthIm2)#.astype(np.int)
pts3 = depthIm2XYZ(depthIm3).astype(np.int)
p1 = depthIm2PosIm(depthIm1)
p2 = depthIm2PosIm(depthIm2)
p3 = depthIm2PosIm(depthIm3)
'''3DViz'''
engine = Engine()
engine.start()
figure = mlab.figure(1, bgcolor=(0,0,0), fgcolor=(1,1,1))
mlab.clf()
figure.scene.disable_render = True
interval = 15
'''2'''
pts = np.array([x for x in pts2 if x[2] > -93500])
ptsViz1 = mlab.points3d(pts[::interval,0], pts[::interval,1], pts[::interval,2], 2.-(np.minimum(pts[::interval,2], 5000)/float((-pts[:,2]).max()))/1000., scale_factor=10., colormap='Blues')
'''3'''
pts = np.array([x for x in pts3 if x[2] > -93500])
ptsViz2 = mlab.points3d(pts[::interval,0], pts[::interval,1], pts[::interval,2], 2.-(np.minimum(pts[::interval,2], 5000)/float((-pts[:,2]).max()))/1000., scale_factor=10., colormap='PuOr')
transform_data = TransformData()
engine.add_filter(transform_data, engine.scenes[0].children[1])
transform_data.children = [engine.scenes[0].children[1].children[0]]
# engine.scenes[0].children[1].children[0]=[]
transform_data.transform.matrix.__setstate__({'elements': [0.9553782053802112, -0.09691967661345026, 0.27903236545178867, -392.81878278215254, 0.09283849668727677, 0.9952919671849423, 0.02783726980083738, 231.6724797545669, -0.2804166511056782, -0.0006901755293638524, 0.9598781305147085, -118.84124965680712, 0.0, 0.0, 0.0, 1.0]})
transform_data.widget.set_transform(transform_data.transform)
transform_data.filter.update()
transform_data.widget.enabled = False
'''1'''
pts = np.array([x for x in pts1 if x[2] > -93500])
ptsViz1 = mlab.points3d(pts[::interval,0], pts[::interval,1], pts[::interval,2], 2.-(np.minimum(pts[::interval,2], 5000)/float((-pts[:,2]).max()))/1000., scale_factor=10., colormap='summer')
mlab.view(azimuth=0, elevation=0, distance=3000., focalpoint=(0,0,0), figure=figure)#, reset_roll=False)
figure.scene.disable_render = False
transform_data2 = TransformData()
engine.add_filter(transform_data2, engine.scenes[0].children[2])
transform_data2.children = [engine.scenes[0].children[2].children[0]]
# engine.scenes[0].children[2].children[0]=[]
transform_data2.transform.matrix.__setstate__({'elements': [-0.8531195226064485, -0.08215320378328564, 0.5152066878990207, 761.2299809410998, 0.3177589268248827, 0.7014041249433673, 0.6380137286418792, 1427.5420972165339, -0.4137829679564377, 0.7080134918351199, -0.5722766383564786, -3399.696025885259, 0.0, 0.0, 0.0, 1.0]})
transform_data2.widget.set_transform(transform_data2.transform)
transform_data2.filter.update()
transform_data2.widget.enabled = False
'''
mlab.view(azimuth=0, elevation=0, distance=3000., focalpoint=(0,0,0), figure=figure)#, reset_roll=False)
'''
|
colincsl/pyKinectTools
|
pyKinectTools/utils/AlignCameras.py
|
Python
|
bsd-2-clause
| 7,960
|
[
"Mayavi"
] |
3ad16b0653dca2714dc6faafb474df1297b1e52b951965d9bf323d787827e739
|
# .. coding: utf-8
# $Id: __init__.py 7745 2014-02-28 14:15:59Z milde $
# Author: Engelbert Gruber, Günter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # i.e. ##.
import sys
import os
import time
import re
import string
import urllib
try:
import roman
except ImportError:
import docutils.utils.roman as roman
from docutils import frontend, nodes, languages, writers, utils, io
from docutils.utils.error_reporting import SafeString
from docutils.transforms import writer_aux
from docutils.utils.math import pick_math_environment, unichar2tex
class Writer(writers.Writer):
supported = ('latex','latex2e')
"""Formats this writer supports."""
default_template = 'default.tex'
default_template_path = os.path.dirname(__file__)
default_preamble = '\n'.join([r'% PDF Standard Fonts',
r'\usepackage{mathptmx} % Times',
r'\usepackage[scaled=.90]{helvet}',
r'\usepackage{courier}'])
settings_spec = (
'LaTeX-Specific Options',
None,
(('Specify documentclass. Default is "article".',
['--documentclass'],
{'default': 'article', }),
('Specify document options. Multiple options can be given, '
'separated by commas. Default is "a4paper".',
['--documentoptions'],
{'default': 'a4paper', }),
('Footnotes with numbers/symbols by Docutils. (default)',
['--docutils-footnotes'],
{'default': True, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Alias for --docutils-footnotes (deprecated)',
['--use-latex-footnotes'],
{'action': 'store_true',
'validator': frontend.validate_boolean}),
('Use figure floats for footnote text (deprecated)',
['--figure-footnotes'],
{'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "superscript".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'superscript',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Use \\cite command for citations. ',
['--use-latex-citations'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Use figure floats for citations '
'(might get mixed with real figures). (default)',
['--figure-citations'],
{'dest': 'use_latex_citations', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
        ('Specify LaTeX packages/stylesheets. '
         ' A style is referenced with \\usepackage if the extension is '
         '".sty" or omitted, and with \\input otherwise. '
         ' Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'default': '', 'metavar': '<file[,file,...]>',
'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of LaTeX packages/stylesheets. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output *.tex file. ',
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list}),
('Link to the stylesheet(s) in the output file. (default)',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Embed the stylesheet(s) in the output file. '
'Stylesheets must be accessible during processing. ',
['--embed-stylesheet'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "."',
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': ['.']}),
('Customization by LaTeX code in the preamble. '
'Default: select PDF standard fonts (Times, Helvetica, Courier).',
['--latex-preamble'],
{'default': default_preamble}),
('Specify the template file. Default: "%s".' % default_template,
['--template'],
{'default': default_template, 'metavar': '<file>'}),
('Table of contents by LaTeX. (default) ',
['--use-latex-toc'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table of contents by Docutils (without page numbers). ',
['--use-docutils-toc'],
{'dest': 'use_latex_toc', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Add parts on top of the section hierarchy.',
['--use-part-section'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Attach author and date to the document info table. (default) ',
['--use-docutils-docinfo'],
{'dest': 'use_latex_docinfo', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Attach author and date to the document title.',
['--use-latex-docinfo'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
("Typeset abstract as topic. (default)",
['--topic-abstract'],
{'dest': 'use_latex_abstract', 'action': 'store_false',
'validator': frontend.validate_boolean}),
("Use LaTeX abstract environment for the document's abstract. ",
['--use-latex-abstract'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Color of any hyperlinks embedded in text '
'(default: "blue", "false" to disable).',
['--hyperlink-color'], {'default': 'blue'}),
('Additional options to the "hyperref" package '
'(default: "").',
['--hyperref-options'], {'default': ''}),
('Enable compound enumerators for nested enumerated lists '
'(e.g. "1.2.a.ii"). Default: disabled.',
['--compound-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compound enumerators for nested enumerated lists. '
'This is the default.',
['--no-compound-enumerators'],
{'action': 'store_false', 'dest': 'compound_enumerators'}),
('Enable section ("." subsection ...) prefixes for compound '
         'enumerators. This has no effect without --compound-enumerators. '
'Default: disabled.',
['--section-prefix-for-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable section prefixes for compound enumerators. '
'This is the default.',
['--no-section-prefix-for-enumerators'],
{'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
('Set the separator between section number and enumerator '
'for compound enumerated lists. Default is "-".',
['--section-enumerator-separator'],
{'default': '-', 'metavar': '<char>'}),
        ('When possible, use the specified environment for literal-blocks. '
'Default is quoting of whitespace and special chars.',
['--literal-block-env'],
{'default': ''}),
        ('When possible, use verbatim for literal-blocks. '
'Compatibility alias for "--literal-block-env=verbatim".',
['--use-verbatim-when-possible'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
        ('Table style. "standard" with horizontal and vertical lines, '
         '"booktabs" (LaTeX booktabs style) with only horizontal lines '
         'above and below the table and below the header, or "borderless". '
         'Default: "standard"',
['--table-style'],
{'choices': ['standard', 'booktabs','nolines', 'borderless'],
'default': 'standard',
'metavar': '<format>'}),
('LaTeX graphicx package option. '
'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
'Default is no option.',
['--graphicx-option'],
{'default': ''}),
('LaTeX font encoding. '
'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
'any other combination of options to the `fontenc` package. ',
['--font-encoding'],
{'default': 'T1'}),
        ('By default the LaTeX writer puts the reference title into '
         'hyperreferences. Specify "ref*" or "pageref*" to get the section '
         'number or the page number instead.',
['--reference-label'],
{'default': None, }),
('Specify style and database for bibtex, for example '
'"--use-bibtex=mystyle,mydb1,mydb2".',
['--use-bibtex'],
{'default': None, }),
),)
settings_defaults = {'sectnum_depth': 0 # updated by SectNum transform
}
config_section = 'latex2e writer'
config_section_dependencies = ('writers',)
head_parts = ('head_prefix', 'requirements', 'latex_preamble',
'stylesheet', 'fallbacks', 'pdfsetup',
'title', 'subtitle', 'titledata')
visitor_attributes = head_parts + ('body_pre_docinfo', 'docinfo',
'dedication', 'abstract', 'body')
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = LaTeXTranslator
# Override parent method to add latex-specific transforms
def get_transforms(self):
return writers.Writer.get_transforms(self) + [
# Convert specific admonitions to generic one
writer_aux.Admonitions,
# TODO: footnote collection transform
]
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
# copy parts
for part in self.visitor_attributes:
setattr(self, part, getattr(visitor, part))
# get template string from file
try:
template_file = open(self.document.settings.template, 'rb')
except IOError:
template_file = open(os.path.join(self.default_template_path,
self.document.settings.template), 'rb')
template = string.Template(unicode(template_file.read(), 'utf-8'))
template_file.close()
# fill template
self.assemble_parts() # create dictionary of parts
self.output = template.substitute(self.parts)
def assemble_parts(self):
"""Assemble the `self.parts` dictionary of output fragments."""
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
lines = getattr(self, part)
if part in self.head_parts:
if lines:
lines.append('') # to get a trailing newline
self.parts[part] = '\n'.join(lines)
else:
# body contains inline elements, so join without newline
self.parts[part] = ''.join(lines)
class Babel(object):
"""Language specifics for LaTeX."""
# TeX (babel) language names:
# ! not all of these are supported by Docutils!
#
    # based on LyX' languages file with adaptations to `BCP 47`_
# (http://www.rfc-editor.org/rfc/bcp/bcp47.txt) and
# http://www.tug.org/TUGboat/Articles/tb29-3/tb93miklavec.pdf
# * the key without subtags is the default
# * case is ignored
# cf. http://docutils.sourceforge.net/docs/howto/i18n.html
# http://www.w3.org/International/articles/language-tags/
# and http://www.iana.org/assignments/language-subtag-registry
language_codes = {
# code TeX/Babel-name comment
'af': 'afrikaans',
'ar': 'arabic',
# 'be': 'belarusian',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
# 'cop': 'coptic',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'de': 'ngerman', # new spelling (de_1996)
'de-1901': 'german', # old spelling
'de-AT': 'naustrian',
'de-AT-1901': 'austrian',
'dsb': 'lowersorbian',
'el': 'greek', # monotonic (el-monoton)
'el-polyton': 'polutonikogreek',
'en': 'english', # TeX' default language
'en-AU': 'australian',
'en-CA': 'canadian',
'en-GB': 'british',
'en-NZ': 'newzealand',
'en-US': 'american',
'eo': 'esperanto',
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
# 'fa': 'farsi',
'fi': 'finnish',
'fr': 'french',
'fr-CA': 'canadien',
'ga': 'irish', # Irish Gaelic
# 'grc': # Ancient Greek
'grc-ibycus': 'ibycus', # Ibycus encoding
'gl': 'galician',
'he': 'hebrew',
'hr': 'croatian',
'hsb': 'uppersorbian',
'hu': 'magyar',
'ia': 'interlingua',
'id': 'bahasai', # Bahasa (Indonesian)
'is': 'icelandic',
'it': 'italian',
'ja': 'japanese',
'kk': 'kazakh',
'la': 'latin',
'lt': 'lithuanian',
'lv': 'latvian',
'mn': 'mongolian', # Mongolian, Cyrillic script (mn-cyrl)
'ms': 'bahasam', # Bahasa (Malay)
'nb': 'norsk', # Norwegian Bokmal
'nl': 'dutch',
'nn': 'nynorsk', # Norwegian Nynorsk
'no': 'norsk', # Norwegian (Bokmal)
'pl': 'polish',
'pt': 'portuges',
'pt-BR': 'brazil',
'ro': 'romanian',
'ru': 'russian',
'se': 'samin', # North Sami
'sh-Cyrl': 'serbianc', # Serbo-Croatian, Cyrillic script
'sh-Latn': 'serbian', # Serbo-Croatian, Latin script see also 'hr'
'sk': 'slovak',
'sl': 'slovene',
'sq': 'albanian',
'sr': 'serbianc', # Serbian, Cyrillic script (contributed)
'sr-Latn': 'serbian', # Serbian, Latin script
'sv': 'swedish',
# 'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'vi': 'vietnam',
# zh-Latn: Chinese Pinyin
}
# normalize (downcase) keys
language_codes = dict([(k.lower(), v) for (k,v) in language_codes.items()])
warn_msg = 'Language "%s" not supported by LaTeX (babel)'
# "Active characters" are shortcuts that start a LaTeX macro and may need
    # escaping for literal use. Characters that prevent literal use (e.g.
# starting accent macros like "a -> ä) will be deactivated if one of the
# defining languages is used in the document.
# Special cases:
# ~ (tilde) -- used in estonian, basque, galician, and old versions of
# spanish -- cannot be deactivated as it denotes a no-break space macro,
# " (straight quote) -- used in albanian, austrian, basque
# brazil, bulgarian, catalan, czech, danish, dutch, estonian,
# finnish, galician, german, icelandic, italian, latin, naustrian,
# ngerman, norsk, nynorsk, polish, portuges, russian, serbian, slovak,
# slovene, spanish, swedish, ukrainian, and uppersorbian --
# is escaped as ``\textquotedbl``.
active_chars = {# TeX/Babel-name: active characters to deactivate
# 'breton': ':;!?' # ensure whitespace
# 'esperanto': '^',
# 'estonian': '~"`',
# 'french': ':;!?' # ensure whitespace
'galician': '.<>', # also '~"'
# 'magyar': '`', # for special hyphenation cases
'spanish': '.<>', # old versions also '~'
# 'turkish': ':!=' # ensure whitespace
}
def __init__(self, language_code, reporter=None):
self.reporter = reporter
self.language = self.language_name(language_code)
self.otherlanguages = {}
def __call__(self):
"""Return the babel call with correct options and settings"""
languages = sorted(self.otherlanguages.keys())
languages.append(self.language or 'english')
self.setup = [r'\usepackage[%s]{babel}' % ','.join(languages)]
# Deactivate "active characters"
shorthands = []
for c in ''.join([self.active_chars.get(l, '') for l in languages]):
if c not in shorthands:
shorthands.append(c)
if shorthands:
self.setup.append(r'\AtBeginDocument{\shorthandoff{%s}}'
% ''.join(shorthands))
# Including '~' in shorthandoff prevents its use as no-break space
if 'galician' in languages:
self.setup.append(r'\deactivatetilden % restore ~ in Galician')
if 'estonian' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasestonian{\bbl@deactivate{~}}',
r'\makeatother'])
if 'basque' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasbasque{\bbl@deactivate{~}}',
r'\makeatother'])
if (languages[-1] == 'english' and
'french' in self.otherlanguages.keys()):
self.setup += ['% Prevent side-effects if French hyphenation '
'patterns are not loaded:',
r'\frenchbsetup{StandardLayout}',
r'\AtBeginDocument{\selectlanguage{%s}'
r'\noextrasfrench}' % self.language]
return '\n'.join(self.setup)
def language_name(self, language_code):
"""Return TeX language name for `language_code`"""
for tag in utils.normalize_language_tag(language_code):
try:
return self.language_codes[tag]
except KeyError:
pass
if self.reporter is not None:
self.reporter.warning(self.warn_msg % language_code)
return ''
def get_language(self):
# Obsolete, kept for backwards compatibility with Sphinx
return self.language
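    # Minimal usage sketch (hypothetical values): for a German document that
    # also quotes French,
    #   babel = Babel('de')
    #   babel.otherlanguages['french'] = True
    #   babel()   # -> '\usepackage[french,ngerman]{babel}' plus any
    #             #    applicable shorthand deactivation lines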
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):
"""Dictionary with additional sorting methods
Tip: use key starting with with '_' for sorting before small letters
and with '~' for sorting after small letters.
"""
def sortedkeys(self):
"""Return sorted list of keys"""
        return sorted(self.keys())
def sortedvalues(self):
"""Return list of values sorted by keys"""
return [self[key] for key in self.sortedkeys()]
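    # Example: ASCII order puts '_' before letters and '~' after them, so
    # SortableDict({'~z': 3, '_a': 1, 'm': 2}).sortedvalues() == [1, 2, 3].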
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
"""Building blocks for the latex preamble."""
PreambleCmds.abstract = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centering\textbf{#1}}"""
PreambleCmds.admonition = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
% try \DUadmonition#1{#2}:
\ifcsname DUadmonition#1\endcsname%
\csname DUadmonition#1\endcsname{#2}%
\else
\begin{center}
\fbox{\parbox{0.9\textwidth}{#2}}
\end{center}
\fi
}"""
PreambleCmds.align_center = r"""
\makeatletter
\@namedef{DUrolealign-center}{\centering}
\makeatother
"""
## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""
PreambleCmds.color = r"""\usepackage{color}"""
PreambleCmds.docinfo = r"""
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\textwidth}"""
# PreambleCmds.docinfo._depends = 'providelength'
PreambleCmds.dedication = r"""
% dedication topic
\providecommand{\DUtopicdedication}[1]{\begin{center}#1\end{center}}"""
PreambleCmds.error = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
# PreambleCmds.errortitle._depends = 'color'
PreambleCmds.fieldlist = r"""
% fieldlist environment
\ifthenelse{\isundefined{\DUfieldlist}}{
\newenvironment{DUfieldlist}%
{\quote\description}
{\enddescription\endquote}
}{}"""
PreambleCmds.float_settings = r"""\usepackage{float} % float configuration
\floatplacement{figure}{H} % place figures here definitely"""
PreambleCmds.footnotes = r"""% numeric or symbol footnotes with hyperlinks
\providecommand*{\DUfootnotemark}[3]{%
\raisebox{1em}{\hypertarget{#1}{}}%
\hyperlink{#2}{\textsuperscript{#3}}%
}
\providecommand{\DUfootnotetext}[4]{%
\begingroup%
\renewcommand{\thefootnote}{%
\protect\raisebox{1em}{\protect\hypertarget{#1}{}}%
\protect\hyperlink{#2}{#3}}%
\footnotetext{#4}%
\endgroup%
}"""
PreambleCmds.footnote_floats = r"""% settings for footnotes as floats:
\setlength{\floatsep}{0.5em}
\setlength{\textfloatsep}{\fill}
\addtolength{\textfloatsep}{3em}
\renewcommand{\textfraction}{0.5}
\renewcommand{\topfraction}{0.5}
\renewcommand{\bottomfraction}{0.5}
\setcounter{totalnumber}{50}
\setcounter{topnumber}{50}
\setcounter{bottomnumber}{50}"""
PreambleCmds.graphicx_auto = r"""% Check output format
\ifx\pdftexversion\undefined
\usepackage{graphicx}
\else
\usepackage[pdftex]{graphicx}
\fi"""
PreambleCmds.highlight_rules = r"""% basic code highlight:
\providecommand*\DUrolecomment[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUroledeleted[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUrolekeyword[1]{\textbf{#1}}
\providecommand*\DUrolestring[1]{\textit{#1}}"""
PreambleCmds.inline = r"""
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}"""
PreambleCmds.legend = r"""
% legend environment
\ifthenelse{\isundefined{\DUlegend}}{
\newenvironment{DUlegend}{\small}{}
}{}"""
PreambleCmds.lineblock = r"""
% lineblock environment
\DUprovidelength{\DUlineblockindent}{2.5em}
\ifthenelse{\isundefined{\DUlineblock}}{
\newenvironment{DUlineblock}[1]{%
\list{}{\setlength{\partopsep}{\parskip}
\addtolength{\partopsep}{\baselineskip}
\setlength{\topsep}{0pt}
\setlength{\itemsep}{0.15\baselineskip}
\setlength{\parsep}{0pt}
\setlength{\leftmargin}{#1}}
\raggedright
}
{\endlist}
}{}"""
# PreambleCmds.lineblock._depends = 'providelength'
PreambleCmds.linking = r"""
%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[%s]{hyperref}
\urlstyle{same} %% normal text font (alternatives: tt, rm, sf)
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.optionlist = r"""
% optionlist environment
\providecommand*{\DUoptionlistlabel}[1]{\bf #1 \hfill}
\DUprovidelength{\DUoptionlistindent}{3cm}
\ifthenelse{\isundefined{\DUoptionlist}}{
\newenvironment{DUoptionlist}{%
\list{}{\setlength{\labelwidth}{\DUoptionlistindent}
\setlength{\rightmargin}{1cm}
\setlength{\leftmargin}{\rightmargin}
\addtolength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\renewcommand{\makelabel}{\DUoptionlistlabel}}
}
{\endlist}
}{}"""
# PreambleCmds.optionlist._depends = 'providelength'
PreambleCmds.providelength = r"""
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}"""
PreambleCmds.rubric = r"""
% rubric (informal heading)
\providecommand*{\DUrubric}[2][class-arg]{%
\subsubsection*{\centering\textit{\textmd{#2}}}}"""
PreambleCmds.sidebar = r"""
% sidebar (text outside the main text flow)
\providecommand{\DUsidebar}[2][class-arg]{%
\begin{center}
\colorbox[gray]{0.80}{\parbox{0.9\textwidth}{#2}}
\end{center}
}"""
PreambleCmds.subtitle = r"""
% subtitle (for topic/sidebar)
\providecommand*{\DUsubtitle}[2][class-arg]{\par\emph{#2}\smallskip}"""
PreambleCmds.documentsubtitle = r"""
% subtitle (in document title)
\providecommand*{\DUdocumentsubtitle}[1]{{\large #1}}"""
PreambleCmds.table = r"""\usepackage{longtable,ltcaption,array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
# Options [force,almostfull] prevent spurious error messages, see
# de.comp.text.tex/2005-12/msg01855
PreambleCmds.textcomp = """\
\\usepackage{textcomp} % text symbol macros"""
PreambleCmds.titlereference = r"""
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}"""
PreambleCmds.title = r"""
% title for topics, admonitions, unsupported section levels, and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
PreambleCmds.topic = r"""
% topic (quote with heading)
\providecommand{\DUtopic}[2][class-arg]{%
\ifcsname DUtopic#1\endcsname%
\csname DUtopic#1\endcsname{#2}%
\else
\begin{quote}#2\end{quote}
\fi
}"""
PreambleCmds.transition = r"""
% transition (break, fancybreak, anonymous section)
\providecommand*{\DUtransition}[1][class-arg]{%
\hspace*{\fill}\hrulefill\hspace*{\fill}
\vskip 0.5\baselineskip
}"""
# LaTeX encoding maps
# -------------------
# ::
class CharMaps(object):
"""LaTeX representations for active and Unicode characters."""
# characters that always need escaping:
special = {
ord('#'): ur'\#',
ord('$'): ur'\$',
ord('%'): ur'\%',
ord('&'): ur'\&',
ord('~'): ur'\textasciitilde{}',
ord('_'): ur'\_',
ord('^'): ur'\textasciicircum{}',
ord('\\'): ur'\textbackslash{}',
ord('{'): ur'\{',
ord('}'): ur'\}',
# straight double quotes are 'active' in many languages
ord('"'): ur'\textquotedbl{}',
# Square brackets are ordinary chars and cannot be escaped with '\',
# so we put them in a group '{[}'. (Alternative: ensure that all
# macros with optional arguments are terminated with {} and text
# inside any optional argument is put in a group ``[{text}]``).
# Commands with optional args inside an optional arg must be put in a
# group, e.g. ``\item[{\hyperref[label]{text}}]``.
ord('['): ur'{[}',
ord(']'): ur'{]}',
# the soft hyphen is unknown in 8-bit text
# and not properly handled by XeTeX
0x00AD: ur'\-', # SOFT HYPHEN
}
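    # Example (sketch): mapping 'a [b] c' through these entries yields
    # u'a {[}b{]} c', so the brackets cannot be mistaken for an optional
    # macro argument.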
# Unicode chars that are not recognized by LaTeX's utf8 encoding
unsupported_unicode = {
0x00A0: ur'~', # NO-BREAK SPACE
# TODO: ensure white space also at the beginning of a line?
# 0x00A0: ur'\leavevmode\nobreak\vadjust{}~'
0x2008: ur'\,', # PUNCTUATION SPACE
0x2011: ur'\hbox{-}', # NON-BREAKING HYPHEN
0x202F: ur'\,', # NARROW NO-BREAK SPACE
0x21d4: ur'$\Leftrightarrow$',
# Docutils footnote symbols:
0x2660: ur'$\spadesuit$',
0x2663: ur'$\clubsuit$',
}
# Unicode chars that are recognized by LaTeX's utf8 encoding
utf8_supported_unicode = {
0x00AB: ur'\guillemotleft', # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bb: ur'\guillemotright', # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x200C: ur'\textcompwordmark', # ZERO WIDTH NON-JOINER
0x2013: ur'\textendash{}',
0x2014: ur'\textemdash{}',
0x2018: ur'\textquoteleft{}',
0x2019: ur'\textquoteright{}',
0x201A: ur'\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
0x201C: ur'\textquotedblleft{}',
0x201D: ur'\textquotedblright{}',
0x201E: ur'\quotedblbase{}', # DOUBLE LOW-9 QUOTATION MARK
0x2030: ur'\textperthousand{}', # PER MILLE SIGN
0x2031: ur'\textpertenthousand{}', # PER TEN THOUSAND SIGN
0x2039: ur'\guilsinglleft{}',
0x203A: ur'\guilsinglright{}',
0x2423: ur'\textvisiblespace{}', # OPEN BOX
0x2020: ur'\dag{}',
0x2021: ur'\ddag{}',
0x2026: ur'\dots{}',
0x2122: ur'\texttrademark{}',
}
# recognized with 'utf8', if textcomp is loaded
textcomp = {
# Latin-1 Supplement
0x00a2: ur'\textcent{}', # ¢ CENT SIGN
0x00a4: ur'\textcurrency{}', # ¤ CURRENCY SYMBOL
0x00a5: ur'\textyen{}', # ¥ YEN SIGN
0x00a6: ur'\textbrokenbar{}', # ¦ BROKEN BAR
0x00a7: ur'\textsection{}', # § SECTION SIGN
0x00a8: ur'\textasciidieresis{}', # ¨ DIAERESIS
0x00a9: ur'\textcopyright{}', # © COPYRIGHT SIGN
0x00aa: ur'\textordfeminine{}', # ª FEMININE ORDINAL INDICATOR
0x00ac: ur'\textlnot{}', # ¬ NOT SIGN
0x00ae: ur'\textregistered{}', # ® REGISTERED SIGN
0x00af: ur'\textasciimacron{}', # ¯ MACRON
0x00b0: ur'\textdegree{}', # ° DEGREE SIGN
0x00b1: ur'\textpm{}', # ± PLUS-MINUS SIGN
0x00b2: ur'\texttwosuperior{}', # ² SUPERSCRIPT TWO
0x00b3: ur'\textthreesuperior{}', # ³ SUPERSCRIPT THREE
0x00b4: ur'\textasciiacute{}', # ´ ACUTE ACCENT
0x00b5: ur'\textmu{}', # µ MICRO SIGN
0x00b6: ur'\textparagraph{}', # ¶ PILCROW SIGN # != \textpilcrow
0x00b9: ur'\textonesuperior{}', # ¹ SUPERSCRIPT ONE
0x00ba: ur'\textordmasculine{}', # º MASCULINE ORDINAL INDICATOR
0x00bc: ur'\textonequarter{}', # 1/4 FRACTION
0x00bd: ur'\textonehalf{}', # 1/2 FRACTION
0x00be: ur'\textthreequarters{}', # 3/4 FRACTION
0x00d7: ur'\texttimes{}', # × MULTIPLICATION SIGN
0x00f7: ur'\textdiv{}', # ÷ DIVISION SIGN
# others
0x0192: ur'\textflorin{}', # LATIN SMALL LETTER F WITH HOOK
0x02b9: ur'\textasciiacute{}', # MODIFIER LETTER PRIME
0x02ba: ur'\textacutedbl{}', # MODIFIER LETTER DOUBLE PRIME
0x2016: ur'\textbardbl{}', # DOUBLE VERTICAL LINE
0x2022: ur'\textbullet{}', # BULLET
0x2032: ur'\textasciiacute{}', # PRIME
0x2033: ur'\textacutedbl{}', # DOUBLE PRIME
0x2035: ur'\textasciigrave{}', # REVERSED PRIME
0x2036: ur'\textgravedbl{}', # REVERSED DOUBLE PRIME
0x203b: ur'\textreferencemark{}', # REFERENCE MARK
0x203d: ur'\textinterrobang{}', # INTERROBANG
0x2044: ur'\textfractionsolidus{}', # FRACTION SLASH
0x2045: ur'\textlquill{}', # LEFT SQUARE BRACKET WITH QUILL
0x2046: ur'\textrquill{}', # RIGHT SQUARE BRACKET WITH QUILL
0x2052: ur'\textdiscount{}', # COMMERCIAL MINUS SIGN
0x20a1: ur'\textcolonmonetary{}', # COLON SIGN
0x20a3: ur'\textfrenchfranc{}', # FRENCH FRANC SIGN
0x20a4: ur'\textlira{}', # LIRA SIGN
0x20a6: ur'\textnaira{}', # NAIRA SIGN
0x20a9: ur'\textwon{}', # WON SIGN
0x20ab: ur'\textdong{}', # DONG SIGN
0x20ac: ur'\texteuro{}', # EURO SIGN
0x20b1: ur'\textpeso{}', # PESO SIGN
0x20b2: ur'\textguarani{}', # GUARANI SIGN
0x2103: ur'\textcelsius{}', # DEGREE CELSIUS
0x2116: ur'\textnumero{}', # NUMERO SIGN
        0x2117: ur'\textcircledP{}', # SOUND RECORDING COPYRIGHT
0x211e: ur'\textrecipe{}', # PRESCRIPTION TAKE
0x2120: ur'\textservicemark{}', # SERVICE MARK
0x2122: ur'\texttrademark{}', # TRADE MARK SIGN
0x2126: ur'\textohm{}', # OHM SIGN
0x2127: ur'\textmho{}', # INVERTED OHM SIGN
0x212e: ur'\textestimated{}', # ESTIMATED SYMBOL
0x2190: ur'\textleftarrow{}', # LEFTWARDS ARROW
0x2191: ur'\textuparrow{}', # UPWARDS ARROW
0x2192: ur'\textrightarrow{}', # RIGHTWARDS ARROW
0x2193: ur'\textdownarrow{}', # DOWNWARDS ARROW
0x2212: ur'\textminus{}', # MINUS SIGN
0x2217: ur'\textasteriskcentered{}', # ASTERISK OPERATOR
0x221a: ur'\textsurd{}', # SQUARE ROOT
0x2422: ur'\textblank{}', # BLANK SYMBOL
0x25e6: ur'\textopenbullet{}', # WHITE BULLET
0x25ef: ur'\textbigcircle{}', # LARGE CIRCLE
0x266a: ur'\textmusicalnote{}', # EIGHTH NOTE
0x26ad: ur'\textmarried{}', # MARRIAGE SYMBOL
0x26ae: ur'\textdivorced{}', # DIVORCE SYMBOL
0x27e8: ur'\textlangle{}', # MATHEMATICAL LEFT ANGLE BRACKET
0x27e9: ur'\textrangle{}', # MATHEMATICAL RIGHT ANGLE BRACKET
}
# Unicode chars that require a feature/package to render
pifont = {
0x2665: ur'\ding{170}', # black heartsuit
0x2666: ur'\ding{169}', # black diamondsuit
0x2713: ur'\ding{51}', # check mark
        0x2717: ur'\ding{55}', # ballot X (cross mark)
}
# TODO: greek alphabet ... ?
# see also LaTeX codec
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
# and unimap.py from TeXML
class DocumentClass(object):
"""Details of a LaTeX document class."""
def __init__(self, document_class, with_part=False):
self.document_class = document_class
self._with_part = with_part
self.sections = ['section', 'subsection', 'subsubsection',
'paragraph', 'subparagraph']
if self.document_class in ('book', 'memoir', 'report',
'scrbook', 'scrreprt'):
self.sections.insert(0, 'chapter')
if self._with_part:
self.sections.insert(0, 'part')
def section(self, level):
"""Return the LaTeX section name for section `level`.
The name depends on the specific document class.
Level is 1,2,3..., as level 0 is the title.
"""
if level <= len(self.sections):
return self.sections[level-1]
else: # unsupported levels
return 'DUtitle[section%s]' % roman.toRoman(level)
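    # Example: DocumentClass('book').section(1) -> 'chapter', while
    # DocumentClass('article').section(2) -> 'subsection'; levels beyond
    # the known list fall back to a DUtitle command.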
class Table(object):
"""Manage a table while traversing.
    Maybe change to a mixin defining the visit/depart methods, but then
    Table's internal variables would live in the Translator.
Table style might be
:standard: horizontal and vertical lines
:booktabs: only horizontal lines (requires "booktabs" LaTeX package)
:borderless: no borders around table cells
:nolines: alias for borderless
"""
def __init__(self,translator,latex_type,table_style):
self._translator = translator
self._latex_type = latex_type
self._table_style = table_style
self._open = False
# miscellaneous attributes
self._attrs = {}
self._col_width = []
self._rowspan = []
self.stubs = []
self._in_thead = 0
def open(self):
self._open = True
self._col_specs = []
self.caption = []
self._attrs = {}
self._in_head = False # maybe context with search
def close(self):
self._open = False
self._col_specs = None
self.caption = []
self._attrs = {}
self.stubs = []
def is_open(self):
return self._open
def set_table_style(self, table_style):
        if table_style not in ('standard','booktabs','borderless','nolines'):
return
self._table_style = table_style
def get_latex_type(self):
if self._latex_type == 'longtable' and not self.caption:
# do not advance the "table" counter (requires "ltcaption" package)
            return 'longtable*'
return self._latex_type
def set(self,attr,value):
self._attrs[attr] = value
def get(self,attr):
if attr in self._attrs:
return self._attrs[attr]
return None
def get_vertical_bar(self):
if self._table_style == 'standard':
return '|'
return ''
# horizontal lines are drawn below a row,
def get_opening(self):
return '\n'.join([r'\setlength{\DUtablewidth}{\linewidth}',
r'\begin{%s}[c]' % self.get_latex_type()])
def get_closing(self):
closing = []
if self._table_style == 'booktabs':
closing.append(r'\bottomrule')
# elif self._table_style == 'standard':
# closing.append(r'\hline')
closing.append(r'\end{%s}' % self.get_latex_type())
return '\n'.join(closing)
def visit_colspec(self, node):
self._col_specs.append(node)
# "stubs" list is an attribute of the tgroup element:
self.stubs.append(node.attributes.get('stub'))
def get_colspecs(self):
"""Return column specification for longtable.
Assumes reST line length being 80 characters.
Table width is hairy.
=== ===
ABC DEF
=== ===
        usually gets too narrow, therefore we add 1 (fiddle factor).
"""
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self._col_specs:
colwidth = float(node['colwidth']+1) / width
total_width += colwidth
self._col_width = []
self._rowspan = []
        # do not make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
bar = self.get_vertical_bar()
latex_table_spec = ''
for node in self._col_specs:
colwidth = factor * float(node['colwidth']+1) / width
self._col_width.append(colwidth+0.005)
self._rowspan.append(0)
latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar, colwidth+0.005)
return latex_table_spec+bar
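    # Worked example (sketch): two colspecs with colwidth 3 each, at the
    # assumed 80-character line length, give 0.93 * (3+1)/80 ~ 0.047 per
    # column, emitted as roughly p{0.052\DUtablewidth} after the 0.005 padding.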
def get_column_width(self):
"""Return columnwidth for current cell (not multicell)."""
return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row-1]
    def get_multicolumn_width(self, start, len_):
        """Return sum of columnwidths for multicell."""
        mc_width = sum(self._col_width[start + co - 1] for co in range(len_))
        return '%.2f\\DUtablewidth' % mc_width
def get_caption(self):
if not self.caption:
return ''
caption = ''.join(self.caption)
if 1 == self._translator.thead_depth():
return r'\caption{%s}\\' '\n' % caption
return r'\caption[]{%s (... continued)}\\' '\n' % caption
def need_recurse(self):
if self._latex_type == 'longtable':
return 1 == self._translator.thead_depth()
return 0
def visit_thead(self):
self._in_thead += 1
if self._table_style == 'standard':
return ['\\hline\n']
elif self._table_style == 'booktabs':
return ['\\toprule\n']
return []
def depart_thead(self):
a = []
#if self._table_style == 'standard':
# a.append('\\hline\n')
if self._table_style == 'booktabs':
a.append('\\midrule\n')
if self._latex_type == 'longtable':
if 1 == self._translator.thead_depth():
a.append('\\endfirsthead\n')
else:
a.append('\\endhead\n')
a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
r'{\hfill ... continued on next page} \\')
a.append('\n\\endfoot\n\\endlastfoot\n')
# for longtable one could add firsthead, foot and lastfoot
self._in_thead -= 1
return a
def visit_row(self):
self._cell_in_row = 0
def depart_row(self):
res = [' \\\\\n']
self._cell_in_row = None # remove cell counter
for i in range(len(self._rowspan)):
if (self._rowspan[i]>0):
self._rowspan[i] -= 1
if self._table_style == 'standard':
rowspans = [i+1 for i in range(len(self._rowspan))
if (self._rowspan[i]<=0)]
if len(rowspans)==len(self._rowspan):
res.append('\\hline\n')
else:
cline = ''
rowspans.reverse()
# TODO merge clines
while True:
try:
c_start = rowspans.pop()
except IndexError:
break
cline += '\\cline{%d-%d}\n' % (c_start,c_start)
res.append(cline)
return res
def set_rowspan(self,cell,value):
try:
self._rowspan[cell] = value
except IndexError:
pass
def get_rowspan(self,cell):
try:
return self._rowspan[cell]
except IndexError:
return 0
def get_entry_number(self):
return self._cell_in_row
def visit_entry(self):
self._cell_in_row += 1
def is_stub_column(self):
if len(self.stubs) >= self._cell_in_row:
return self.stubs[self._cell_in_row-1]
return False
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
# Write code for typesetting with 8-bit tex/pdftex (vs. xetex/luatex) engine
# overwritten by the XeTeX writer
is_xetex = False
# Config setting defaults
# -----------------------
# TODO: use mixins for different implementations.
# list environment for docinfo. else tabularx
## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = False
# If using compound enumerations, include section information.
section_prefix_for_enumerators = False
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# Auxiliary variables
# -------------------
has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
is_toc_list = False # is the current bullet_list a ToC?
section_level = 0
# Flags to encode():
# inside citation reference labels, underscores don't need to be escaped
inside_citation_reference_label = False
verbatim = False # do not encode
insert_non_breaking_blanks = False # replace blanks by "~"
insert_newline = False # add latex newline commands
literal = False # literal text (block or inline)
def __init__(self, document, babel_class=Babel):
nodes.NodeVisitor.__init__(self, document)
# Reporter
# ~~~~~~~~
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
# ~~~~~~~~
self.settings = settings = document.settings
self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
self.use_latex_toc = settings.use_latex_toc
self.use_latex_docinfo = settings.use_latex_docinfo
self._use_latex_citations = settings.use_latex_citations
self._reference_label = settings.reference_label
self.hyperlink_color = settings.hyperlink_color
self.compound_enumerators = settings.compound_enumerators
self.font_encoding = getattr(settings, 'font_encoding', '')
self.section_prefix_for_enumerators = (
settings.section_prefix_for_enumerators)
self.section_enumerator_separator = (
settings.section_enumerator_separator.replace('_', r'\_'))
# literal blocks:
self.literal_block_env = ''
self.literal_block_options = ''
if settings.literal_block_env != '':
(none,
self.literal_block_env,
self.literal_block_options,
none) = re.split(r'(\w+)(.*)', settings.literal_block_env)
elif settings.use_verbatim_when_possible:
self.literal_block_env = 'verbatim'
#
if self.settings.use_bibtex:
self.bibtex = self.settings.use_bibtex.split(',',1)
# TODO avoid errors on not declared citations.
else:
self.bibtex = None
# language module for Docutils-generated text
# (labels, bibliographic_fields, and author_separators)
self.language_module = languages.get_language(settings.language_code,
document.reporter)
self.babel = babel_class(settings.language_code, document.reporter)
self.author_separator = self.language_module.author_separators[0]
d_options = [self.settings.documentoptions]
if self.babel.language not in ('english', ''):
d_options.append(self.babel.language)
self.documentoptions = ','.join(filter(None, d_options))
self.d_class = DocumentClass(settings.documentclass,
settings.use_part_section)
# graphic package options:
if self.settings.graphicx_option == '':
self.graphicx_package = r'\usepackage{graphicx}'
elif self.settings.graphicx_option.lower() == 'auto':
self.graphicx_package = PreambleCmds.graphicx_auto
else:
self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
self.settings.graphicx_option)
# footnotes:
self.docutils_footnotes = settings.docutils_footnotes
if settings.use_latex_footnotes:
self.docutils_footnotes = True
self.warn('`use_latex_footnotes` is deprecated. '
'The setting has been renamed to `docutils_footnotes` '
'and the alias will be removed in a future version.')
self.figure_footnotes = settings.figure_footnotes
if self.figure_footnotes:
self.docutils_footnotes = True
self.warn('The "figure footnotes" workaround/setting is strongly '
'deprecated and will be removed in a future version.')
# Output collection stacks
# ~~~~~~~~~~~~~~~~~~~~~~~~
# Document parts
self.head_prefix = [r'\documentclass[%s]{%s}' %
(self.documentoptions, self.settings.documentclass)]
self.requirements = SortableDict() # made a list in depart_document()
self.requirements['__static'] = r'\usepackage{ifthen}'
self.latex_preamble = [settings.latex_preamble]
self.fallbacks = SortableDict() # made a list in depart_document()
self.pdfsetup = [] # PDF properties (hyperref package)
self.title = []
self.subtitle = []
self.titledata = [] # \title, \author, \date
## self.body_prefix = ['\\begin{document}\n']
self.body_pre_docinfo = [] # \maketitle
self.docinfo = []
self.dedication = []
self.abstract = []
self.body = []
## self.body_suffix = ['\\end{document}\n']
# A heterogeneous stack used in conjunction with the tree traversal.
# Make sure that the pops correspond to the pushes:
self.context = []
# Title metadata:
self.title_labels = []
self.subtitle_labels = []
# (if use_latex_docinfo: collects lists of
# author/organization/contact/address lines)
self.author_stack = []
self.date = []
# PDF properties: pdftitle, pdfauthor
# TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
self.pdfinfo = []
self.pdfauthor = []
# Stack of section counters so that we don't have to use_latex_toc.
# This will grow and shrink as processing occurs.
# Initialized for potential first-level sections.
self._section_number = [0]
# The current stack of enumerations so that we can expand
# them into a compound enumeration.
self._enumeration_counters = []
# The maximum number of enumeration counters we've used.
# If we go beyond this number, we need to create a new
# counter; otherwise, just reuse an old one.
self._max_enumeration_counters = 0
self._bibitems = []
# object for a table while processing.
self.table_stack = []
self.active_table = Table(self, 'longtable', settings.table_style)
# Where to collect the output of visitor methods (default: body)
self.out = self.body
self.out_stack = [] # stack of output collectors
# Process settings
# ~~~~~~~~~~~~~~~~
# Encodings:
# Docutils' output-encoding => TeX input encoding
if self.latex_encoding != 'ascii':
self.requirements['_inputenc'] = (r'\usepackage[%s]{inputenc}'
% self.latex_encoding)
# TeX font encoding
if not self.is_xetex:
if self.font_encoding:
self.requirements['_fontenc'] = (r'\usepackage[%s]{fontenc}' %
self.font_encoding)
# ensure \textquotedbl is defined:
for enc in self.font_encoding.split(','):
enc = enc.strip()
if enc == 'OT1':
self.requirements['_textquotedblOT1'] = (
r'\DeclareTextSymbol{\textquotedbl}{OT1}{`\"}')
elif enc not in ('T1', 'T2A', 'T2B', 'T2C', 'T4', 'T5'):
self.requirements['_textquotedbl'] = (
r'\DeclareTextSymbolDefault{\textquotedbl}{T1}')
# page layout with typearea (if there are relevant document options)
if (settings.documentclass.find('scr') == -1 and
(self.documentoptions.find('DIV') != -1 or
self.documentoptions.find('BCOR') != -1)):
self.requirements['typearea'] = r'\usepackage{typearea}'
# Stylesheets
# (the name `self.stylesheet` is singular because only one
# stylesheet was supported before Docutils 0.6).
self.stylesheet = [self.stylesheet_call(path)
for path in utils.get_stylesheet_list(settings)]
# PDF setup
if self.hyperlink_color in ('0', 'false', 'False', ''):
self.hyperref_options = ''
else:
self.hyperref_options = 'colorlinks=true,linkcolor=%s,urlcolor=%s' % (
self.hyperlink_color, self.hyperlink_color)
if settings.hyperref_options:
self.hyperref_options += ',' + settings.hyperref_options
# LaTeX Toc
# include all supported sections in toc and PDF bookmarks
# (or use documentclass-default (as currently))?
## if self.use_latex_toc:
## self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
## len(self.d_class.sections))
# Section numbering
if settings.sectnum_xform: # section numbering by Docutils
PreambleCmds.secnumdepth = r'\setcounter{secnumdepth}{0}'
else: # section numbering by LaTeX:
secnumdepth = settings.sectnum_depth
# Possible values of settings.sectnum_depth:
# None "sectnum" directive without depth arg -> LaTeX default
# 0 no "sectnum" directive -> no section numbers
# >0 value of "depth" argument -> translate to LaTeX levels:
# -1 part (0 with "article" document class)
# 0 chapter (missing in "article" document class)
# 1 section
# 2 subsection
# 3 subsubsection
# 4 paragraph
# 5 subparagraph
if secnumdepth is not None:
# limit to supported levels
secnumdepth = min(secnumdepth, len(self.d_class.sections))
# adjust to document class and use_part_section settings
if 'chapter' in self.d_class.sections:
secnumdepth -= 1
if self.d_class.sections[0] == 'part':
secnumdepth -= 1
PreambleCmds.secnumdepth = \
r'\setcounter{secnumdepth}{%d}' % secnumdepth
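# Example (assuming the "article" document class, whose section list
# starts with 'section'): settings.sectnum_depth = 2 passes both
# checks above unchanged and yields
#   \setcounter{secnumdepth}{2}
# i.e. LaTeX numbers sections and subsections.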
# start with specified number:
if (hasattr(settings, 'sectnum_start') and
settings.sectnum_start != 1):
self.requirements['sectnum_start'] = (
r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
settings.sectnum_start-1))
# TODO: currently ignored (configure in a stylesheet):
## settings.sectnum_prefix
## settings.sectnum_suffix
# Auxiliary Methods
# -----------------
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
# is it a package (no extension or *.sty) or "normal" tex code:
(base, ext) = os.path.splitext(path)
is_package = ext in ['.sty', '']
# Embed content of style file:
if self.settings.embed_stylesheet:
if is_package:
path = base + '.sty' # ensure extension
try:
content = io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except IOError, err:
msg = u"Cannot embed stylesheet '%s':\n %s." % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '% ' + msg.replace('\n', '\n% ')
if is_package:
content = '\n'.join([r'\makeatletter',
content,
r'\makeatother'])
return '%% embedded stylesheet: %s\n%s' % (path, content)
# Link to style file:
if is_package:
path = base # drop extension
cmd = r'\usepackage{%s}'
else:
cmd = r'\input{%s}'
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return cmd % path
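# Sketch of the non-embedded cases (hypothetical file names):
#   stylesheet_call('mystyle.sty') -> '\usepackage{mystyle}'
#   stylesheet_call('extras.tex')  -> '\input{extras.tex}'
# With a stylesheet_path setting, the path is first made relative
# to the output destination.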
def to_latex_encoding(self,docutils_encoding):
"""Translate docutils encoding name into LaTeX's.
By default, remove "-" and "_" chars from docutils_encoding.
"""
tr = { 'iso-8859-1': 'latin1', # west european
'iso-8859-2': 'latin2', # east european
'iso-8859-3': 'latin3', # esperanto, maltese
'iso-8859-4': 'latin4', # north european, scandinavian, baltic
'iso-8859-5': 'iso88595', # cyrillic (ISO)
'iso-8859-9': 'latin5', # turkish
'iso-8859-15': 'latin9', # latin9, update to latin1.
'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
'windows-1251': 'cp1251', # cyrillic (on Windows)
'koi8-r': 'koi8-r', # cyrillic (Russian)
'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
'windows-1250': 'cp1250', #
'windows-1252': 'cp1252', #
'us-ascii': 'ascii', # ASCII (US)
# unmatched encodings
#'': 'applemac',
#'': 'ansinew', # windows 3.1 ansi
#'': 'ascii', # ASCII encoding for the range 32--127.
#'': 'cp437', # dos latin us
#'': 'cp850', # dos latin 1
#'': 'cp852', # dos latin 2
#'': 'decmulti',
#'': 'latin10',
#'iso-8859-6': '' # arabic
#'iso-8859-7': '' # greek
#'iso-8859-8': '' # hebrew
#'iso-8859-10': '' # latin6, more complete iso-8859-4
}
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
# drop hyphen or underscore from "latin-1", "latin_1", "utf-8" and similar
encoding = encoding.replace('_', '').replace('-', '')
# strip the error handler
return encoding.split(':')[0]
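# Examples:
#   to_latex_encoding('iso-8859-1')      -> 'latin1' (table lookup)
#   to_latex_encoding('utf-8')           -> 'utf8'   (hyphen dropped)
#   to_latex_encoding('latin_1:replace') -> 'latin1' (handler stripped)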
def language_label(self, docutil_label):
return self.language_module.labels[docutil_label]
def encode(self, text):
"""Return text with 'problematic' characters escaped.
* Escape the ten special printing characters ``# $ % & ~ _ ^ \ { }``,
square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
* Translate non-supported Unicode characters.
* Separate ``-`` (and more in literal text) to prevent input ligatures.
"""
if self.verbatim:
return text
# Set up the translation table:
table = CharMaps.special.copy()
# keep the underscore in citation references
if self.inside_citation_reference_label:
del table[ord('_')]
# Workarounds for OT1 font-encoding
if self.font_encoding in ['OT1', ''] and not self.is_xetex:
# * out-of-order characters in cmtt
if self.literal:
# replace underscore by underlined blank,
# because this has correct width.
table[ord('_')] = u'\\underline{~}'
# the backslash doesn't work, so we use a mirrored slash.
# \reflectbox is provided by graphicx:
self.requirements['graphicx'] = self.graphicx_package
table[ord('\\')] = ur'\reflectbox{/}'
# * ``< | >`` come out as different chars (except for cmtt):
else:
table[ord('|')] = ur'\textbar{}'
table[ord('<')] = ur'\textless{}'
table[ord('>')] = ur'\textgreater{}'
if self.insert_non_breaking_blanks:
table[ord(' ')] = ur'~'
# Unicode replacements for 8-bit tex engines (not required with XeTeX/LuaTeX):
if not self.is_xetex:
table.update(CharMaps.unsupported_unicode)
if not self.latex_encoding.startswith('utf8'):
table.update(CharMaps.utf8_supported_unicode)
table.update(CharMaps.textcomp)
table.update(CharMaps.pifont)
# Characters that require a feature/package to render
if any(ord(ch) in CharMaps.textcomp for ch in text):
self.requirements['textcomp'] = PreambleCmds.textcomp
if any(ord(ch) in CharMaps.pifont for ch in text):
self.requirements['pifont'] = '\\usepackage{pifont}'
text = text.translate(table)
# Break up input ligatures e.g. '--' to '-{}-'.
if not self.is_xetex: # Not required with xetex/luatex
separate_chars = '-'
# In a monospace font, we also separate ',,', '``' and "''" and some
# other characters which can't occur in non-literal text.
if self.literal:
separate_chars += ',`\'"<>'
for char in separate_chars * 2:
# Do it twice ("* 2") because otherwise we would replace
# '---' by '-{}--'.
text = text.replace(char + char, char + '{}' + char)
# Literal line breaks (in address or literal blocks):
if self.insert_newline:
lines = text.split('\n')
# Add a protected space to blank lines (except the last)
# to avoid ``! LaTeX Error: There's no line here to end.``
for i, line in enumerate(lines[:-1]):
if not line.lstrip():
lines[i] += '~'
text = (r'\\' + '\n').join(lines)
if self.literal and not self.insert_non_breaking_blanks:
# preserve runs of spaces but allow wrapping
text = text.replace('  ', ' ~')
return text
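# A sketch of typical behaviour (assuming CharMaps.special maps the
# LaTeX special characters, e.g. '%' -> '\%' and '&' -> '\&'):
#   encode('10% done & counting') -> '10\% done \& counting'
# In literal text, runs like '--' are additionally split into '-{}-'.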
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
# TODO: is this used anywhere? -> update (use template) or delete
## def astext(self):
## """Assemble document parts and return as string."""
## head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
## body = ''.join(self.body_prefix + self.body + self.body_suffix)
## return head + '\n' + body
def is_inline(self, node):
"""Check whether a node represents an inline or block-level element"""
return isinstance(node.parent, nodes.TextElement)
def append_hypertargets(self, node):
"""Append hypertargets for all ids of `node`"""
# hypertarget places the anchor at the target's baseline,
# so we raise it explicitly
self.out.append('%\n'.join(['\\raisebox{1em}{\\hypertarget{%s}{}}' %
id for id in node['ids']]))
def ids_to_labels(self, node, set_anchor=True):
"""Return list of label definitions for all ids of `node`
If `set_anchor` is True, an anchor is set with \phantomsection.
"""
labels = ['\\label{%s}' % id for id in node.get('ids', [])]
if set_anchor and labels:
labels.insert(0, '\\phantomsection')
return labels
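# For example, a node with ids ['intro', 'sec-1'] yields
#   ['\phantomsection', '\label{intro}', '\label{sec-1}'],
# while set_anchor=False (or an empty id list) omits the anchor.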
def push_output_collector(self, new_out):
self.out_stack.append(self.out)
self.out = new_out
def pop_output_collector(self):
self.out = self.out_stack.pop()
# Visitor methods
# ---------------
def visit_Text(self, node):
self.out.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
node['classes'].insert(0, 'abbreviation')
self.visit_inline(node)
def depart_abbreviation(self, node):
self.depart_inline(node)
def visit_acronym(self, node):
node['classes'].insert(0, 'acronym')
self.visit_inline(node)
def depart_acronym(self, node):
self.depart_inline(node)
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
self.depart_docinfo_item(node)
def visit_admonition(self, node):
self.fallbacks['admonition'] = PreambleCmds.admonition
if 'error' in node['classes']:
self.fallbacks['error'] = PreambleCmds.error
# strip the generic 'admonition' from the list of classes
node['classes'] = [cls for cls in node['classes']
if cls != 'admonition']
self.out.append('\n\\DUadmonition[%s]{\n' % ','.join(node['classes']))
def depart_admonition(self, node=None):
self.out.append('}\n')
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
self.depart_docinfo_item(node)
def visit_authors(self, node):
# not used: visit_author is called anyway for each author.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
self.out.append( '%\n\\begin{quote}\n')
if node['classes']:
self.visit_inline(node)
def depart_block_quote(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '\n\\end{quote}\n')
def visit_bullet_list(self, node):
if self.is_toc_list:
self.out.append( '%\n\\begin{list}{}{}\n' )
else:
self.out.append( '%\n\\begin{itemize}\n' )
# if node['classes']:
# self.visit_inline(node)
def depart_bullet_list(self, node):
# if node['classes']:
# self.depart_inline(node)
if self.is_toc_list:
self.out.append( '\n\\end{list}\n' )
else:
self.out.append( '\n\\end{itemize}\n' )
def visit_superscript(self, node):
self.out.append(r'\textsuperscript{')
if node['classes']:
self.visit_inline(node)
def depart_superscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_subscript(self, node):
self.out.append(r'\textsubscript{') # requires `fixltx2e`
if node['classes']:
self.visit_inline(node)
def depart_subscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_caption(self, node):
self.out.append('\n\\caption{')
def depart_caption(self, node):
self.out.append('}\n')
def visit_title_reference(self, node):
self.fallbacks['titlereference'] = PreambleCmds.titlereference
self.out.append(r'\DUroletitlereference{')
if node['classes']:
self.visit_inline(node)
def depart_title_reference(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '}' )
def visit_citation(self, node):
# TODO maybe use cite bibitems
if self._use_latex_citations:
self.push_output_collector([])
else:
# TODO: do we need these?
## self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
self.out.append(r'\begin{figure}[b]')
self.append_hypertargets(node)
def depart_citation(self, node):
if self._use_latex_citations:
label = self.out[0]
text = ''.join(self.out[1:])
self._bibitems.append([label, text])
self.pop_output_collector()
else:
self.out.append('\\end{figure}\n')
def visit_citation_reference(self, node):
if self._use_latex_citations:
if not self.inside_citation_reference_label:
self.out.append(r'\cite{')
self.inside_citation_reference_label = True
else:
assert self.body[-1] in (' ', '\n'),\
'unexpected non-whitespace while in reference label'
del self.body[-1]
else:
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
self.out.append('\\hyperlink{%s}{[' % href)
def depart_citation_reference(self, node):
if self._use_latex_citations:
followup_citation = False
# check for a following citation separated by a space or newline
next_siblings = node.traverse(descend=False, siblings=True,
include_self=False)
if len(next_siblings) > 1:
next = next_siblings[0]
if (isinstance(next, nodes.Text) and
next.astext() in (' ', '\n')):
if next_siblings[1].__class__ == node.__class__:
followup_citation = True
if followup_citation:
self.out.append(',')
else:
self.out.append('}')
self.inside_citation_reference_label = False
else:
self.out.append(']}')
def visit_classifier(self, node):
self.out.append( '(\\textbf{' )
def depart_classifier(self, node):
self.out.append( '})\n' )
def visit_colspec(self, node):
self.active_table.visit_colspec(node)
def depart_colspec(self, node):
pass
def visit_comment(self, node):
# Precede every line with a comment sign, wrap in newlines
self.out.append('\n%% %s\n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
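# E.g. a comment node with text 'foo\nbar' is emitted as
#   '\n% foo\n% bar\n'
# and SkipNode prevents any further processing of its children.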
def depart_comment(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
def depart_contact(self, node):
self.depart_docinfo_item(node)
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item(node)
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item(node)
def visit_decoration(self, node):
# header and footer
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
self.out.append('\n')
def visit_definition_list(self, node):
self.out.append( '%\n\\begin{description}\n' )
def depart_definition_list(self, node):
self.out.append( '\\end{description}\n' )
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.out.append(' ')
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self.push_output_collector(self.docinfo)
def depart_docinfo(self, node):
self.pop_output_collector()
# Some items (e.g. author) end up in other places
if self.docinfo:
# tabularx: automatic width of columns, no page breaks allowed.
self.requirements['tabularx'] = r'\usepackage{tabularx}'
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['docinfo'] = PreambleCmds.docinfo
#
self.docinfo.insert(0, '\n% Docinfo\n'
'\\begin{center}\n'
'\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
self.docinfo.append('\\end{tabularx}\n'
'\\end{center}\n')
def visit_docinfo_item(self, node, name):
if name == 'author':
self.pdfauthor.append(self.attval(node.astext()))
if self.use_latex_docinfo:
if name in ('author', 'organization', 'contact', 'address'):
# We attach these to the last author. If any of them precedes
# the first author, put them in a separate "author" group
# (for lack of better semantics).
if name == 'author' or not self.author_stack:
self.author_stack.append([])
if name == 'address': # newlines are meaningful
self.insert_newline = True
text = self.encode(node.astext())
self.insert_newline = False
else:
text = self.attval(node.astext())
self.author_stack[-1].append(text)
raise nodes.SkipNode
elif name == 'date':
self.date.append(self.attval(node.astext()))
raise nodes.SkipNode
self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'address':
self.insert_newline = True
self.out.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
else:
self.context.append(' \\\\\n')
def depart_docinfo_item(self, node):
self.out.append(self.context.pop())
# for address we did set insert_newline
self.insert_newline = False
def visit_doctest_block(self, node):
self.visit_literal_block(node)
def depart_doctest_block(self, node):
self.depart_literal_block(node)
def visit_document(self, node):
# titled document?
if (self.use_latex_docinfo or
(len(node) and isinstance(node[0], nodes.title))):
self.title_labels += self.ids_to_labels(node, set_anchor=False)
def depart_document(self, node):
# Complete header with information gained from walkabout
# * language setup
if (self.babel.otherlanguages or
self.babel.language not in ('', 'english')):
self.requirements['babel'] = self.babel()
# * conditional requirements (before style sheet)
self.requirements = self.requirements.sortedvalues()
# * conditional fallback definitions (after style sheet)
self.fallbacks = self.fallbacks.sortedvalues()
# * PDF properties
self.pdfsetup.append(PreambleCmds.linking % self.hyperref_options)
if self.pdfauthor:
authors = self.author_separator.join(self.pdfauthor)
self.pdfinfo.append(' pdfauthor={%s}' % authors)
if self.pdfinfo:
self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
# Complete body
# * document title (with "use_latex_docinfo" also
# 'author', 'organization', 'contact', 'address' and 'date')
if self.title or (
self.use_latex_docinfo and (self.author_stack or self.date)):
# with the default template, titledata is written to the preamble
self.titledata.append('%%% Title Data')
# \title (empty \title prevents error with \maketitle)
if self.title:
self.title.insert(0, '\\phantomsection%\n ')
title = [''.join(self.title)] + self.title_labels
if self.subtitle:
title += [r'\\ % subtitle',
r'\DUdocumentsubtitle{%s}' % ''.join(self.subtitle)
] + self.subtitle_labels
self.titledata.append(r'\title{%s}' % '%\n '.join(title))
# \author (empty \author prevents warning with \maketitle)
authors = ['\\\\\n'.join(author_entry)
for author_entry in self.author_stack]
self.titledata.append(r'\author{%s}' %
' \\and\n'.join(authors))
# \date (empty \date prevents defaulting to \today)
self.titledata.append(r'\date{%s}' % ', '.join(self.date))
# \maketitle in the body formats title with LaTeX
self.body_pre_docinfo.append('\\maketitle\n')
# * bibliography
# TODO insertion point of bibliography should be configurable.
if self._use_latex_citations and len(self._bibitems)>0:
if not self.bibtex:
widest_label = ''
for bi in self._bibitems:
if len(widest_label)<len(bi[0]):
widest_label = bi[0]
self.out.append('\n\\begin{thebibliography}{%s}\n' %
widest_label)
for bi in self._bibitems:
# cite_key: underscores must not be escaped
cite_key = bi[0].replace(r'\_','_')
self.out.append('\\bibitem[%s]{%s}{%s}\n' %
(bi[0], cite_key, bi[1]))
self.out.append('\\end{thebibliography}\n')
else:
self.out.append('\n\\bibliographystyle{%s}\n' %
self.bibtex[0])
self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
# * make sure to generate a toc file if needed for local contents:
if 'minitoc' in self.requirements and not self.has_latex_toc:
self.out.append('\n\\faketableofcontents % for local ToCs\n')
def visit_emphasis(self, node):
self.out.append('\\emph{')
if node['classes']:
self.visit_inline(node)
def depart_emphasis(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_entry(self, node):
self.active_table.visit_entry()
# cell separation
# BUG: the following fails with more than one multirow
# starting in the second column (or later); see
# ../../../test/functional/input/data/latex.txt
if self.active_table.get_entry_number() == 1:
# if the first row is a multirow, this actually is the second row.
# this gets hairy if rowspans follow each other.
if self.active_table.get_rowspan(0):
count = 0
while self.active_table.get_rowspan(count):
count += 1
self.out.append(' & ')
self.active_table.visit_entry() # increment cell count
else:
self.out.append(' & ')
# multirow, multicolumn
# IN WORK BUG TODO HACK continues here
# multirow in LaTeX simply will enlarge the cell over several rows
# (the following n if n is positive, the former if negative).
if 'morerows' in node and 'morecols' in node:
raise NotImplementedError('Cells that '
'span multiple rows *and* columns are not supported, sorry.')
if 'morerows' in node:
self.requirements['multirow'] = r'\usepackage{multirow}'
count = node['morerows'] + 1
self.active_table.set_rowspan(
self.active_table.get_entry_number()-1,count)
# TODO why does multirow end on % ? needs to be checked for below
self.out.append('\\multirow{%d}{%s}{%%' %
(count,self.active_table.get_column_width()))
self.context.append('}')
elif 'morecols' in node:
# the vertical bar before the cell is only written for the first
# column (later columns inherit it from the preceding colspec);
# the bar after the cell is always written.
if self.active_table.get_entry_number() == 1:
bar1 = self.active_table.get_vertical_bar()
else:
bar1 = ''
count = node['morecols'] + 1
self.out.append('\\multicolumn{%d}{%sp{%s}%s}{' %
(count, bar1,
self.active_table.get_multicolumn_width(
self.active_table.get_entry_number(),
count),
self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
# header / not header
if isinstance(node.parent.parent, nodes.thead):
if self.out[-1].endswith("%"):
self.out.append("\n")
self.out.append('\\textbf{%')
self.context.append('}')
elif self.active_table.is_stub_column():
if self.out[-1].endswith("%"):
self.out.append("\n")
self.out.append('\\textbf{')
self.context.append('}')
else:
self.context.append('')
def depart_entry(self, node):
self.out.append(self.context.pop()) # header / not header
self.out.append(self.context.pop()) # multirow/column
# if following row is spanned from above.
if self.active_table.get_rowspan(self.active_table.get_entry_number()):
self.out.append(' & ')
self.active_table.visit_entry() # increment cell count
def visit_row(self, node):
self.active_table.visit_row()
def depart_row(self, node):
self.out.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node):
# We create our own enumeration list environment.
# This allows setting the style and starting value,
# and supports unlimited nesting.
enum_style = {'arabic':'arabic',
'loweralpha':'alph',
'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman' }
enum_suffix = ''
if 'suffix' in node:
enum_suffix = node['suffix']
enum_prefix = ''
if 'prefix' in node:
enum_prefix = node['prefix']
if self.compound_enumerators:
pref = ''
if self.section_prefix_for_enumerators and self.section_level:
for i in range(self.section_level):
pref += '%d.' % self._section_number[i]
pref = pref[:-1] + self.section_enumerator_separator
enum_prefix += pref
for ctype, cname in self._enumeration_counters:
enum_prefix += '\\%s{%s}.' % (ctype, cname)
enum_type = 'arabic'
if 'enumtype' in node:
enum_type = node['enumtype']
if enum_type in enum_style:
enum_type = enum_style[enum_type]
counter_name = 'listcnt%d' % len(self._enumeration_counters)
self._enumeration_counters.append((enum_type, counter_name))
# If we haven't used this counter name before, then create a
# new counter; otherwise, reset & reuse the old counter.
if len(self._enumeration_counters) > self._max_enumeration_counters:
self._max_enumeration_counters = len(self._enumeration_counters)
self.out.append('\\newcounter{%s}\n' % counter_name)
else:
self.out.append('\\setcounter{%s}{0}\n' % counter_name)
self.out.append('\\begin{list}{%s\\%s{%s}%s}\n' %
(enum_prefix,enum_type,counter_name,enum_suffix))
self.out.append('{\n')
self.out.append('\\usecounter{%s}\n' % counter_name)
# set start after usecounter, because it initializes to zero.
if 'start' in node:
self.out.append('\\addtocounter{%s}{%d}\n' %
(counter_name,node['start']-1))
## set rightmargin equal to leftmargin
self.out.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
self.out.append('}\n')
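# Sketch of the LaTeX emitted for a plain arabic list with suffix '.'
# (first list in the document, so a new counter is created):
#   \newcounter{listcnt0}
#   \begin{list}{\arabic{listcnt0}.}
#   {
#   \usecounter{listcnt0}
#   \setlength{\rightmargin}{\leftmargin}
#   }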
def depart_enumerated_list(self, node):
self.out.append('\\end{list}\n')
self._enumeration_counters.pop()
def visit_field(self, node):
# real output is done in siblings: _argument, _body, _name
pass
def depart_field(self, node):
self.out.append('\n')
##self.out.append('%[depart_field]\n')
def visit_field_argument(self, node):
self.out.append('%[visit_field_argument]\n')
def depart_field_argument(self, node):
self.out.append('%[depart_field_argument]\n')
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
if self.out is self.docinfo:
self.out.append(r'\\')
def visit_field_list(self, node):
if self.out is not self.docinfo:
self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
self.out.append('%\n\\begin{DUfieldlist}\n')
def depart_field_list(self, node):
if self.out is not self.docinfo:
self.out.append('\\end{DUfieldlist}\n')
def visit_field_name(self, node):
if self.out is self.docinfo:
self.out.append('\\textbf{')
else:
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_field_name(self, node):
if self.out is self.docinfo:
self.out.append('}: &')
else:
self.out.append(':}]')
def visit_figure(self, node):
self.requirements['float_settings'] = PreambleCmds.float_settings
# The 'align' attribute sets the "outer alignment",
# for "inner alignment" use LaTeX default alignment (similar to HTML)
alignment = node.attributes.get('align', 'center')
if alignment != 'center':
# The LaTeX "figure" environment always uses the full textwidth,
# so "outer alignment" is ignored. Just write a comment.
# TODO: use the wrapfigure environment?
self.out.append('\n\\begin{figure} %% align = "%s"\n' % alignment)
else:
self.out.append('\n\\begin{figure}\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def depart_figure(self, node):
self.out.append('\\end{figure}\n')
def visit_footer(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUfooter}{')
def depart_footer(self, node):
self.out.append('}')
self.requirements['~footer'] = ''.join(self.out)
self.pop_output_collector()
def visit_footnote(self, node):
try:
backref = node['backrefs'][0]
except IndexError:
backref = node['ids'][0] # no backref, use self-ref instead
if self.settings.figure_footnotes:
self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
self.out.append('\\begin{figure}[b]')
self.append_hypertargets(node)
if node.get('id') == node.get('name'): # explicit label
self.out += self.ids_to_labels(node)
elif self.docutils_footnotes:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
num,text = node.astext().split(None,1)
if self.settings.footnote_references == 'brackets':
num = '[%s]' % num
self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
(node['ids'][0], backref, self.encode(num)))
if node['ids'] == node['names']:
self.out += self.ids_to_labels(node)
# mask newline to prevent spurious whitespace:
self.out.append('%')
## else: # TODO: "real" LaTeX \footnote{}s
def depart_footnote(self, node):
if self.figure_footnotes:
self.out.append('\\end{figure}\n')
else:
self.out.append('}\n')
def visit_footnote_reference(self, node):
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
# if not self.docutils_footnotes:
# TODO: insert footnote content at (or near) this place
# print "footnote-ref to", node['refid']
# footnotes = (self.document.footnotes +
# self.document.autofootnotes +
# self.document.symbol_footnotes)
# for footnote in footnotes:
# # print footnote['ids']
# if node.get('refid', '') in footnote['ids']:
# print 'matches', footnote['ids']
format = self.settings.footnote_references
if format == 'brackets':
self.append_hypertargets(node)
self.out.append('\\hyperlink{%s}{[' % href)
self.context.append(']}')
else:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
self.out.append(r'\DUfootnotemark{%s}{%s}{' %
(node['ids'][0], href))
self.context.append('}')
def depart_footnote_reference(self, node):
self.out.append(self.context.pop())
# footnote/citation label
def label_delim(self, node, bracket, superscript):
if isinstance(node.parent, nodes.footnote):
if not self.figure_footnotes:
raise nodes.SkipNode
if self.settings.footnote_references == 'brackets':
self.out.append(bracket)
else:
self.out.append(superscript)
else:
assert isinstance(node.parent, nodes.citation)
if not self._use_latex_citations:
self.out.append(bracket)
def visit_label(self, node):
"""footnote or citation label: in brackets or as superscript"""
self.label_delim(node, '[', '\\textsuperscript{')
def depart_label(self, node):
self.label_delim(node, ']', '}')
# elements generated by the framework e.g. section numbers.
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUheader}{')
def depart_header(self, node):
self.out.append('}')
self.requirements['~header'] = ''.join(self.out)
self.pop_output_collector()
def to_latex_length(self, length_str, pxunit=None):
"""Convert `length_str` with rst lenght to LaTeX length
"""
if pxunit is not None:
sys.stderr.write('deprecation warning: LaTeXTranslator.to_latex_length()'
' option `pxunit` will be removed.')
match = re.match(r'(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
elif self.is_xetex and unit == 'px':
# XeTeX does not know the length unit px.
# Use \pdfpxdimen, the macro to set the value of 1 px in pdftex.
# This way, configuring works the same for pdftex and xetex.
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['px'] = '\n\\DUprovidelength{\\pdfpxdimen}{1bp}\n'
length_str = r'%s\pdfpxdimen' % value
return length_str
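# Examples of the conversion (the 'px' case applies to XeTeX only):
#   to_latex_length('80')  -> '80bp'             (unitless/'pt': DTP points)
#   to_latex_length('50%') -> '0.500\linewidth'
#   to_latex_length('3cm') -> '3cm'              (known TeX unit, unchanged)
#   to_latex_length('4px') -> '4\pdfpxdimen'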
def visit_image(self, node):
self.requirements['graphicx'] = self.graphicx_package
attrs = node.attributes
# Convert image URI to a local file path
imagepath = urllib.url2pathname(attrs['uri']).replace('\\', '/')
# alignment defaults:
if 'align' not in attrs:
# Set default align of image in a figure to 'center'
if isinstance(node.parent, nodes.figure):
attrs['align'] = 'center'
# query 'align-*' class argument
for cls in node['classes']:
if cls.startswith('align-'):
attrs['align'] = cls.split('-')[1]
# pre- and postfix (prefix inserted in reverse order)
pre = []
post = []
include_graphics_options = []
align_codes = {
# inline images: by default latex aligns the bottom.
'bottom': ('', ''),
'middle': (r'\raisebox{-0.5\height}{', '}'),
'top': (r'\raisebox{-\height}{', '}'),
# block level images:
'center': (r'\noindent\makebox[\textwidth][c]{', '}'),
'left': (r'\noindent{', r'\hfill}'),
'right': (r'\noindent{\hfill', '}'),}
if 'align' in attrs:
# TODO: warn or ignore non-applicable alignment settings?
try:
align_code = align_codes[attrs['align']]
pre.append(align_code[0])
post.append(align_code[1])
except KeyError:
pass # TODO: warn?
if 'height' in attrs:
include_graphics_options.append('height=%s' %
self.to_latex_length(attrs['height']))
if 'scale' in attrs:
include_graphics_options.append('scale=%f' %
(attrs['scale'] / 100.0))
if 'width' in attrs:
include_graphics_options.append('width=%s' %
self.to_latex_length(attrs['width']))
if not (self.is_inline(node) or
isinstance(node.parent, nodes.figure)):
pre.append('\n')
post.append('\n')
pre.reverse()
self.out.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % (','.join(include_graphics_options))
self.out.append('\\includegraphics%s{%s}' % (options, imagepath))
self.out.extend(post)
def depart_image(self, node):
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def visit_inline(self, node): # <span>, i.e. custom roles
self.context.append('}' * len(node['classes']))
for cls in node['classes']:
if cls == 'align-center':
self.fallbacks['align-center'] = PreambleCmds.align_center
if cls.startswith('language-'):
language = self.babel.language_name(cls[9:])
if language:
self.babel.otherlanguages[language] = True
self.out.append(r'\foreignlanguage{%s}{' % language)
else:
self.fallbacks['inline'] = PreambleCmds.inline
self.out.append(r'\DUrole{%s}{' % cls)
def depart_inline(self, node):
self.out.append(self.context.pop())
def visit_interpreted(self, node):
# @@@ Incomplete, pending a proper implementation on the
# Parser/Reader end.
self.visit_literal(node)
def depart_interpreted(self, node):
self.depart_literal(node)
def visit_legend(self, node):
self.fallbacks['legend'] = PreambleCmds.legend
self.out.append('\\begin{DUlegend}')
def depart_legend(self, node):
self.out.append('\\end{DUlegend}\n')
def visit_line(self, node):
self.out.append('\\item[] ')
def depart_line(self, node):
self.out.append('\n')
def visit_line_block(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['lineblock'] = PreambleCmds.lineblock
if isinstance(node.parent, nodes.line_block):
self.out.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
else:
self.out.append('\n\\begin{DUlineblock}{0em}\n')
if node['classes']:
self.visit_inline(node)
self.out.append('\n')
def depart_line_block(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('\n')
self.out.append('\\end{DUlineblock}\n')
def visit_list_item(self, node):
self.out.append('\n\\item ')
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.literal = True
if 'code' in node['classes'] and (
self.settings.syntax_highlight != 'none'):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['code'] = PreambleCmds.highlight_rules
self.out.append('\\texttt{')
if node['classes']:
self.visit_inline(node)
def depart_literal(self, node):
self.literal = False
if node['classes']:
self.depart_inline(node)
self.out.append('}')
# Literal blocks are used for '::'-prefixed literal-indented
# blocks of text, where the inline markup is not recognized,
# but are also the product of the "parsed-literal" directive,
# where the markup is respected.
#
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox or \alltt.
#
# We can distinguish between the two kinds by the number of
# siblings that compose this node: if it is composed by a
# single element, it's either
# * a real one,
# * a parsed-literal that does not contain any markup, or
# * a parsed-literal containing just one markup construct.
def is_plaintext(self, node):
"""Check whether a node can be typeset verbatim"""
return (len(node) == 1) and isinstance(node[0], nodes.Text)
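# E.g. a parsed-literal block with nested emphasis has several child
# nodes, so is_plaintext() is False and the block is typeset with
# \ttfamily instead of a verbatim environment.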
def visit_literal_block(self, node):
"""Render a literal block."""
# environments and packages to typeset literal blocks
packages = {'listing': r'\usepackage{moreverb}',
'lstlisting': r'\usepackage{listings}',
'Verbatim': r'\usepackage{fancyvrb}',
# 'verbatim': '',
'verbatimtab': r'\usepackage{moreverb}'}
if not self.active_table.is_open():
# no quote inside tables, to avoid vertical space between
# table border and literal block.
# BUG: fails if normal text precedes the literal block.
self.out.append('%\n\\begin{quote}')
self.context.append('\n\\end{quote}\n')
else:
self.out.append('\n')
self.context.append('\n')
if self.literal_block_env != '' and self.is_plaintext(node):
self.requirements['literal_block'] = packages.get(
self.literal_block_env, '')
self.verbatim = True
self.out.append('\\begin{%s}%s\n' % (self.literal_block_env,
self.literal_block_options))
else:
self.literal = True
self.insert_newline = True
self.insert_non_breaking_blanks = True
if 'code' in node['classes'] and (
self.settings.syntax_highlight != 'none'):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['code'] = PreambleCmds.highlight_rules
self.out.append('{\\ttfamily \\raggedright \\noindent\n')
def depart_literal_block(self, node):
if self.verbatim:
self.out.append('\n\\end{%s}\n' % self.literal_block_env)
self.verbatim = False
else:
self.out.append('\n}')
self.insert_non_breaking_blanks = False
self.insert_newline = False
self.literal = False
self.out.append(self.context.pop())
## def visit_meta(self, node):
## self.out.append('[visit_meta]\n')
# TODO: set keywords for pdf?
# But:
# The reStructuredText "meta" directive creates a "pending" node,
# which contains knowledge that the embedded "meta" node can only
# be handled by HTML-compatible writers. The "pending" node is
# resolved by the docutils.transforms.components.Filter transform,
# which checks that the calling writer supports HTML; if it doesn't,
# the "pending" node (and enclosed "meta" node) is removed from the
# document.
# --- docutils/docs/peps/pep-0258.html#transformer
## def depart_meta(self, node):
## self.out.append('[depart_meta]\n')
def visit_math(self, node, math_env='$'):
"""math role"""
if node['classes']:
self.visit_inline(node)
self.requirements['amsmath'] = r'\usepackage{amsmath}'
math_code = node.astext().translate(unichar2tex.uni2tex_table)
if node.get('ids'):
math_code = '\n'.join([math_code] + self.ids_to_labels(node))
if math_env == '$':
wrapper = u'$%s$'
else:
wrapper = u'\n'.join(['%%',
r'\begin{%s}' % math_env,
'%s',
r'\end{%s}' % math_env])
# print repr(wrapper), repr(math_code)
self.out.append(wrapper % math_code)
if node['classes']:
self.depart_inline(node)
# Content already processed:
raise nodes.SkipNode
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.out.append(', ')
def depart_option(self, node):
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
"""Append the delimiter betweeen an option and its argument to body."""
self.out.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
self.out.append('\n\\item[')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
self.out.append('] ')
def visit_option_list(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['optionlist'] = PreambleCmds.optionlist
self.out.append('%\n\\begin{DUoptionlist}\n')
def depart_option_list(self, node):
self.out.append('\n\\end{DUoptionlist}\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_string(self, node):
##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
pass
def depart_option_string(self, node):
##self.out.append('</span>')
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
# insert blank line, if the paragraph is not first in a list item
# nor follows a non-paragraph node in a compound
index = node.parent.index(node)
if (index == 0 and (isinstance(node.parent, nodes.list_item) or
isinstance(node.parent, nodes.description))):
pass
elif (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
pass
else:
self.out.append('\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
if node['classes']:
self.visit_inline(node)
def depart_paragraph(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('\n')
def visit_problematic(self, node):
self.requirements['color'] = PreambleCmds.color
self.out.append('%\n')
self.append_hypertargets(node)
self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
def depart_problematic(self, node):
self.out.append('}}')
def visit_raw(self, node):
if 'latex' not in node.get('format', '').split():
raise nodes.SkipNode
if not self.is_inline(node):
self.out.append('\n')
if node['classes']:
self.visit_inline(node)
# append "as-is" skipping any LaTeX-encoding
self.verbatim = True
def depart_raw(self, node):
self.verbatim = False
if node['classes']:
self.depart_inline(node)
if not self.is_inline(node):
self.out.append('\n')
def has_unbalanced_braces(self, string):
"""Test whether there are unmatched '{' or '}' characters."""
level = 0
for ch in string:
if ch == '{':
level += 1
if ch == '}':
level -= 1
if level < 0:
return True
return level != 0
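# E.g. has_unbalanced_braces('a{b}c') is False, while '}{' (closing
# before opening) and 'a{b' (unclosed) both return True.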
def visit_reference(self, node):
# We need to escape #, \, and % if we use the URL in a command.
special_chars = {ord('#'): ur'\#',
ord('%'): ur'\%',
ord('\\'): ur'\\',
}
# external reference (URL)
if 'refuri' in node:
href = unicode(node['refuri']).translate(special_chars)
# problematic chars double caret and unbalanced braces:
if href.find('^^') != -1 or self.has_unbalanced_braces(href):
self.error(
'External link "%s" not supported by LaTeX.\n'
' (Must not contain "^^" or unbalanced braces.)' % href)
if node['refuri'] == node.astext():
self.out.append(r'\url{%s}' % href)
raise nodes.SkipNode
self.out.append(r'\href{%s}{' % href)
return
# internal reference
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
else:
raise AssertionError('Unknown reference.')
if not self.is_inline(node):
self.out.append('\n')
self.out.append('\\hyperref[%s]{' % href)
if self._reference_label:
self.out.append('\\%s{%s}}' %
(self._reference_label, href.replace('#', '')))
raise nodes.SkipNode
def depart_reference(self, node):
self.out.append('}')
if not self.is_inline(node):
self.out.append('\n')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
def depart_revision(self, node):
self.depart_docinfo_item(node)
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['sidebar'] = PreambleCmds.sidebar
self.out.append('\n\\DUsidebar{\n')
def depart_sidebar(self, node):
self.out.append('}\n')
attribution_formats = {'dash': (u'—', ''), # EM DASH
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.out.append('\\nopagebreak\n\n\\raggedleft ')
self.out.append(prefix)
self.context.append(suffix)
def depart_attribution(self, node):
self.out.append(self.context.pop() + '\n')
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
def depart_status(self, node):
self.depart_docinfo_item(node)
def visit_strong(self, node):
self.out.append('\\textbf{')
if node['classes']:
self.visit_inline(node)
def depart_strong(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.push_output_collector(self.subtitle)
self.fallbacks['documentsubtitle'] = PreambleCmds.documentsubtitle
self.subtitle_labels += self.ids_to_labels(node, set_anchor=False)
# section subtitle: "starred" (no number, not in ToC)
elif isinstance(node.parent, nodes.section):
self.out.append(r'\%s*{' %
self.d_class.section(self.section_level + 1))
else:
self.fallbacks['subtitle'] = PreambleCmds.subtitle
self.out.append('\n\\DUsubtitle[%s]{' % node.parent.tagname)
def depart_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.pop_output_collector()
else:
self.out.append('}\n')
def visit_system_message(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['title'] = PreambleCmds.title
node['classes'] = ['system-message']
self.visit_admonition(node)
self.out.append('\\DUtitle[system-message]{system-message}\n')
self.append_hypertargets(node)
try:
line = ', line~%s' % node['line']
except KeyError:
line = ''
self.out.append('\n\n{\\color{red}%s/%s} in \\texttt{%s}%s\n' %
(node['type'], node['level'],
self.encode(node['source']), line))
if len(node['backrefs']) == 1:
self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
self.context.append('}')
else:
backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
for (i, href) in enumerate(node['backrefs'])]
self.context.append('backrefs: ' + ' '.join(backrefs))
def depart_system_message(self, node):
self.out.append(self.context.pop())
self.depart_admonition()
def visit_table(self, node):
self.requirements['table'] = PreambleCmds.table
if self.active_table.is_open():
self.table_stack.append(self.active_table)
# nesting longtable does not work (e.g. 2007-04-18)
self.active_table = Table(self,'tabular',self.settings.table_style)
# A longtable moves before \paragraph and \subparagraph
# section titles if it immediately follows them:
if (self.active_table._latex_type == 'longtable' and
isinstance(node.parent, nodes.section) and
node.parent.index(node) == 1 and
self.d_class.section(self.section_level).find('paragraph') != -1):
self.out.append('\\leavevmode')
self.active_table.open()
for cls in node['classes']:
self.active_table.set_table_style(cls)
if self.active_table._table_style == 'booktabs':
self.requirements['booktabs'] = r'\usepackage{booktabs}'
self.push_output_collector([])
def depart_table(self, node):
# wrap content in the right environment:
content = self.out
self.pop_output_collector()
self.out.append('\n' + self.active_table.get_opening())
self.out += content
self.out.append(self.active_table.get_closing() + '\n')
self.active_table.close()
if len(self.table_stack)>0:
self.active_table = self.table_stack.pop()
else:
self.active_table.set_table_style(self.settings.table_style)
# Insert hyperlabel after (long)table, as
# other places (beginning, caption) result in LaTeX errors.
if node.get('ids'):
self.out += self.ids_to_labels(node, set_anchor=False) + ['\n']
def visit_target(self, node):
# Skip indirect targets:
if ('refuri' in node # external hyperlink
or 'refid' in node # resolved internal link
or 'refname' in node): # unresolved internal link
## self.out.append('%% %s\n' % node) # for debugging
return
self.out.append('%\n')
# do we need an anchor (\phantomsection)?
set_anchor = not(isinstance(node.parent, nodes.caption) or
isinstance(node.parent, nodes.title))
# TODO: where else can/must we omit the \phantomsection?
self.out += self.ids_to_labels(node, set_anchor)
def depart_target(self, node):
pass
def visit_tbody(self, node):
# BUG write preamble if not yet done (colspecs not [])
# for tables without heads.
if not self.active_table.get('preamble written'):
self.visit_thead(None)
self.depart_thead(None)
def depart_tbody(self, node):
pass
def visit_term(self, node):
"""definition list term"""
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_term(self, node):
# \leavevmode results in a line break if the
# term is followed by an item list.
self.out.append('}] \\leavevmode ')
def visit_tgroup(self, node):
#self.out.append(self.starttag(node, 'colgroup'))
#self.context.append('</colgroup>\n')
pass
def depart_tgroup(self, node):
pass
_thead_depth = 0
def thead_depth (self):
return self._thead_depth
def visit_thead(self, node):
self._thead_depth += 1
if 1 == self.thead_depth():
self.out.append('{%s}\n' % self.active_table.get_colspecs())
self.active_table.set('preamble written',1)
self.out.append(self.active_table.get_caption())
self.out.extend(self.active_table.visit_thead())
def depart_thead(self, node):
if node is not None:
self.out.extend(self.active_table.depart_thead())
if self.active_table.need_recurse():
node.walkabout(self)
self._thead_depth -= 1
def visit_title(self, node):
"""Append section and other titles."""
# Document title
if node.parent.tagname == 'document':
self.push_output_collector(self.title)
self.context.append('')
self.pdfinfo.append(' pdftitle={%s},' %
self.encode(node.astext()))
# Topic titles (topic, admonition, sidebar)
elif (isinstance(node.parent, nodes.topic) or
isinstance(node.parent, nodes.admonition) or
isinstance(node.parent, nodes.sidebar)):
self.fallbacks['title'] = PreambleCmds.title
classes = ','.join(node.parent['classes'])
if not classes:
classes = node.tagname
self.out.append('\\DUtitle[%s]{' % classes)
self.context.append('}\n')
# Table caption
elif isinstance(node.parent, nodes.table):
self.push_output_collector(self.active_table.caption)
self.context.append('')
# Section title
else:
if hasattr(PreambleCmds, 'secnumdepth'):
self.requirements['secnumdepth'] = PreambleCmds.secnumdepth
section_name = self.d_class.section(self.section_level)
self.out.append('\n\n')
# System messages heading in red:
if ('system-messages' in node.parent['classes']):
self.requirements['color'] = PreambleCmds.color
section_title = self.encode(node.astext())
self.out.append(r'\%s[%s]{\color{red}' % (
section_name,section_title))
else:
self.out.append(r'\%s{' % section_name)
if self.section_level > len(self.d_class.sections):
# section level not supported by LaTeX
self.fallbacks['title'] = PreambleCmds.title
# self.out.append('\\phantomsection%\n ')
# label and ToC entry:
bookmark = ['']
# add sections with unsupported level to toc and pdfbookmarks?
## if self.section_level > len(self.d_class.sections):
## section_title = self.encode(node.astext())
## bookmark.append(r'\addcontentsline{toc}{%s}{%s}' %
## (section_name, section_title))
bookmark += self.ids_to_labels(node.parent, set_anchor=False)
self.context.append('%\n '.join(bookmark) + '%\n}\n')
        # MAYBE postfix paragraph and subparagraph with \leavevmode to
# ensure floats stay in the section and text starts on a new line.
def depart_title(self, node):
self.out.append(self.context.pop())
if (isinstance(node.parent, nodes.table) or
node.parent.tagname == 'document'):
self.pop_output_collector()
def minitoc(self, node, title, depth):
"""Generate a local table of contents with LaTeX package minitoc"""
section_name = self.d_class.section(self.section_level)
# name-prefix for current section level
minitoc_names = {'part': 'part', 'chapter': 'mini'}
if 'chapter' not in self.d_class.sections:
minitoc_names['section'] = 'sect'
try:
minitoc_name = minitoc_names[section_name]
except KeyError: # minitoc only supports part- and toplevel
self.warn('Skipping local ToC at %s level.\n' % section_name +
' Feature not supported with option "use-latex-toc"',
base_node=node)
return
# Requirements/Setup
self.requirements['minitoc'] = PreambleCmds.minitoc
self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
minitoc_name)
# depth: (Docutils defaults to unlimited depth)
maxdepth = len(self.d_class.sections)
self.requirements['minitoc-%s-depth' % minitoc_name] = (
r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
# Process 'depth' argument (!Docutils stores a relative depth while
# minitoc expects an absolute depth!):
offset = {'sect': 1, 'mini': 0, 'part': 0}
if 'chapter' in self.d_class.sections:
offset['part'] = -1
if depth:
self.out.append('\\setcounter{%stocdepth}{%d}' %
(minitoc_name, depth + offset[minitoc_name]))
# title:
self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
# the toc-generating command:
self.out.append('\\%stoc\n' % minitoc_name)
def visit_topic(self, node):
# Topic nodes can be generic topic, abstract, dedication, or ToC.
# table of contents:
if 'contents' in node['classes']:
self.out.append('\n')
self.out += self.ids_to_labels(node)
# add contents to PDF bookmarks sidebar
if isinstance(node.next_node(), nodes.title):
self.out.append('\n\\pdfbookmark[%d]{%s}{%s}\n' %
(self.section_level+1,
node.next_node().astext(),
node.get('ids', ['contents'])[0]
))
if self.use_latex_toc:
title = ''
if isinstance(node.next_node(), nodes.title):
title = self.encode(node.pop(0).astext())
depth = node.get('depth', 0)
if 'local' in node['classes']:
self.minitoc(node, title, depth)
self.context.append('')
return
if depth:
self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
if title != 'Contents':
self.out.append('\\renewcommand{\\contentsname}{%s}\n' %
title)
self.out.append('\\tableofcontents\n\n')
self.has_latex_toc = True
else: # Docutils generated contents list
# set flag for visit_bullet_list() and visit_title()
self.is_toc_list = True
self.context.append('')
elif ('abstract' in node['classes'] and
self.settings.use_latex_abstract):
self.push_output_collector(self.abstract)
self.out.append('\\begin{abstract}')
self.context.append('\\end{abstract}\n')
if isinstance(node.next_node(), nodes.title):
node.pop(0) # LaTeX provides its own title
else:
self.fallbacks['topic'] = PreambleCmds.topic
# special topics:
if 'abstract' in node['classes']:
self.fallbacks['abstract'] = PreambleCmds.abstract
self.push_output_collector(self.abstract)
if 'dedication' in node['classes']:
self.fallbacks['dedication'] = PreambleCmds.dedication
self.push_output_collector(self.dedication)
self.out.append('\n\\DUtopic[%s]{\n' % ','.join(node['classes']))
self.context.append('}\n')
def depart_topic(self, node):
self.out.append(self.context.pop())
self.is_toc_list = False
if ('abstract' in node['classes'] or
'dedication' in node['classes']):
self.pop_output_collector()
def visit_rubric(self, node):
self.fallbacks['rubric'] = PreambleCmds.rubric
self.out.append('\n\\DUrubric{')
self.context.append('}\n')
def depart_rubric(self, node):
self.out.append(self.context.pop())
def visit_transition(self, node):
self.fallbacks['transition'] = PreambleCmds.transition
self.out.append('\n\n')
self.out.append('%' + '_' * 75 + '\n')
self.out.append(r'\DUtransition')
self.out.append('\n\n')
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s' %
node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
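# Usage sketch (not part of the writer itself; assumes only the public
# docutils API, which selects this writer by name):
#
#     from docutils.core import publish_string
#     rst = "Heading\n=======\n\nSome *emphasised* text.\n"
#     latex = publish_string(source=rst, writer_name='latex')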
# vim: set ts=4 et ai :
|
havard024/prego
|
venv/lib/python2.7/site-packages/docutils/writers/latex2e/__init__.py
|
Python
|
mit
| 124,479
|
[
"VisIt"
] |
f7b907e5eb9dfd17bef49ec0d229309bba022f53e262dac0739cf2f17b884c87
|
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from os.path import join, dirname
from nose.tools import eq_
from pysam import AlignmentFile
import varcode
def assert_equal_fields(result, expected):
"""
Assert that fields of two namedtuple objects have same field values.
"""
eq_(result.__class__, expected.__class__)
for field in result.__class__._fields:
result_value = getattr(result, field)
expected_value = getattr(expected, field)
assert result_value == expected_value, \
"Wrong value for '%s', expected %s but got %s" % (
field,
expected_value,
result_value)
def data_path(path):
"""
Return the absolute path to a file in the test directory.
The name specified should be relative to test.
"""
return join(dirname(__file__), path)
def load_bam(bam_path):
return AlignmentFile(data_path(bam_path))
def load_vcf(vcf_path, genome=None):
return varcode.load_vcf(data_path(vcf_path), genome=genome)
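# Usage sketch in a test module (the file names below are hypothetical
# placeholders for data shipped under the test directory):
#
#     variants = load_vcf("data/example.vcf", genome="GRCh37")
#     reads = load_bam("data/example.bam")
#     assert_equal_fields(result_record, expected_record)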
|
hammerlab/isovar
|
test/testing_helpers.py
|
Python
|
apache-2.0
| 1,658
|
[
"pysam"
] |
41a012bfb4d9900c053f9f714e2dc688e2c36b6074b3be4cf56bd0326ed26604
|
"""Functions for finding chains in a graph."""
import networkx as nx
from networkx.utils import not_implemented_for
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def chain_decomposition(G, root=None):
"""Returns the chain decomposition of a graph.
The *chain decomposition* of a graph with respect a depth-first
search tree is a set of cycles or paths derived from the set of
fundamental cycles of the tree in the following manner. Consider
each fundamental cycle with respect to the given tree, represented
as a list of edges beginning with the nontree edge oriented away
from the root of the tree. For each fundamental cycle, if it
overlaps with any previous fundamental cycle, just take the initial
non-overlapping segment, which is a path instead of a cycle. Each
cycle or path is called a *chain*. For more information, see [1]_.
Parameters
----------
G : undirected graph
root : node (optional)
A node in the graph `G`. If specified, only the chain
decomposition for the connected component containing this node
will be returned. This node indicates the root of the depth-first
search tree.
Yields
------
chain : list
A list of edges representing a chain. There is no guarantee on
the orientation of the edges in each chain (for example, if a
chain includes the edge joining nodes 1 and 2, the chain may
include either (1, 2) or (2, 1)).
Raises
------
NodeNotFound
If `root` is not in the graph `G`.
Notes
-----
The worst-case running time of this implementation is linear in the
number of nodes and number of edges [1]_.
References
----------
.. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex-
and 2-edge-connectivity." *Information Processing Letters*,
113, 241–244. Elsevier. <https://doi.org/10.1016/j.ipl.2013.01.016>
"""
def _dfs_cycle_forest(G, root=None):
"""Builds a directed graph composed of cycles from the given graph.
`G` is an undirected simple graph. `root` is a node in the graph
from which the depth-first search is started.
This function returns both the depth-first search cycle graph
(as a :class:`~networkx.DiGraph`) and the list of nodes in
depth-first preorder. The depth-first search cycle graph is a
directed graph whose edges are the edges of `G` oriented toward
the root if the edge is a tree edge and away from the root if
the edge is a non-tree edge. If `root` is not specified, this
performs a depth-first search on each connected component of `G`
and returns a directed forest instead.
If `root` is not in the graph, this raises :exc:`KeyError`.
"""
# Create a directed graph from the depth-first search tree with
# root node `root` in which tree edges are directed toward the
# root and nontree edges are directed away from the root. For
# each node with an incident nontree edge, this creates a
# directed cycle starting with the nontree edge and returning to
# that node.
#
# The `parent` node attribute stores the parent of each node in
# the DFS tree. The `nontree` edge attribute indicates whether
# the edge is a tree edge or a nontree edge.
#
# We also store the order of the nodes found in the depth-first
# search in the `nodes` list.
H = nx.DiGraph()
nodes = []
for u, v, d in nx.dfs_labeled_edges(G, source=root):
if d == "forward":
# `dfs_labeled_edges()` yields (root, root, 'forward')
# if it is beginning the search on a new connected
# component.
if u == v:
H.add_node(v, parent=None)
nodes.append(v)
else:
H.add_node(v, parent=u)
H.add_edge(v, u, nontree=False)
nodes.append(v)
# `dfs_labeled_edges` considers nontree edges in both
            # orientations, so we must not add the edge if its
            # other orientation has already been added.
elif d == "nontree" and v not in H[u]:
H.add_edge(v, u, nontree=True)
else:
# Do nothing on 'reverse' edges; we only care about
# forward and nontree edges.
pass
return H, nodes
def _build_chain(G, u, v, visited):
"""Generate the chain starting from the given nontree edge.
`G` is a DFS cycle graph as constructed by
:func:`_dfs_cycle_graph`. The edge (`u`, `v`) is a nontree edge
that begins a chain. `visited` is a set representing the nodes
in `G` that have already been visited.
This function yields the edges in an initial segment of the
fundamental cycle of `G` starting with the nontree edge (`u`,
`v`) that includes all the edges up until the first node that
appears in `visited`. The tree edges are given by the 'parent'
node attribute. The `visited` set is updated to add each node in
an edge yielded by this function.
"""
while v not in visited:
yield u, v
visited.add(v)
u, v = v, G.nodes[v]["parent"]
yield u, v
# Create a directed version of H that has the DFS edges directed
# toward the root and the nontree edges directed away from the root
# (in each connected component).
H, nodes = _dfs_cycle_forest(G, root)
# Visit the nodes again in DFS order. For each node, and for each
# nontree edge leaving that node, compute the fundamental cycle for
# that nontree edge starting with that edge. If the fundamental
# cycle overlaps with any visited nodes, just take the prefix of the
# cycle up to the point of visited nodes.
#
# We repeat this process for each connected component (implicitly,
# since `nodes` already has a list of the nodes grouped by connected
# component).
visited = set()
for u in nodes:
visited.add(u)
# For each nontree edge going out of node u...
edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d)
for u, v in edges:
# Create the cycle or cycle prefix starting with the
# nontree edge.
chain = list(_build_chain(H, u, v, visited))
yield chain
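# Usage sketch: on a single cycle the decomposition is one chain containing
# every edge (edge orientation within the chain may vary):
#
#     >>> G = nx.Graph([(0, 1), (1, 2), (2, 0)])
#     >>> [len(chain) for chain in chain_decomposition(G)]
#     [3]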
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/algorithms/chains.py
|
Python
|
gpl-3.0
| 6,573
|
[
"VisIt"
] |
98659562848b309a52a072bc8e401ed8250602a8c17d336c2f823feee3b34d14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Simone Donadello
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=E1101
from scipy.optimize import curve_fit
import numpy as np
def gaussian((X, Y), mx, my, sx, sy):
"""
Returns the result of a Gaussian.
Args:
(X, Y) (tuple of np.array): matrices of the coordinates to be combined,
usually results of np.meshgrid
mx (float): horizontal center
my (float): vertical center
sx (float): horizontal sigma
sy (float): vertical sigma
"""
return np.exp(- (X-mx)**2 / (2*sx**2) - (Y-my)**2 / (2*sy**2))
def thomasfermi((X, Y), mx, my, rx, ry):
"""
Returns the result of a Thomas-Fermi function (inverted parabola).
Args:
(X, Y) (tuple of np.array): matrices of the coordinates to be combined,
usually results of np.meshgrid
mx (float): horizontal center
my (float): vertical center
rx (float): horizontal TF radius
ry (float): vertical TF radius
"""
b = (1 - ((X-mx)/rx)**2 - ((Y-my)/ry)**2)
b = np.maximum(b, 0)
b = np.sqrt(b)
return b**3
class Fitting(object):
"""
Base class for fitting routines. It has some common methods, other must be
overridden for the specific fitting types with child classes.
"""
def __init__(self, img0, par0=None):
"""
Initialize the fitting routine with a given image.
Args:
img0 (np.array): image for the fit
par0 (list): initial guess for the fit (optional, not yet used)
"""
self.img0 = img0
self.par0 = par0 #TODO: not used
#calculates the matrices with the x and y coordinates
ny, nx = self.img0.shape
x = np.arange(nx)
y = np.arange(ny)
self.X, self.Y = np.meshgrid(x, y)
#the fitted image result is initialized to None
self.fitted = None
#list of the parameter string names, must be implemented
self.par_names = tuple([])
def guess_gauss_par0(self, slc_main, slc_max, slc_bkg):
"""
Guess and returns the initial gaussian parameters from the slices
Args:
slc_main (tuple): tuple of 2 slices for the coordinates of the main
area for the gaussian center guess
slc_max (tuple): tuple of 2 slices for the coordinates of the
maximal area for the gaussian amplitude guess
slc_bkg (tuple): tuple of 2 slices for the coordinates of the
background area for the gaussian offset guess
Returns:
Tuple with the Gaussian guessed parameters
"""
height, width = self.img0[slc_main].shape
#center
xm = np.mean(slc_main[1].indices(self.img0.shape[1])[0:-1])
ym = np.mean(slc_main[0].indices(self.img0.shape[0])[0:-1])
offs = np.mean(self.img0[slc_bkg])
amp1 = np.mean(self.img0[slc_max])
return (offs, amp1, xm, ym, width/4.0, height/4.0)
def guess_par0(self, *args, **kwargs):
"""
Parameters guess for the specific function (must be overridden).
"""
pass
def function(self, *args, **kwargs):
"""
Specific function (must be overridden).
"""
pass
def fit(self):
"""
Performs the fitting operations and returns a dictionary with the
fitted parameters.
"""
frame = self.img0
#TODO handle when there is a wrong fit and set fit options
        #TODO consider parameter uncertainties
try:
results = curve_fit(self.function, (self.X, self.Y), frame.ravel(),
p0=self.par0)
except RuntimeError:
print "Error while fitting"
results = [self.par0, None]
return dict(zip(self.par_names, results[0]))
class Gauss2d(Fitting):
"""
Gaussian 2D fit.
"""
def __init__(self, img0, par0=None):
super(Gauss2d, self).__init__(img0, par0)
self.par_names = ["offs", "amp1", "mx", "my", "sx", "sy"]
def function(self, (X, Y), offs, amp1, mx, my, sx, sy):
"""
Implements the gaussian fitting function.
(see gaussian() and thomasfermi())
"""
self.fitted = amp1*gaussian((X, Y), mx, my, sx, sy) + offs
return self.fitted.ravel()
def guess_par0(self, slc_main, slc_max, slc_bkg):
"""
Implements the gaussian parameter guess from slices.
(see Fitting.guess_gauss_par0())
"""
offs, amp1, mx, my, sx, sy = self.guess_gauss_par0(slc_main,
slc_max,
slc_bkg)
par0 = (offs, amp1, mx, my, sx, sy)
self.par0 = par0
return par0
class ThomasFermi2d(Fitting):
"""
Thomas-Fermi 2D fit (inverted parabola).
"""
def __init__(self, img0, par0=None):
super(ThomasFermi2d, self).__init__(img0, par0)
self.par_names = ["offs", "amp1", "mx", "my", "rx", "ry"]
def function(self, (X, Y), offs, amp2, mx, my, rx, ry):
"""
Implements the Thomas-Fermi fitting function.
(see gaussian() and thomasfermi())
"""
self.fitted = amp2*thomasfermi((X, Y), mx, my, rx, ry) + offs
return self.fitted.ravel()
def guess_par0(self, slc_main, slc_max, slc_bkg):
"""
Implements the Thomas-Fermi parameter guess.
(see Fitting.guess_gauss_par0())
"""
offs, amp1, mx, my, sx, sy = self.guess_gauss_par0(slc_main,
slc_max,
slc_bkg)
par0 = (offs, amp1, mx, my, sx*2.0, sy*2.0)
self.par0 = par0
return par0
class Bimodal2d(Gauss2d, ThomasFermi2d):
"""
Gaussian+Thomas Fermi bimodal 2D fit.
"""
def __init__(self, img0, par0=None):
super(Bimodal2d, self).__init__(img0, par0)
self.par_names = ["offs", "amp1", "mx", "my", "sx", "sy",
"amp2", "rx", "ry"]
def function(self, (X, Y), offs, amp1, mx, my, sx, sy, amp2, rx, ry):
"""
Implements the bimodal fitting function.
(see gaussian() and thomasfermi())
"""
self.fitted = amp1*gaussian((X, Y), mx, my, sx, sy) +\
amp2*thomasfermi((X, Y), mx, my, rx, ry) + offs
return self.fitted.ravel()
def guess_par0(self, slc_main, slc_max, slc_bkg):
"""
Implements the bimodal parameter guess.
(see Fitting.guess_gauss_par0())
"""
offs, amp1, mx, my, sx, sy = self.guess_gauss_par0(slc_main,
slc_max,
slc_bkg)
par0 = (offs, amp1/2.0, mx, my, sx, sy, amp1/2.0, sx*2.0, sy*2.0)
self.par0 = par0
return par0
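# Usage sketch (Python 2, synthetic image; the slice tuples below are
# illustrative (row-slice, column-slice) pairs, not taken from real data):
#
#     y, x = np.mgrid[0:100, 0:100]
#     img = np.exp(-((x - 50)**2 + (y - 50)**2) / (2.0 * 10.0**2)) + 0.01
#     fit = Gauss2d(img)
#     fit.guess_par0((slice(30, 70), slice(30, 70)),
#                    (slice(48, 52), slice(48, 52)),
#                    (slice(0, 10), slice(0, 10)))
#     params = fit.fit()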
|
simondona/img-fft-filter-bec-tn
|
libraries/fitting.py
|
Python
|
gpl-3.0
| 7,664
|
[
"Gaussian"
] |
88738df7593baabd2341056fbfcc132c01d061531ae04b8950fa6877a2a9b105
|
"""
Contains a variety of sensory models, specifically models for the
visual pathway.
"""
import topo
import param
import numbergen
import lancet
import numpy
import imagen
from imagen.patterncoordinator import PatternCoordinator, PatternCoordinatorImages
from topo.base.arrayutil import DivideWithConstant
from topo.submodel import Model, ArraySpec # pyflakes:ignore (API import)
from topo import sheet, transferfn, optimized
from collections import OrderedDict
class SensoryModel(Model):
dims = param.List(default=['xy'],class_=str,doc="""
Stimulus dimensions to include, out of the possible list:
:'xy': Position in x and y coordinates""")
num_inputs = param.Integer(default=2,bounds=(1,None),doc="""
How many input patterns to present per unit area at each
iteration, when using discrete patterns (e.g. Gaussians).""")
class VisualInputModel(SensoryModel):
allowed_dims = ['xy', 'or', 'od', 'dy', 'dr', 'sf']
period = param.Number(default=None, allow_None=True, doc="""
Simulation time between pattern updates on the generator
sheets. If None, the model is allowed to compute an appropriate
value for the period property (a period of 1.0 is typical)""")
dataset = param.ObjectSelector(default='Gaussian',objects=
['Gaussian','Nature','FoliageA','FoliageB'],doc="""
Set of input patterns to use::
:'Gaussian': Two-dimensional Gaussians
:'Nature': Shouval's 1999 monochrome 256x256 images
:'FoliageA': McGill calibrated LMS foliage/ image subset (5)
:'FoliageB': McGill calibrated LMS foliage/ image subset (25)""")
dims = param.List(default=['xy','or'],class_=str,doc="""
Stimulus dimensions to include, out of the possible list:
:'xy': Position in x and y coordinates
:'or': Orientation
:'od': Ocular dominance
:'dy': Disparity
:'dr': Direction of motion
:'sf': Spatial frequency
:'cr': Color (if available, see submodels.color)""")
area = param.Number(default=1.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
Linear size of cortical area to simulate.
2.0 gives a 2.0x2.0 Sheet area in V1.""")
dim_fraction = param.Number(default=0.7,bounds=(0.0,1.0),doc="""
Fraction by which the input brightness varies between the two
eyes. Only used if 'od' in 'dims'.""")
contrast=param.Number(default=70, bounds=(0,100),doc="""
Brightness of the input patterns as a contrast (percent). Only
used if 'od' not in 'dims'.""")
sf_spacing=param.Number(default=2.0,bounds=(1,None),doc="""
Determines the factor by which successive SF channels increase
in size. Only used if 'sf' in 'dims'.""")
sf_channels=param.Integer(default=2,bounds=(1,None),softbounds=(1,4),doc="""
Number of spatial frequency channels. Only used if 'sf' in 'dims'.""")
max_disparity = param.Number(default=4.0,bounds=(0,None),doc="""
Maximum disparity between input pattern positions in the left
and right eye. Only used if 'dy' in 'dims'.""")
num_lags = param.Integer(default=4, bounds=(1,None),doc="""
Number of successive frames before showing a new input
pattern. This also determines the number of connections
between each individual LGN sheet and V1. Only used if 'dr' in
'dims'.""")
speed=param.Number(default=2.0/24.0,bounds=(0,None),
softbounds=(0,3.0/24.0),doc="""
Distance in sheet coordinates between successive frames, when
translating patterns. Only used if 'dr' in 'dims'.""")
align_orientations = param.Boolean(default=None,
allow_None=True, doc="""
Whether or not to align pattern orientations together if
composing multiple patterns together. If None,
align_orientations will be set to True when speed is non-zero
(and 'dr' in dims), otherwise it is set to False.""")
__abstract = True
def property_setup(self, properties):
disallowed_dims = [dim for dim in self.dims if dim not in self.allowed_dims]
if disallowed_dims:
raise Exception('%s not in the list of allowed dimensions'
% ','.join(repr(d) for d in disallowed_dims))
properties = super(VisualInputModel, self).property_setup(properties)
# The default period for most Topographica models is 1.0
properties['period'] = 1.0 if self.period is None else self.period
properties['binocular'] = 'od' in self.dims or 'dy' in self.dims
properties['SF']=range(1,self.sf_channels+1) if 'sf' in self.dims else [1]
properties['lags'] = range(self.num_lags) if 'dr' in self.dims else [0]
if 'dr' in self.dims and not numbergen.RandomDistribution.time_dependent:
numbergen.RandomDistribution.time_dependent = True
self.message('Setting time_dependent to True for motion model.')
return properties
def training_pattern_setup(self, **overrides):
# all the below will eventually end up in PatternCoordinator!
disparity_bound = 0.0
position_bound_x = self.area/2.0+0.25
position_bound_y = self.area/2.0+0.25
if 'dy' in self.dims:
disparity_bound = self.max_disparity*0.041665/2.0
#TFALERT: Formerly: position_bound_x = self.area/2.0+0.2
position_bound_x -= disparity_bound
align_orientations = (bool(self.speed) and ('dr' in self.dims)
if self.align_orientations is None
else self.align_orientations)
if 'dr' in self.dims:
position_bound_x+=self.speed*max(self['lags'])
position_bound_y+=self.speed*max(self['lags'])
pattern_labels=['LeftRetina','RightRetina'] if self['binocular'] else ['Retina']
# all the above will eventually end up in PatternCoordinator!
params = dict(features_to_vary=self.dims,
pattern_labels=pattern_labels,
pattern_parameters={'size': 0.088388 if 'or' in self.dims and self.dataset=='Gaussian' \
else 3*0.088388 if self.dataset=='Gaussian' else 10.0,
'aspect_ratio': 4.66667 if 'or' in self.dims else 1.0,
'scale': self.contrast/100.0},
disparity_bound=disparity_bound,
position_bound_x=position_bound_x,
position_bound_y=position_bound_y,
dim_fraction=self.dim_fraction,
reset_period=(max(self['lags'])*self['period'] + self['period']),
speed=self.speed,
align_orientations = align_orientations,
sf_spacing=self.sf_spacing,
sf_max_channel=max(self['SF']),
patterns_per_label=int(self.num_inputs*self.area*self.area))
if self.dataset=='Gaussian':
return PatternCoordinator(**dict(params, **overrides))()
else:
image_folder = 'images/shouval' if self.dataset=='Nature' \
else 'images/mcgill/foliage_a_combined' if self.dataset=='FoliageA' \
else 'images/mcgill/foliage_b_combined' if self.dataset=='FoliageB' \
else None
return PatternCoordinatorImages(image_folder, **dict(params, **overrides))()
@Model.definition
class EarlyVisionModel(VisualInputModel):
retina_density = param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for the retina.""")
lgn_density = param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for the LGN.""")
lgnaff_strength = param.Number(default=2.33, doc="""
Overall strength of the afferent projection from the retina to
the LGN sheets.""")
lgnaff_radius=param.Number(default=0.375,bounds=(0,None),doc="""
Connection field radius of a unit in the LGN level to units in
a retina sheet.""")
lgnlateral_radius=param.Number(default=0.5,bounds=(0,None),doc="""
Connection field radius of a unit in the LGN level to
surrounding units, in case gain control is used.""")
v1aff_radius=param.Number(default=0.27083,bounds=(0,None),doc="""
Connection field radius of a unit in V1 to units in a LGN
sheet.""")
center_size = param.Number(default=0.07385,bounds=(0,None),doc="""
The size of the central Gaussian used to compute the
center-surround receptive field.""")
surround_size = param.Number(default=4*0.07385,bounds=(0,None),doc="""
The size of the surround Gaussian used to compute the
center-surround receptive field.""")
gain_control_size = param.Number(default=0.25,bounds=(0,None),doc="""
The size of the divisive inhibitory suppressive field used for
contrast-gain control in the LGN sheets. This also acts as the
corresponding bounds radius.""")
gain_control = param.Boolean(default=True,doc="""
Whether to use divisive lateral inhibition in the LGN for
contrast gain control.""")
gain_control_SF = param.Boolean(default=True,doc="""
Whether to use divisive lateral inhibition in the LGN for
contrast gain control across Spatial Frequency Sheets.""")
def property_setup(self, properties):
properties = super(EarlyVisionModel, self).property_setup(properties)
sheet.SettlingCFSheet.joint_norm_fn = optimized.compute_joint_norm_totals_cython
center_polarities=['On','Off']
# Useful for setting up sheets
properties['polarities'] = lancet.List('polarity', center_polarities)
properties['eyes'] = (lancet.List('eye', ['Left','Right'])
if properties['binocular'] else lancet.Identity())
properties['SFs'] = (lancet.List('SF', properties['SF'])
if max(properties['SF'])>1 else lancet.Identity())
return properties
def sheet_setup(self):
sheets = OrderedDict()
sheets['Retina'] = self['eyes']
sheets['LGN'] = self['polarities'] * self['eyes'] * self['SFs']
return sheets
@Model.GeneratorSheet
def Retina(self, properties):
return Model.GeneratorSheet.params(
period=self['period'],
phase=0.05,
nominal_density=self.retina_density,
nominal_bounds=sheet.BoundingBox(radius=self.area/2.0
+ self.v1aff_radius*self.sf_spacing**(max(self['SF'])-1)
+ self.lgnaff_radius*self.sf_spacing**(max(self['SF'])-1)
+ self.lgnlateral_radius),
input_generator=self['training_patterns'][properties['eye']+'Retina'
if 'eye' in properties
else 'Retina'])
@Model.SettlingCFSheet
def LGN(self, properties):
channel=properties['SF'] if 'SF' in properties else 1
sf_aff_multiplier = self.sf_spacing**(max(self['SF'])-1) if self.gain_control_SF else \
self.sf_spacing**(channel-1)
gain_control = self.gain_control_SF if 'SF' in properties else self.gain_control
return Model.SettlingCFSheet.params(
mask = topo.base.projection.SheetMask(),
measure_maps=False,
output_fns=[transferfn.misc.HalfRectify()],
nominal_density=self.lgn_density,
nominal_bounds=sheet.BoundingBox(radius=self.area/2.0
+ self.v1aff_radius
* sf_aff_multiplier
+ self.lgnlateral_radius),
tsettle=2 if gain_control else 0,
strict_tsettle=1 if gain_control else 0)
@Model.matchconditions('LGN', 'afferent')
def afferent_conditions(self, properties):
return {'level': 'Retina', 'eye': properties.get('eye',None)}
@Model.SharedWeightCFProjection
def afferent(self, src_properties, dest_properties):
channel = dest_properties['SF'] if 'SF' in dest_properties else 1
centerg = imagen.Gaussian(size=self.center_size*self.sf_spacing**(channel-1),
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
surroundg = imagen.Gaussian(size=self.surround_size*self.sf_spacing**(channel-1),
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
on_weights = imagen.Composite(generators=[centerg,surroundg],operator=numpy.subtract)
off_weights = imagen.Composite(generators=[surroundg,centerg],operator=numpy.subtract)
return Model.SharedWeightCFProjection.params(
delay=0.05,
strength=self.lgnaff_strength,
name='Afferent',
nominal_bounds_template=sheet.BoundingBox(radius=self.lgnaff_radius
*self.sf_spacing**(channel-1)),
weights_generator=on_weights if dest_properties['polarity']=='On' else off_weights)
@Model.matchconditions('LGN', 'lateral_gain_control')
def lateral_gain_control_conditions(self, properties):
return ({'level': 'LGN', 'polarity':properties['polarity']}
if self.gain_control and self.gain_control_SF else
{'level': 'LGN', 'polarity':properties['polarity'],
'SF': properties.get('SF',None)}
if self.gain_control else None)
@Model.SharedWeightCFProjection
def lateral_gain_control(self, src_properties, dest_properties):
#TODO: Are those 0.25 the same as lgnlateral_radius/2.0?
name='LateralGC'
if 'eye' in src_properties:
name+=src_properties['eye']
if 'SF' in src_properties and self.gain_control_SF:
name+=('SF'+str(src_properties['SF']))
return Model.SharedWeightCFProjection.params(
delay=0.05,
dest_port=('Activity'),
activity_group=(0.6,DivideWithConstant(c=0.11)),
weights_generator=imagen.Gaussian(size=self.gain_control_size,
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()]),
nominal_bounds_template=sheet.BoundingBox(radius=self.gain_control_size),
name=name,
strength=0.6/(2 if self['binocular'] else 1))
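# Usage sketch (hypothetical; the exact driver code depends on the
# Topographica Model machinery rather than on this module alone):
#
#     model = EarlyVisionModel(dims=['xy', 'or'], dataset='Gaussian')
#     # ...hand the instantiated model to the simulator as usual...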
|
ioam/topographica
|
topo/submodel/earlyvision.py
|
Python
|
bsd-3-clause
| 15,036
|
[
"Gaussian"
] |
bf669af306e46e01435c2e99b16a50c2d9958f6485032dc9d9bb87c8eb73bb8c
|
import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
def test_kde_algorithm_metric_choice():
"""Smoke test for various metrics and algorithms"""
np.random.seed(0)
X = np.random.random((10, 2)) # 2 features required for haversine dist.
Y = np.random.random((10, 2))
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
if __name__ == '__main__':
import nose
nose.runmodule()
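# Quick standalone sketch (illustrative, independent of the nose tests above):
#
#     X = np.random.random((100, 3))
#     kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
#     log_density = kde.score_samples(X[:5])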
|
depet/scikit-learn
|
sklearn/neighbors/tests/test_kde.py
|
Python
|
bsd-3-clause
| 4,720
|
[
"Gaussian"
] |
08fb640bf21f8b0fc0e4c81c30b4e8c9bf8ba04ecce8ee95ce9bc00ae20a8a3f
|
"""
Utility functions used in ORCA calculations are collected here.
Note that some functions are general and can also be used with other DFT tools.
"""
def findStrings(afile,substr):
lines=[]
with open(afile) as astr:
for line in astr:
if substr in line:
lines.append(line)
return lines
def findFinalEnergy(afile):
lines=[]
substr='FINAL SINGLE POINT ENERGY'
with open(afile) as astr:
for line in astr:
if substr in line:
tmpline=line.replace(substr,'').strip()
lines.append(float(tmpline))
return lines[-1]
def findComputeTime(afile):
timestr=''
substr='TOTAL RUN TIME:'
milisecs=0
# find string with time
with open(afile) as astr:
for line in astr:
if substr in line:
timestr=line.replace(substr,'').strip()
# there is probably a better way of doing this
timestr=timestr.split()
    milisecs+=int(timestr[-2]) # milliseconds
    milisecs+=1000*int(timestr[-4]) # seconds
    milisecs+=60000*int(timestr[-6]) # minutes
    milisecs+=60*60*1000*int(timestr[-8]) # hours
    milisecs+=24*60*60*1000*int(timestr[-10]) # days
return milisecs
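# Usage sketch ('orca_job.out' is a hypothetical ORCA output file):
#
#     energy = findFinalEnergy('orca_job.out')     # last SCF energy, in Hartree
#     runtime_ms = findComputeTime('orca_job.out') # total run time in ms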
|
jskDr/jamespy_py3
|
jorca.py
|
Python
|
mit
| 1,132
|
[
"ORCA"
] |
3849f4c1b643ad6ae9b2128c88ef46a8112198218f94265fc5ff98f10365106e
|
###########################################################################
# Mean prediction from Gaussian Processes based on classifier_libsvm_minimal_modular.py
###########################################################################
from numpy import *
from numpy.random import randn
from shogun.Features import *
from shogun.Classifier import *
from shogun.Kernel import *
num=100
dist=1
width=2.1
C=1
traindata_real=concatenate((randn(2,num)-dist, randn(2,num)+dist), axis=1)
testdata_real=concatenate((randn(2,num)-dist, randn(2,num)+dist), axis=1)
trainlab=concatenate((-ones(num), ones(num)))
testlab=concatenate((-ones(num), ones(num)))
feats_train=RealFeatures(traindata_real)
feats_test=RealFeatures(testdata_real)
kernel=GaussianKernel(feats_train, feats_train, width)
labels=BinaryLabels(trainlab)
gp=GaussianProcessRegression(1.0, kernel, feats_train, labels)
gp.train(feats_train)
out=gp.apply_regression(feats_test).get_labels()
testerr=mean(sign(out)!=testlab)
print(testerr)
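# Note: with dist=1 the two Gaussian clouds overlap, so a small nonzero
# test error is expected from thresholding the regression output at zero.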
|
ratschlab/ASP
|
examples/undocumented/python_modular/regression_gaussian_process_modular.py
|
Python
|
gpl-2.0
| 1,012
|
[
"Gaussian"
] |
9adb4e90c6ccb81f8fa9c922a7930fca1aade720c0e8ba4c1ee46e62786c4c64
|
#!/usr/bin/env python
import sys
import subprocess
from setuptools import setup, find_packages
def check_bwa():
p = subprocess.Popen(['bwa'], stderr=subprocess.PIPE)
for line in p.stderr:
line = line.decode()
if line.startswith('Version:'):
major, minor, sub = line.strip().split()[1].split('.')
sub = sub.split('-')[0]
if int(major) >= 0 and int(minor) >= 7 and int(sub) >= 12:
return True
return False
def check_samtools():
p = subprocess.Popen(['samtools'], stderr=subprocess.PIPE)
for line in p.stderr:
line = line.decode()
if line.startswith('Version:'):
major, minor = line.strip().split()[1].split('.')[:2]
minor = minor.split('-')[0]
if int(major) >= 1 and int(minor) >= 2:
return True
return False
def check_minia():
p = subprocess.Popen(['minia'], stdout=subprocess.PIPE)
for line in p.stdout:
line = line.decode()
if line.startswith('[minia options]'):
return True
return False
def check_LAST():
p = subprocess.Popen(['lastal'], stderr=subprocess.PIPE)
for line in p.stderr:
line = line.decode()
if line.startswith('lastal'):
return True
return False
def check_python():
return sys.hexversion >= 0x20702f0
def check_exonerate():
p = subprocess.Popen(['exonerate'], stdout=subprocess.PIPE)
for line in p.stdout:
line = line.decode()
if line.startswith('exonerate from exonerate'):
major, minor = line.strip().split()[-1].split('.')[:2]
minor = minor.split('-')[0]
if int(major) >= 2 and int(minor) >= 2:
return True
return False
if __name__ == '__main__':
if not check_python(): sys.exit('Dependency problem: python >= 2.7.2 is required')
if not check_bwa(): sys.exit('Dependency problem: bwa >= 0.7.12 not found')
if not check_samtools(): sys.exit('Dependency problem: samtools >= 1.0 not found')
if not check_minia(): sys.exit('Dependency problem: minia not found')
if not check_LAST(): sys.exit('Dependency problem: LAST >= 548 not found')
if not check_exonerate(): sys.exit('Dependency problem: exonerate >= 2.2 not found (required for post-filter)')
setup(
name='TEBreak',
version='1.1',
author='Adam Ewing',
author_email='adam.ewing@gmail.com',
description=("Insertion finder for high throughput sequence data"),
license='MIT',
url='https://github.com/adamewing/tebreak',
scripts=['tebreak/tebreak'],
packages=find_packages(),
install_requires = [
'pysam>=0.8.1',
'bx-python>=0.5.0',
'scipy>=0.14.0',
'numpy>=1.9.0',
'scikit-bio>=0.5.5',
]
)
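# Typical invocation once the external tools checked above are on PATH
# (illustrative):
#
#     python setup.py install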
|
adamewing/tebreak
|
setup.py
|
Python
|
mit
| 2,813
|
[
"BWA",
"pysam",
"scikit-bio"
] |
e443e24f8546f8bb45b6fb1710e000cd1f94ce020a8ec4644615c14bb4923fb8
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in distribution-creation functions.
"""
from warnings import warn
import numpy as np
from astropy import units as u
from .core import Distribution
__all__ = ['normal', 'poisson', 'uniform']
def normal(center, *, std=None, var=None, ivar=None, n_samples,
cls=Distribution, **kwargs):
"""
Create a Gaussian/normal distribution.
Parameters
----------
center : `~astropy.units.Quantity`
The center of this distribution
std : `~astropy.units.Quantity` or `None`
The standard deviation/σ of this distribution. Shape must match and unit
must be compatible with ``center``, or be `None` (if ``var`` or ``ivar``
are set).
var : `~astropy.units.Quantity` or `None`
The variance of this distribution. Shape must match and unit must be
compatible with ``center``, or be `None` (if ``std`` or ``ivar`` are set).
ivar : `~astropy.units.Quantity` or `None`
The inverse variance of this distribution. Shape must match and unit
must be compatible with ``center``, or be `None` (if ``std`` or ``var``
are set).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : ``cls``, usually `Distribution`
The sampled Gaussian distribution.
"""
center = np.asanyarray(center)
if var is not None:
if std is None:
std = np.asanyarray(var)**0.5
else:
raise ValueError('normal cannot take both std and var')
if ivar is not None:
if std is None:
std = np.asanyarray(ivar)**-0.5
else:
raise ValueError('normal cannot take both ivar and '
'and std or var')
if std is None:
raise ValueError('normal requires one of std, var, or ivar')
else:
std = np.asanyarray(std)
randshape = np.broadcast(std, center).shape + (n_samples,)
samples = center[..., np.newaxis] + np.random.randn(*randshape) * std[..., np.newaxis]
return cls(samples, **kwargs)
COUNT_UNITS = (u.count, u.electron, u.dimensionless_unscaled, u.chan, u.bin, u.vox, u.bit, u.byte)
def poisson(center, n_samples, cls=Distribution, **kwargs):
"""
Create a Poisson distribution.
Parameters
----------
center : `~astropy.units.Quantity`
The center value of this distribution (i.e., λ).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : ``cls``, usually `Distribution`
The sampled poisson distribution.
"""
# we convert to arrays because np.random.poisson has trouble with quantities
has_unit = False
if hasattr(center, 'unit'):
has_unit = True
poissonarr = np.asanyarray(center.value)
else:
poissonarr = np.asanyarray(center)
randshape = poissonarr.shape + (n_samples,)
samples = np.random.poisson(poissonarr[..., np.newaxis], randshape)
if has_unit:
if center.unit == u.adu:
            warn('ADUs were provided to poisson. ADUs are not strictly count '
'units because they need the gain to be applied. It is '
'recommended you apply the gain to convert to e.g. electrons.')
elif center.unit not in COUNT_UNITS:
warn('Unit {} was provided to poisson, which is not one of {}, '
'and therefore suspect as a "counting" unit. Ensure you mean '
'to use Poisson statistics.'.format(center.unit, COUNT_UNITS))
# re-attach the unit
samples = samples * center.unit
return cls(samples, **kwargs)
def uniform(*, lower=None, upper=None, center=None, width=None, n_samples,
cls=Distribution, **kwargs):
"""
    Create a Uniform distribution from the lower and upper bounds.
Note that this function requires keywords to be explicit, and requires
either ``lower``/``upper`` or ``center``/``width``.
Parameters
----------
lower : array-like
The lower edge of this distribution. If a `~astropy.units.Quantity`, the
distribution will have the same units as ``lower``.
upper : `~astropy.units.Quantity`
The upper edge of this distribution. Must match shape and if a
`~astropy.units.Quantity` must have compatible units with ``lower``.
center : array-like
The center value of the distribution. Cannot be provided at the same
time as ``lower``/``upper``.
width : array-like
The width of the distribution. Must have the same shape and compatible
units with ``center`` (if any).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : ``cls``, usually `Distribution`
The sampled uniform distribution.
"""
if center is None and width is None:
lower = np.asanyarray(lower)
upper = np.asanyarray(upper)
if lower.shape != upper.shape:
raise ValueError('lower and upper must have consistent shapes')
elif upper is None and lower is None:
center = np.asanyarray(center)
width = np.asanyarray(width)
lower = center - width/2
upper = center + width/2
else:
raise ValueError('either upper/lower or center/width must be given '
'to uniform - other combinations are not valid')
newshape = lower.shape + (n_samples,)
if lower.shape == tuple() and upper.shape == tuple():
width = upper - lower # scalar
else:
width = (upper - lower)[:, np.newaxis]
lower = lower[:, np.newaxis]
samples = lower + width * np.random.uniform(size=newshape)
return cls(samples, **kwargs)
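# Usage sketch (illustrative values, using the units module imported above):
#
#     d = normal(10 * u.km, std=2 * u.km, n_samples=1000)
#     p = poisson(5 * u.count, n_samples=1000)
#     flat = uniform(lower=0 * u.s, upper=1 * u.s, n_samples=1000)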
|
bsipocz/astropy
|
astropy/uncertainty/distributions.py
|
Python
|
bsd-3-clause
| 6,437
|
[
"Gaussian"
] |
c52c1a0fc56b735ae4564e411721ce58c40daeccb1f417d4845e9c1c178863e0
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Zsolt Foldvari
# Copyright (C) 2008-2009 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Printing interface based on Gtk.Print* classes.
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
from math import radians
import logging
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
import cairo
try: # the Gramps-Connect server has no DISPLAY
from gi.repository import GObject
from gi.repository import Gtk
except:
pass
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import PAPER_PORTRAIT
import gramps.plugins.lib.libcairodoc as libcairodoc
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
LOG = logging.getLogger(".GtkPrint")
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# printer settings (might be needed to align for different platforms)
PRINTER_DPI = 72.0
PRINTER_SCALE = 1.0
# the print settings to remember between print sessions
PRINT_SETTINGS = None
# minimum spacing around a page in print preview
MARGIN = 6
# zoom modes in print preview
(ZOOM_BEST_FIT,
ZOOM_FIT_WIDTH,
ZOOM_FREE,) = list(range(3))
#------------------------------------------------------------------------
#
# Converter functions
#
#------------------------------------------------------------------------
def paperstyle_to_pagesetup(paper_style):
"""Convert a PaperStyle instance into a Gtk.PageSetup instance.
@param paper_style: Gramps paper style object to convert
    @type paper_style: PaperStyle
@return: page_setup
@rtype: Gtk.PageSetup
"""
# paper size names according to 'PWG Candidate Standard 5101.1-2002'
# ftp://ftp.pwg.org/pub/pwg/candidates/cs-pwgmsn10-20020226-5101.1.pdf
gramps_to_gtk = {
"Letter": "na_letter",
"Legal": "na_legal",
"A0": "iso_a0",
"A1": "iso_a1",
"A2": "iso_a2",
"A3": "iso_a3",
"A4": "iso_a4",
"A5": "iso_a5",
"B0": "iso_b0",
"B1": "iso_b1",
"B2": "iso_b2",
"B3": "iso_b3",
"B4": "iso_b4",
"B5": "iso_b5",
"B6": "iso_b6",
"B": "na_ledger",
"C": "na_c",
"D": "na_d",
"E": "na_e",
}
# First set the paper size
gramps_paper_size = paper_style.get_size()
gramps_paper_name = gramps_paper_size.get_name()
# All sizes not included in the translation table (even if a standard size)
# are handled as custom format, because we are not intelligent enough.
if gramps_paper_name in gramps_to_gtk:
paper_size = Gtk.PaperSize.new(name=gramps_to_gtk[gramps_paper_name])
LOG.debug("Selected paper size: %s", gramps_to_gtk[gramps_paper_name])
else:
if paper_style.get_orientation() == PAPER_PORTRAIT:
paper_width = gramps_paper_size.get_width() * 10
paper_height = gramps_paper_size.get_height() * 10
else:
paper_width = gramps_paper_size.get_height() * 10
paper_height = gramps_paper_size.get_width() * 10
paper_size = Gtk.PaperSize.new_custom("custom", "Custom Size",
paper_width, paper_height,
Gtk.Unit.MM)
LOG.debug("Selected paper size: (%f,%f)", paper_width, paper_height)
page_setup = Gtk.PageSetup()
page_setup.set_paper_size(paper_size)
# Set paper orientation
if paper_style.get_orientation() == PAPER_PORTRAIT:
page_setup.set_orientation(Gtk.PageOrientation.PORTRAIT)
else:
page_setup.set_orientation(Gtk.PageOrientation.LANDSCAPE)
# Set paper margins
page_setup.set_top_margin(paper_style.get_top_margin() * 10,
Gtk.Unit.MM)
page_setup.set_bottom_margin(paper_style.get_bottom_margin() * 10,
Gtk.Unit.MM)
page_setup.set_left_margin(paper_style.get_left_margin() * 10,
Gtk.Unit.MM)
page_setup.set_right_margin(paper_style.get_right_margin() * 10,
Gtk.Unit.MM)
return page_setup
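# Usage sketch (hypothetical PaperStyle instance taken from a report):
#
#     page_setup = paperstyle_to_pagesetup(paper_style)
#     print_operation.set_default_page_setup(page_setup)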
#------------------------------------------------------------------------
#
# PrintPreview class
#
#------------------------------------------------------------------------
class PrintPreview:
"""Implement a dialog to show print preview.
"""
zoom_factors = {
0.50: '50%',
0.75: '75%',
1.00: '100%',
1.25: '125%',
1.50: '150%',
1.75: '175%',
2.00: '200%',
3.00: '300%',
4.00: '400%',
}
def __init__(self, operation, preview, context, parent):
self._operation = operation
self._preview = preview
self._context = context
self._parent = parent
self.__build_window()
self._current_page = None
# Private
def __build_window(self):
"""Build the window from Glade.
"""
from gramps.gui.glade import Glade
glade_xml = Glade()
self._window = glade_xml.toplevel
self._window.set_transient_for(self._parent)
# remember active widgets for future use
self._swin = glade_xml.get_object('swin')
self._drawing_area = glade_xml.get_object('drawingarea')
self._first_button = glade_xml.get_object('first')
self._prev_button = glade_xml.get_object('prev')
self._next_button = glade_xml.get_object('next')
self._last_button = glade_xml.get_object('last')
self._pages_entry = glade_xml.get_object('entry')
self._pages_label = glade_xml.get_object('label')
self._zoom_fit_width_button = glade_xml.get_object('zoom_fit_width')
self._zoom_fit_width_button.set_stock_id('gramps-zoom-fit-width')
self._zoom_best_fit_button = glade_xml.get_object('zoom_best_fit')
self._zoom_best_fit_button.set_stock_id('gramps-zoom-best-fit')
self._zoom_in_button = glade_xml.get_object('zoom_in')
self._zoom_in_button.set_stock_id('gramps-zoom-in')
self._zoom_out_button = glade_xml.get_object('zoom_out')
self._zoom_out_button.set_stock_id('gramps-zoom-out')
# connect the signals
glade_xml.connect_signals(self)
self._drawing_area.connect("draw", self.on_drawingarea_draw_event)
##def create_surface(self):
##return cairo.PDFSurface(StringIO(),
##self._context.get_width(),
##self._context.get_height())
##def get_page(self, page_no):
##"""Get the cairo surface of the given page.
##Surfaces are also cached for instant access.
##"""
##if page_no >= len(self._page_numbers):
##LOG.debug("Page number %d doesn't exist." % page_no)
##page_no = 0
##if page_no not in self._page_surfaces:
##surface = self.create_surface()
##cr = cairo.Context(surface)
##if PRINTER_SCALE != 1.0:
##cr.scale(PRINTER_SCALE, PRINTER_SCALE)
##self._context.set_cairo_context(cr, PRINTER_DPI, PRINTER_DPI)
##self._preview.render_page(self._page_numbers[page_no])
##self._page_surfaces[page_no] = surface
##return self._page_surfaces[page_no]
def __set_page(self, page_no):
if page_no < 0 or page_no >= self._page_no:
return
if self._current_page != page_no:
self._drawing_area.queue_draw()
self._current_page = page_no
self._first_button.set_sensitive(self._current_page)
self._prev_button.set_sensitive(self._current_page)
self._next_button.set_sensitive(self._current_page < self._page_no - 1)
self._last_button.set_sensitive(self._current_page < self._page_no - 1)
self._pages_entry.set_text('%d' % (self._current_page + 1))
def __set_zoom(self, zoom):
self._zoom = zoom
screen_width = int(self._paper_width * self._zoom + 2 * MARGIN)
screen_height = int(self._paper_height * self._zoom + 2 * MARGIN)
self._drawing_area.set_size_request(screen_width, screen_height)
self._drawing_area.queue_draw()
self._zoom_in_button.set_sensitive(self._zoom !=
max(self.zoom_factors))
self._zoom_out_button.set_sensitive(self._zoom !=
min(self.zoom_factors))
def __zoom_in(self):
zoom = [z for z in self.zoom_factors if z > self._zoom]
if zoom:
return min(zoom)
else:
return self._zoom
def __zoom_out(self):
zoom = [z for z in self.zoom_factors if z < self._zoom]
if zoom:
return max(zoom)
else:
return self._zoom
def __zoom_fit_width(self):
width, height, vsb_w, hsb_h = self.__get_view_size()
zoom = width / self._paper_width
if self._paper_height * zoom > height:
zoom = (width - vsb_w) / self._paper_width
return zoom
def __zoom_best_fit(self):
width, height, vsb_w, hsb_h = self.__get_view_size()
zoom = min(width / self._paper_width, height / self._paper_height)
return zoom
def __get_view_size(self):
"""Get the dimensions of the scrolled window.
"""
width = self._swin.get_allocated_width() - 2 * MARGIN
height = self._swin.get_allocated_height() - 2 * MARGIN
if self._swin.get_shadow_type() != Gtk.ShadowType.NONE:
width -= 2 * self._swin.get_style().xthickness
height -= 2 * self._swin.get_style().ythickness
spacing = GObject.Value()
spacing.init(GObject.TYPE_INT)
spacing = self._swin.style_get_property('scrollbar-spacing', spacing)
if spacing:
spacing = spacing.get_int()
else:
spacing = 0
reqmin, req = self._swin.get_vscrollbar().get_preferred_size()
vsb_w = spacing + req.width
reqmin, req = self._swin.get_hscrollbar().get_preferred_size()
hsb_h = spacing + req.height
return width, height, vsb_w, hsb_h
def __end_preview(self):
self._operation.end_preview()
# Signal handlers
def on_drawingarea_draw_event(self, drawing_area, context):
cr = context
#cr.rectangle(event.area)
#cr.clip()
# get the extents of the page and the screen
paper_w = int(self._paper_width * self._zoom)
paper_h = int(self._paper_height * self._zoom)
width, height, vsb_w, hsb_h = self.__get_view_size()
if paper_h > height:
width -= vsb_w
if paper_w > width:
height -= hsb_h
# put the paper on the middle of the window
xtranslate = MARGIN
if paper_w < width:
xtranslate += (width - paper_w) / 2
ytranslate = MARGIN
if paper_h < height:
ytranslate += (height - paper_h) / 2
cr.translate(xtranslate, ytranslate)
# draw an empty white page
cr.set_source_rgb(1.0, 1.0, 1.0)
cr.rectangle(0, 0, paper_w, paper_h)
cr.fill_preserve()
cr.set_source_rgb(0, 0, 0)
cr.set_line_width(1)
cr.stroke()
if self._orientation == Gtk.PageOrientation.LANDSCAPE:
cr.rotate(radians(-90))
cr.translate(-paper_h, 0)
##page_setup = self._context.get_page_setup()
##cr.translate(page_setup.get_left_margin(Gtk.Unit.POINTS),
##page_setup.get_top_margin(Gtk.Unit.POINTS))
##cr.set_source_surface(self.get_page(0))
##cr.paint()
# draw the content of the currently selected page
        # Here we use dpi scaling instead of scaling the cairo context,
        # because it gives a better result: scaling the context caused the
        # glyph spacing to change.
dpi = PRINTER_DPI * self._zoom
self._context.set_cairo_context(cr, dpi, dpi)
self._preview.render_page(self._current_page)
def on_swin_size_allocate(self, scrolledwindow, allocation):
if self._zoom_mode == ZOOM_FIT_WIDTH:
self.__set_zoom(self.__zoom_fit_width())
if self._zoom_mode == ZOOM_BEST_FIT:
self.__set_zoom(self.__zoom_best_fit())
def on_print_clicked(self, toolbutton):
pass
def on_first_clicked(self, toolbutton):
self.__set_page(0)
def on_prev_clicked(self, toolbutton):
self.__set_page(self._current_page - 1)
def on_next_clicked(self, toolbutton):
self.__set_page(self._current_page + 1)
def on_last_clicked(self, toolbutton):
self.__set_page(self._page_no - 1)
def on_entry_activate(self, entry):
try:
new_page = int(entry.get_text()) - 1
except ValueError:
new_page = self._current_page
if new_page < 0 or new_page >= self._page_no:
new_page = self._current_page
self.__set_page(new_page)
def on_zoom_fit_width_toggled(self, toggletoolbutton):
if toggletoolbutton.get_active():
self._zoom_best_fit_button.set_active(False)
self._zoom_mode = ZOOM_FIT_WIDTH
self.__set_zoom(self.__zoom_fit_width())
else:
self._zoom_mode = ZOOM_FREE
def on_zoom_best_fit_toggled(self, toggletoolbutton):
if toggletoolbutton.get_active():
self._zoom_fit_width_button.set_active(False)
self._zoom_mode = ZOOM_BEST_FIT
self.__set_zoom(self.__zoom_best_fit())
else:
self._zoom_mode = ZOOM_FREE
def on_zoom_in_clicked(self, toolbutton):
self._zoom_fit_width_button.set_active(False)
self._zoom_best_fit_button.set_active(False)
self._zoom_mode = ZOOM_FREE
self.__set_zoom(self.__zoom_in())
def on_zoom_out_clicked(self, toolbutton):
self._zoom_fit_width_button.set_active(False)
self._zoom_best_fit_button.set_active(False)
self._zoom_mode = ZOOM_FREE
self.__set_zoom(self.__zoom_out())
def on_window_delete_event(self, widget, event):
self.__end_preview()
return False
def on_quit_clicked(self, toolbutton):
self.__end_preview()
self._window.destroy()
# Public
def start(self):
# get paper/page dimensions
page_setup = self._context.get_page_setup()
self._paper_width = page_setup.get_paper_width(Gtk.Unit.POINTS)
self._paper_height = page_setup.get_paper_height(Gtk.Unit.POINTS)
self._page_width = page_setup.get_page_width(Gtk.Unit.POINTS)
self._page_height = page_setup.get_page_height(Gtk.Unit.POINTS)
self._orientation = page_setup.get_orientation()
# get the total number of pages
##self._page_numbers = [0,]
##self._page_surfaces = {}
self._page_no = self._operation.get_property('n_pages')
self._pages_label.set_text(_('of %d') % self._page_no)
# set zoom level and initial page number
self._zoom_mode = ZOOM_FREE
self.__set_zoom(1.0)
self.__set_page(0)
        # let the show begin...
self._window.show()
#------------------------------------------------------------------------
#
# GtkPrint class
#
#------------------------------------------------------------------------
class GtkPrint(libcairodoc.CairoDoc):
"""Print document via GtkPrint* interface.
Requires Gtk+ 2.10.
"""
def run(self):
"""Run the Gtk Print operation.
"""
global PRINT_SETTINGS
# get a page setup from the paper style we have
page_setup = paperstyle_to_pagesetup(self.paper)
# set up a print operation
operation = Gtk.PrintOperation()
operation.set_default_page_setup(page_setup)
operation.connect("begin_print", self.on_begin_print)
operation.connect("draw_page", self.on_draw_page)
operation.connect("paginate", self.on_paginate)
operation.connect("preview", self.on_preview)
# set print settings if it was stored previously
if PRINT_SETTINGS is not None:
operation.set_print_settings(PRINT_SETTINGS)
# run print dialog
while True:
self.preview = None
res = operation.run(Gtk.PrintOperationAction.PRINT_DIALOG,
self.uistate.window)
if self.preview is None: # cancel or print
break
# set up printing again; can't reuse PrintOperation?
operation = Gtk.PrintOperation()
operation.set_default_page_setup(page_setup)
operation.connect("begin_print", self.on_begin_print)
operation.connect("draw_page", self.on_draw_page)
operation.connect("paginate", self.on_paginate)
operation.connect("preview", self.on_preview)
# set print settings if it was stored previously
if PRINT_SETTINGS is not None:
operation.set_print_settings(PRINT_SETTINGS)
# store print settings if printing was successful
if res == Gtk.PrintOperationResult.APPLY:
PRINT_SETTINGS = operation.get_print_settings()
def on_begin_print(self, operation, context):
"""Setup environment for printing.
"""
# get data from context here only once to save time on pagination
self.page_width = round(context.get_width())
self.page_height = round(context.get_height())
self.dpi_x = context.get_dpi_x()
self.dpi_y = context.get_dpi_y()
def on_paginate(self, operation, context):
"""Paginate the whole document in chunks.
"""
layout = context.create_pango_layout()
finished = self.paginate(layout,
self.page_width,
self.page_height,
self.dpi_x,
self.dpi_y)
# update page number
operation.set_n_pages(len(self._pages))
# start preview if needed
if finished and self.preview:
self.preview.start()
return finished
def on_draw_page(self, operation, context, page_nr):
"""Draw the requested page.
"""
cr = context.get_cairo_context()
layout = context.create_pango_layout()
width = round(context.get_width())
height = round(context.get_height())
dpi_x = context.get_dpi_x()
dpi_y = context.get_dpi_y()
self.draw_page(page_nr, cr, layout, width, height, dpi_x, dpi_y)
def on_preview(self, operation, preview, context, parent):
"""Implement custom print preview functionality.
"""
        ##if constfunc.win():
##return False
self.preview = PrintPreview(operation, preview, context, parent)
# give a dummy cairo context to Gtk.PrintContext,
# PrintPreview will update it with the real one
try:
width = int(round(context.get_width()))
except ValueError:
width = 0
try:
height = int(round(context.get_height()))
except ValueError:
height = 0
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
cr = cairo.Context(surface)
context.set_cairo_context(cr, PRINTER_DPI, PRINTER_DPI)
return True
|
SNoiraud/gramps
|
gramps/plugins/docgen/gtkprint.py
|
Python
|
gpl-2.0
| 20,880
|
[
"Brian"
] |
de3f058e14e231ae1d54b59ba44a470cd9fed047c30a100b25b0da52488b77b0
|
import sgmllib
import Bio.File
"""
The LocusLink site is:
http://www.ncbi.nlm.nih.gov/LocusLink/
Parses a Locus web page.
"""
import warnings
warnings.warn("Bio.LocusLink was deprecated, as NCBI's LocusLink was superseded by Entrez Gene. If you still need this module, please get in touch with the Biopython developers (biopython-dev@biopython.org) to avoid permanent removal of this module", DeprecationWarning)
def is_empty_container( item ):
response = 0
if is_container( item ):
if len( item ) == 0:
response = 1
return response
def is_container( item ):
response = 0
if type( item ) in [ type( [] ), type( {} ) ]:
response = 1
return response
def is_substring( a, b ):
if( a.find( b ) < 0 ):
return 0
else:
return 1
def print_params( params ):
print '%s!!!!!\n' % 'PARAMS'
for item in params:
print 'param ' + str( item )
print '-----------------'
def process_list( params ):
len_params = len( params )
container = []
while 1:
try:
element = params.pop()
        except IndexError:
break
if is_close_token( element ): break
elif is_open_token( element ):
break
else:
container.append( element )
return container
def put( dict, key, val ):
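    # Accumulate values for repeated keys: a second value for the same key
    # becomes a two-element list, and further values nest, e.g.
    # [[first, second], third].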
if dict.has_key( key ):
element = dict[ key ]
dict[ key ] = [ element, val ]
else:
dict[ key ] = val
def process_dict( params ):
container = {}
while len( params ) > 0:
element = params.pop()
if type( element ) == type( {} ):
for key, val in element.items():
put( container, key, val )
elif is_close_token( element ): break
elif is_open_token( element ):
params.append( element )
else:
val = params.pop()
if type( val ) == type( [] ):
if len( val ) == 1:
val = val[ 0 ]
try:
put( container, element, val )
except:
print 'Element'
print element
params.append( element )
elif( not is_close_token( val ) ):
try:
put( container, element, val )
except:
print 'Element'
print element
params.append( element )
else:
break
return container
class Token:
def __init__( self, token ):
self.token = token
def __eq__( self, other ):
if not isinstance( other, self.__class__ ):
return 0
if self.token == other.token:
return 1
return 0
def __ne__( self, other ):
if not isinstance( other, Token ):
return 1
if self.token != other.token:
return 1
return 0
def __str__( self ):
output = 'token_%s\n' % self.token
return output
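# Sentinel tokens: these are pushed onto the parser's structure stack to
# mark where a list or dict starts/ends, so process_structure_stack() can
# later fold the items between them into the right container.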
open_list = Token( 'open_list' )
close_list = Token( 'close_list' )
open_dict = Token( 'open_dict' )
close_dict = Token( 'close_dict' )
def is_open_token( target ):
answer = 0
if isinstance( target, Token ):
if ( open_list.__eq__( target ) ) or ( open_dict.__eq__(
target ) ):
answer = 1
return answer
def is_close_token( target ):
answer = 0
if isinstance( target, Token ):
if ( close_list.__eq__( target ) ) or ( close_dict.__eq__(
target ) ):
answer = 1
return answer
def is_token( target ):
return is_open_token( target ) or is_close_token( target )
class Url:
def __init__( self, url, label = '', description = '' ):
self.url = url
self.label = label
self.description = description
def __str__( self ):
output = '%s\n' % self.label
output = output + 'url = %s\n' % self.url
output = output + '%s\n' % self.description
return output
class Record(dict):
def __init__( self ):
dict.__init__( self )
def __str__( self ):
queue_keys = self.keys()
queue_keys.sort()
out = ''
for key in queue_keys:
out = out + '%s:\n' % key.upper()
out = out + self.print_item( self[ key ] )
out = out + '\n'
return out
def print_item( self, item, level = 1 ):
indent = ' '
out = ''
for j in range( 0, level ):
indent = indent + ' '
if( type( item ) == type( '' ) ):
if( item != '' ):
out = out + '%s%s\n' % ( indent, item )
elif( type( item ) == type([])):
for subitem in item:
out = out + self.print_item( subitem, level + 1 )
out = out + '----------------------------------------------\n'
elif( type( item ) == type ( {} ) ):
keys = item.keys()
keys.sort()
for subitem in keys:
out = out + '%skey is %s\n' % ( indent, subitem )
out = out + self.print_item( item[ subitem ], level + 1 )
elif( isinstance( item, dict ) ):
keys = item.keys()
keys.sort()
for subitem in keys:
out = out + '%skey is %s\n' % ( indent, subitem )
out = out + self.print_item( item[ subitem ], level + 1 )
else:
out = out + '%s%s\n' % ( indent, str( item ) )
return out
class LocusLinkParser( sgmllib.SGMLParser ):
def reset( self ):
sgmllib.SGMLParser.reset( self )
self.text = ''
self.record = Record()
self.open_tag_stack = []
self.open_tag = 'open_html'
self.outer_state = 'undefined'
self.section_state = 'undefined'
self.local_title = ''
self.structure_stack = []
self.category = ''
self.context_chain = []
self.outer_state_dict = { 'nomenclature' : 'nomenclature', 'overview' : 'overview', \
'function' : 'function', \
'relationships' : 'relationships', \
'locus' : 'locus', \
'map' : 'map', \
'refseq' : 'refseq', \
'genbank' : 'genbank', \
'external' : 'external_annotation', \
'additional' : 'additional_links' \
}
def parse( self, handle ):
self.reset()
self.feed( handle )
return self.record
#
# Assumes an empty line between records
#
def feed( self, handle ):
if isinstance(handle, Bio.File.UndoHandle):
uhandle = handle
else:
uhandle = Bio.File.UndoHandle(handle)
text = ''
while 1:
line = uhandle.readline()
if not line:
break
text = text + ' ' + line
sgmllib.SGMLParser.feed( self, text )
def get_text( self ):
text = self.text
self.text = ''
return text
def handle_comment( self, comment ):
while comment.startswith( '-' ):
comment = comment[ 1: ]
comment = comment.strip()
comment = comment.lower()
keys = self.outer_state_dict.keys()
for key in keys:
if comment.startswith( key ):
if key in [ 'nomenclature', 'overview', 'function',
'relationships', 'map', 'locus', 'external' ]:
self.structure_stack.append( open_dict )
elif key in [ 'genbank', 'additional' ]:
self.structure_stack.append( open_list )
elif key in [ 'refseq' ]:
self.structure_stack.append( open_list )
self.outer_state = key
self.section_state = 'local_title'
self.detail_state = 'undefined'
if( key == 'refseq' ):
self.detail_state = 'waiting_category'
else:
self.detail_state = 'waiting_key'
break
if comment.startswith( 'end' ):
if is_substring( comment.lower(), self.outer_state ):
if self.outer_state == 'refseq':
self.structure_stack.append( close_list )
elif self.outer_state == 'function':
self.structure_stack.append( close_list )
self.structure_stack.append( close_dict )
self.process_structure_stack()
while 1:
try:
item = self.structure_stack.pop()
                    except IndexError:
item = 'Not Available'
if not is_token( item ) : break
key = self.outer_state
self.record[ self.outer_state_dict[ key ] ] = item
self.outer_state = 'undefined'
def handle_data(self, newtext ):
newtext = newtext.strip()
self.text = self.text + newtext
def start_a( self, attrs ):
self.open_tag_stack.append( self.open_tag )
self.open_tag = 'open_a'
attr_dict = {}
for key, val in attrs:
attr_dict[ key ] = val
outer_state = self.outer_state
if( outer_state in [ 'nomenclature', 'overview', 'relationships', 'locus', 'map', 'genbank', 'refseq', 'additional', 'external' ] ):
if self.section_state == 'local_contents':
if self.detail_state in [ 'scan_val', 'unpaired_key' ]:
if attr_dict.has_key( 'href' ):
href = attr_dict[ 'href' ]
self.text = ''
self.structure_stack.append( Url( href, '' ) )
elif outer_state == 'function':
if self.section_state == 'local_contents':
if self.detail_state in [ 'scan_val', 'unpaired_key', 'may_be_val' ]:
if attr_dict.has_key( 'href' ):
href = attr_dict[ 'href' ]
self.text = ''
self.structure_stack.append( Url( href, '' ) )
def end_a( self ):
try:
self.open_tag = self.open_tag_stack.pop()
        except IndexError:
self.open_tag = 'open_html'
outer_state = self.outer_state
if( outer_state in [ 'nomenclature', 'overview', 'relationships', 'locus', 'map', 'refseq', 'genbank', 'additional', 'external' ] ):
if self.section_state == 'local_contents':
if self.detail_state in [ 'scan_val', 'unpaired_key' ]:
text = self.get_text()
url = self.structure_stack.pop()
if isinstance( url, Url ):
url.label = text
self.structure_stack.append( url )
elif outer_state == 'function':
if self.section_state == 'local_contents':
if self.detail_state in [ 'scan_val', 'unpaired_key',
'may_be_val' ]:
text = self.get_text()
url = self.structure_stack.pop()
if isinstance( url, Url ):
url.label = text
self.structure_stack.append( url )
def start_b( self, attrs ):
self.open_tag_stack.append( self.open_tag )
self.open_tag = 'open_b'
outer_state = self.outer_state
if( outer_state in [ 'nomenclature', 'overview', 'function', 'relationships', 'locus', 'map', 'refseq', 'genbank', 'additional', 'external' ] ):
self.text = ''
def end_b( self ):
try:
self.open_tag = self.open_tag_stack.pop()
        except IndexError:
self.open_tag = 'open_html'
outer_state = self.outer_state
if( outer_state in [ 'nomenclature', 'overview', 'function', 'relationships', 'locus', 'map', 'refseq', 'genbank', 'additional', 'external' ] ):
if self.section_state == 'local_contents':
text = self.get_text()
cols = text.split( ':', 1 )
key = cols[ 0 ]
if( outer_state == 'refseq' ):
self.structure_stack.append( cols[ 1 ] )
self.structure_stack.append( open_dict )
self.detail_state = 'waiting_key'
elif outer_state == 'relationships':
self.structure_stack.append( key )
self.structure_stack.append( open_list )
self.detail_state = 'skip'
elif outer_state == 'additional':
self.structure_stack.append( open_dict )
self.structure_stack.append( key )
self.structure_stack.append( open_list )
self.detail_state = 'unpaired_key'
elif outer_state == 'function':
if self.detail_state != 'waiting_key':
self.structure_stack.append( close_list )
self.structure_stack.append( key )
self.detail_state = 'unpaired_key'
self.structure_stack.append( open_list )
self.structure_stack.append( open_list )
try:
val = cols[ 1 ]
if val.strip() != '':
self.structure_stack.append( val )
self.detail_state = 'unpaired_key'
except IndexError:
pass
else:
if self.detail_state != 'waiting_key':
self.structure_stack.append( close_list )
self.detail_state = 'scan_val'
self.structure_stack.append( key )
self.structure_stack.append( open_list )
self.structure_stack.append( open_list )
try:
val = cols[ 1 ]
if val.strip() != '':
self.structure_stack.append( val )
except IndexError:
pass
def start_th( self, attrs ):
self.open_tag_stack.append( self.open_tag )
self.open_tag = 'open_th'
outer_state = self.outer_state
self.text = ''
if outer_state in [ 'function', 'relationships', 'map', 'locus', 'genbank', 'additional', 'external' ]:
if self.section_state == 'local_contents':
self.detail_state = 'scan_headings'
def end_th( self ):
try:
self.open_tag = self.open_tag_stack.pop()
        except IndexError:
self.open_tag = 'open_html'
outer_state = self.outer_state
if outer_state == 'refseq':
if self.section_state == 'local_contents':
text = self.get_text()
cols = text.strip().split( ':', 1 )
if text.strip().lower().startswith( 'category' ):
self.structure_stack.append( open_dict )
self.structure_stack.append( cols[ 1 ] )
self.structure_stack.append( open_list )
self.structure_stack.append( open_dict )
self.detail_state = 'found_category'
elif self.detail_state in [ 'found_category', 'may_be_val' ]:
if text.strip() != '':
if self.detail_state != 'found_category':
self.structure_stack.append( close_list )
cols = text.split( ':' )
self.structure_stack.append( cols[ 0 ] )
self.structure_stack.append( open_list )
try:
val = cols[ 1 ]
self.structure_stack.append( open_list )
self.structure_stack.append( val )
self.detail_state = 'scan_val'
except IndexError:
self.detail_state = 'may_be_val'
def start_table( self, attrs ):
self.open_tag_stack.append( self.open_tag )
self.open_tag = 'open_table'
self.text = ''
if self.outer_state == 'genbank':
if self.section_state == 'local_contents':
self.detail_state = 'skip'
elif( self.outer_state in [ 'nomenclature', 'overview', 'relationships', 'locus', 'map', 'genbank', 'additional', 'external' ] ):
if self.section_state == 'local_contents':
self.detail_state = 'waiting_key'
def end_table( self ):
try:
self.open_tag = self.open_tag_stack.pop()
        except IndexError:
self.open_tag = 'open_html'
if( self.section_state == 'local_title' ):
if self.outer_state == 'refseq':
self.section_state = 'local_contents'
elif self.outer_state == 'additional':
self.section_state = 'local_contents'
self.detail_state = 'scan_val'
else:
self.section_state = 'local_contents'
self.detail_state = 'waiting_key'
elif self.section_state == 'local_contents':
if( self.outer_state in [ 'nomenclature', 'relationships', 'locus', 'map', 'external' ] ):
self.structure_stack.append( close_list )
elif ( self.outer_state in [ 'genbank', 'additional' ] ):
if self.detail_state == 'scan_val':
self.structure_stack.append( close_list )
elif self.outer_state == 'refseq':
if self.detail_state in ['may_be_val', 'scan_val' ]:
self.structure_stack.append( close_list )
self.structure_stack.append( close_dict )
self.structure_stack.append( close_list )
self.structure_stack.append( close_dict )
self.detail_state = 'scan_category'
def start_tr( self, attrs ):
top = self.open_tag
self.open_tag_stack.append( self.open_tag )
if top == 'open_table_row':
if self.outer_state == 'refseq':
if self.section_state == 'local_contents':
if self.detail_state in [ 'scan_val', ]:
self.structure_stack.append( close_list )
self.detail_state = 'may_be_val'
self.open_tag_stack.pop()
self.open_tag = 'open_table_row'
self.text = ''
outer_state = self.outer_state
if( outer_state in [ 'relationships', 'locus', 'function', 'genbank', 'external'
] ):
if self.section_state == 'local_contents':
if self.detail_state == 'scan_val':
self.structure_stack.append( open_list )
elif outer_state == 'map':
if self.section_state == 'local_contents':
if self.detail_state == 'scan_val':
self.structure_stack.append( open_list )
elif outer_state == 'additional':
if self.section_state == 'local_contents':
self.detail_state = 'scan_val'
self.structure_stack.append( open_list )
def end_tr( self ):
try:
self.open_tag = self.open_tag_stack.pop()
        except IndexError:
self.open_tag = 'open_html'
if self.section_state == 'local_contents':
if( self.outer_state in [ 'overview', 'nomenclature', 'relationships',
'locus', 'genbank', 'external' ] ):
if self.detail_state == 'scan_val':
self.structure_stack.append( close_list )
elif self.detail_state == 'unpaired_key':
self.structure_stack.append( close_list )
elif self.detail_state == 'skip':
self.detail_state = 'scan_val'
elif self.detail_state == 'scan_headings':
self.detail_state = 'scan_val'
elif self.outer_state in [ 'additional', ]:
if self.detail_state == 'unpaired_key':
self.structure_stack.append( close_list )
self.structure_stack.append( close_dict )
self.structure_stack.append( close_list )
elif self.detail_state == 'scan_val':
self.structure_stack.append( close_list )
elif self.outer_state in [ 'function', ]:
if self.detail_state == 'scan_headings':
self.detail_state = 'scan_val'
elif self.detail_state == 'unpaired_key':
self.detail_state = 'may_be_val'
self.structure_stack.append( close_list )
elif self.detail_state == 'scan_val':
self.detail_state = 'may_be_val'
self.structure_stack.append( close_list )
elif self.outer_state in [ 'refseq', ]:
if self.section_state == 'local_contents':
if self.detail_state == 'scan_val':
self.structure_stack.append( close_list )
self.detail_state = 'may_be_val'
elif self.outer_state == 'map':
if self.section_state == 'local_contents':
if self.detail_state == 'scan_val':
self.structure_stack.append( close_list )
self.detail_state = 'may_be_val'
def start_td( self, attrs ):
self.open_tag_stack.append( self.open_tag )
self.open_tag = 'open_table_data'
if self.outer_state in [ 'nomenclature', 'overview', 'relationships', 'map', 'locus', 'genbank', 'additional', 'external' ]:
if( self.section_state == 'local_contents' ):
self.text = ''
elif self.outer_state == 'refseq':
if self.section_state == 'local_contents':
self.text = ''
if self.detail_state == 'may_be_val':
self.structure_stack.append( open_list )
self.detail_state = 'scan_val'
def end_td( self ):
try:
self.open_tag = self.open_tag_stack.pop()
        except IndexError:
self.open_tag = 'open_html'
if self.outer_state in [ 'nomenclature', 'overview', 'relationships', 'locus', 'genbank', 'additional', 'external' ]:
if( self.section_state == 'local_contents' ):
if self.detail_state == 'scan_val':
text = self.get_text()
if( text != '' ):
self.structure_stack.append( text )
elif self.outer_state == 'function':
if self.section_state == 'local_contents':
text = self.get_text()
if( text != '' ):
if self.detail_state == 'may_be_val':
if text.strip() != '':
self.structure_stack.append( open_list )
self.detail_state = 'scan_val'
if self.detail_state in [ 'unpaired_key', 'scan_val' ]:
self.structure_stack.append( text )
elif self.outer_state == 'map':
if self.section_state == 'local_contents':
text = self.get_text()
if( text != '' ):
if self.detail_state == 'may_be_val':
if text.strip() != '':
self.structure_stack.append( open_list )
self.detail_state = 'scan_val'
if self.detail_state == 'scan_val':
self.structure_stack.append( text )
elif self.outer_state == 'refseq':
if self.section_state == 'local_contents':
if self.detail_state == 'scan_val':
text = self.get_text()
if( text != '' ):
self.add_text_to_object( text )
def do_br( self, attrs ):
if self.outer_state in [ 'nomenclature', 'overview', 'function', 'relationships', 'map', 'locus', 'genbank', 'additional', 'external' ]:
if( self.section_state == 'local_contents' ):
if self.detail_state == 'scan_val':
if self.is_contained_by( 'open_table_data' ):
text = self.get_text()
if( text != '' ):
self.structure_stack.append( text )
def add_text_to_object( self, text ):
stack_item = self.structure_stack.pop()
if isinstance( stack_item, Url ):
if stack_item.description == '':
stack_item.description = text
self.structure_stack.append( stack_item )
else:
self.structure_stack.append( stack_item )
self.structure_stack.append( text )
def is_contained_by( self, tag ):
return tag in self.open_tag_stack
def process_structure_stack( self ):
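        # Repeatedly reduce the structure stack: pop items until an open
        # token is reached, fold them into a list or dict via process_list/
        # process_dict, push the container back, and repeat until only
        # fully reduced containers remain.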
params = []
outer_state = self.outer_state
if outer_state in [ 'nomenclature', 'overview', 'function', 'relationships', 'refseq', 'locus', 'map', 'genbank', 'additional', 'external' ]:
while len( self.structure_stack ) > 1:
len_stack = len( self.structure_stack )
# self.print_stack()
for i in range ( 0, len_stack ):
item = self.structure_stack.pop()
if not is_open_token( item ):
params.append( item )
else: break
if( open_list.__eq__( item ) ):
container = process_list( params )
params.append( container )
else:
container = process_dict( params )
if len( container ) > 0:
params.append( container )
if ( len( self.structure_stack ) == 0 ) or is_open_token(
self.structure_stack[ -1 ] ):
for j in range( 0, len( params ) ):
item = params.pop()
self.structure_stack.append( item )
params = []
def print_stack( self ):
print '%s!!!!!\n' % self.outer_state.upper()
for stack_item in self.structure_stack:
print 'stack has ' + str( stack_item )
print '-----------------'
if( __name__ == '__main__' ):
    handle = open( 'Hs13225.htm')
    undo_handle = Bio.File.UndoHandle( handle )
    locuslink_parser = LocusLinkParser()
    record = locuslink_parser.parse( undo_handle )
print record
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/LocusLink/web_parse.py
|
Python
|
apache-2.0
| 26,686
|
[
"Biopython"
] |
7cf57d65cd640cbc1003d7c9d5f6c8171613f4edf81f1bcff1d94696581f3a1d
|
#!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
import os
# MUGQIC Modules
from core.config import *
from core.job import *
def blastdbcmd(
entry_file,
entry_cmd,
outfile
):
return Job(
[entry_file],
[outfile],
[
['blast_blastdbcmd', 'module_blast']
],
command="""\
blastdbcmd \\
-db {blast_db} \\
-entry {entry_cmd} \\
-outfmt %f \\
> {outfile}""".format(
blast_db=config.param('blast_blastdbcmd', 'blast_db', type='prefixpath'),
entry_cmd=entry_cmd,
outfile=outfile
))
def blastn_on_db(db, query, output, other_options=""):
return Job(
[query],
[output],
[['DEFAULT', 'module_blast']],
command="""\
blastn {other_options} \\
-db {db} \\
-query {query} \\
-out {output}""".format(
other_options=other_options,
db=db,
query=query,
output=output
))
def dcmegablast(
infile_fasta,
outfmt,
outfile,
coverage_bed,
outdir
):
tmp_outfile = os.path.splitext(outfile)[0] + ".all.tmp"
return Job(
[infile_fasta, coverage_bed],
[outfile, os.path.join(outdir, "blastCov.tsv"), os.path.join(outdir, "contigsCoverage.tsv")],
[
['blast_dcmegablast', 'module_blast'],
['blast_dcmegablast', 'module_R'],
['blast_dcmegablast', 'module_mugqic_tools']
],
command="""\
blastn -task dc-megablast \\
-query {infile_fasta} \\
-outfmt "{outfmt} qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore stitle sskingdoms sscinames scomnames" \\
-out {tmp_outfile} \\
-max_target_seqs {max_target_seqs} \\
-num_threads {threads} \\
-db {blast_db} && \\
pacBioKeepBlastBestHits.pl \\
--infile {tmp_outfile} \\
--n {max_target_seqs} \\
> {outfile} && \\
pacBioMergeCovToBlast.R \\
-c {coverage_bed} \\
-b {outfile} \\
-o {outdir}""".format(
infile_fasta=infile_fasta,
outfmt=outfmt,
tmp_outfile=tmp_outfile,
max_target_seqs=config.param('blast_dcmegablast', 'max_target_seqs', type='posint'),
threads=config.param('blast_dcmegablast', 'threads', type='posint'),
blast_db=config.param('blast_dcmegablast', 'blast_db', type='prefixpath'),
outfile=outfile,
coverage_bed=coverage_bed,
outdir=outdir
))
# Parallel blast using fasta chunks
def parallel_blast(fasta, query, blast, program, db, cpu):
return(Job(
[fasta],
[blast],
[['blast', 'module_perl'], ['blast', 'module_mugqic_tools'], ['blast', 'module_blast']],
command="""\
parallelBlast.pl \\
-file {query} \\
--OUT {blast} \\
-n {cpu} \\
--BLAST "{program} -db {db} -max_target_seqs 1 -outfmt '6 std stitle'" """.format(
query=query,
blast=blast,
cpu=cpu,
program=program,
db=db
))
)
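# Example (hypothetical values; assumes an ini file with the referenced
# [blast]/[DEFAULT] sections and modules configured):
#   job = blastn_on_db("nt", "contigs.fasta", "contigs.blastn",
#                      other_options="-evalue 1e-5")
#   # job describes the input/output files, required modules and the shell
#   # command for the pipeline scheduler to run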
|
ccmbioinfo/mugqic_pipelines
|
bfx/blast.py
|
Python
|
lgpl-3.0
| 3,916
|
[
"BLAST"
] |
25114f00809092ca5cb24679ff0950eae36f0031e8e8f0fd5cea40890685b40b
|
#########
# BatPrepare.py
# Lithium-ion batteries lose capacity if they are stored fully charged.
# This program rotates the drone's propellers (without making it fly) until the battery reaches 50% capacity.
# Dependencies: a POSIX OS, PS-Drone-API 2.0 beta or higher.
# (w) J. Philipp de Graaff, www.playsheep.de, 2015
##########
# LICENCE:
# Artistic License 2.0 as seen on http://opensource.org/licenses/artistic-license-2.0 (retrieved December 2014)
# Visit www.playsheep.de/drone or see the PS-Drone-API-documentation for an abstract from the Artistic License 2.0.
###########
##### Suggested clean drone startup sequence #####
import time
import api.ps_drone as ps_drone # Import PS-Drone-API
drone = ps_drone.Drone() # Start using drone
drone.startup() # Connects to drone and starts subprocesses
while (drone.getBattery()[0] == -1): time.sleep(0.1) # Wait until the drone has done its reset
print "Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1]) # Gives a battery-status
drone.useDemoMode(True)                                            # Just give me 15 basic datasets per second (the default anyway)
time.sleep(0.5)                                                    # Give the drone time to wake up fully
pwm = 100
status = 100
stop = False
odrive = False
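# Interactive loop: spin the props until the battery drops to 50%.
# Keys: '+'/'-' adjust PWM, 'o' toggles overdrive (raises the PWM cap
# from 115 to 255), any other key aborts.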
while status>50 and not stop:
status=drone.getBattery()[0]
drone.printLineUp()
print "Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1])+" PWM: "+str(pwm)+" Overdrive:"+str(odrive)+" "
time.sleep(0.1)
key=drone.getKey()
if key!="":
if key=="+": pwm += 1
elif key=="-": pwm -= 1
elif key=="o" and odrive: odrive = False
elif key=="o" and not odrive: odrive = True
else: stop = True
if pwm>115 and not odrive: pwm = 115
if pwm>255 and odrive: pwm = 255
if pwm<50: pwm = 50
drone.thrust(pwm,pwm,pwm,pwm)
drone.thrust(0,0,0,0)
if stop: print "Aborted !"
else: print "Done"
|
reixd/ps-drone
|
tools/batPrepare.py
|
Python
|
artistic-2.0
| 2,122
|
[
"VisIt"
] |
7a591910615a4ad54064c598f781b2e75aa4315e9fce3af6ebe4be808cf51002
|
# This script reads the carrier database
# and display it along a path in histogram form
# along with a representation of the carriers in energy space
from __future__ import print_function
from yambopy import *
# explicit imports for clarity (np and plt may also be provided by the
# star import above)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.optimize import curve_fit
import os
############
# SETTINGS #
############
folder = 'rt-24x24'
calc = 'QSSIN-D-100.0fs-2.07eV-300K-DG' # Where RT carrier output is
path = [[0.0,0.0,0.0],[0.5,0.0,0.0],[0.33333,0.33333,0.0],[0.0,0.0,0.0]]
nbv = 2 ; nbc = 2 # nb of valence and conduction bands
########
# INIT #
########
# For saving pictures
os.system('mkdir -p occupations_v2/%s/%s'%(folder,calc))
# Instance containing bandstructure (as used in RT sim) and occupations
yrt = YamboRTDB(folder=folder,calc=calc)
yrt.get_path(path) # Generates kindex and distances
### aliases
times = [i * 1e15 for i in yrt.times] # carriers output times, in fs
nbands = yrt.nbands # number of bands in the RT simulation
if nbv+nbc != nbands:
    raise ValueError('Incompatible number of bands; set nbv and nbc in this script.')
## 'path-plot' variables
kindex = yrt.bands_indexes # kpoint indexes (in order) to draw path
distances = yrt.bands_distances
eigenvalues = yrt.eigenvalues[kindex,:] # eigenvalues of the bands included in the RT simulation
#
max_occ = np.amax(yrt.occupations[:,kindex,:]) # used to size the distribution plots
occupations = yrt.occupations[:,kindex,:]/max_occ # format time,kindex,band index (from 0 to nbands, only on path)
#
##
## 'fit' variables and function
# FD distrib for fit
def fermi_dirac(E,a,T): # declare E first for fit
return 1/(1+np.exp((E-a)/T))
#
KtoeV = 8.61733e-5
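# With energies in eV, the fitted T parameter is k_B*T in eV; dividing by
# KtoeV (the Boltzmann constant in eV/K) converts it to Kelvin, as done in
# the plot labels below.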
#
# xeng is an array of values to plot the fit properly
xeng = np.linspace(np.amin(eigenvalues[:,list(range(nbv))]), np.amax(eigenvalues[:,list(range(nbv,nbands))]),1000)
##
##############
# EXT. FIELD #
##############
# The external field is read from the o- file
ext = np.loadtxt('%s/%s/pulse/o-pulse.external_field'%(folder,calc))
field = ext[:,2]/max(abs(ext[:,2])) # polarization : x=1,y=2,z=3
##################
# ENERGY DISTRIB #
##################
# Sort the (n,k) pairs between positive and negative energies
# (If the same energy appears twice, it must not be summed over)
list_e=[] ; list_h=[]
for k in range(yrt.nkpoints):
for n in range(yrt.nbands):
e = yrt.eigenvalues[k,n]
if e<=0.0:
list_h.append((k,n))
else:
list_e.append((k,n))
# Map (k,n) to a single index for e and h
# then get the list of indices to sort the energies
nrj = np.zeros((len(list_e)))
for i,(k,n) in enumerate(list_e):
nrj[i]=yrt.eigenvalues[k,n]
sorted_e = np.argsort(nrj)
nrj = np.zeros((len(list_h)))
for i,(k,n) in enumerate(list_h):
nrj[i]=yrt.eigenvalues[k,n]
sorted_h = np.argsort(nrj)
# Build the occupation tables occ_x[t,(nk)_index,(e|occ)]
occ_e = np.zeros((len(times),len(list_e),2))
for t in range(len(times)):
for i,(k,n) in enumerate(list_e):
occ_e[t,i,0]=yrt.eigenvalues[k,n]
occ_e[t,i,1]=yrt.occupations[t,k,n]
occ_h = np.zeros((len(times),len(list_h),2))
for t in range(len(times)):
for i,(k,n) in enumerate(list_h):
occ_h[t,i,0]=yrt.eigenvalues[k,n]
occ_h[t,i,1]=yrt.occupations[t,k,n]
# Sorting
for t in range(len(times)):
occ_e[t,:,:]=occ_e[t,sorted_e,:]
occ_h[t,:,:]=occ_h[t,sorted_h,:]
# *(-1) on holes to fit the same way as electrons
occ_h *= -1
#################
# BAR PLOT DATA #
#################
# Fill arrays with occupation of valence and conduction bands
# Recall that 'occupations' was normalized
# one entry per band
occ_v = np.zeros((len(times),len(kindex),nbv))
occ_c = np.zeros((len(times),len(kindex),nbc))
for n in range(nbv):
occ_v[:,:,n] = -occupations[:,:,n] # minus sign to get positive occupations
for n in range(nbc):
occ_c[:,:,n] = occupations[:,:,n+nbv] # +nbv to read CBs
####################
# TIME LOOP & PLOT #
####################
# Gridspec allows placing subplots on a grid;
# spacing, for example, can be customised
gs = gridspec.GridSpec(9, 8)
# y range for band structure & energy plots
ymin_v= np.amin(eigenvalues[:,:nbv])-0.1
ymin_c= np.amin(eigenvalues[:,nbv:])-0.1
ymax_v= np.amax(eigenvalues[:,:nbv])+0.1
ymax_c= np.amax(eigenvalues[:,nbv:])+0.1
###
for t in range(len(times)):
#for t in (30,):
i=t
print(times[i])
name = 'occupations_v2/'+folder+'/'+calc+'/%d.png' % (times[t])
fig = plt.figure()
fig.suptitle('Occupation of the bands and fit to the Fermi-Dirac distribution',fontsize=14,ha='center')
####### bandstructure w/ occupation plot
ax1c = plt.subplot(gs[0:4,0:-2])
ax1v = plt.subplot(gs[4:8,0:-2])
# remove x ticks
ax1c.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
ax1v.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
# set x range
ax1c.set_xlim((0,distances[-1]))
ax1v.set_xlim((0,distances[-1]))
# y range is defined with ax3 and ax4 (they share y axis with ax1)
# Plot band structure
ax1v.plot(distances,eigenvalues[:,:nbv],'k-',lw=2,zorder=0)
ax1c.plot(distances,eigenvalues[:,nbv:],'k-',lw=2,zorder=0)
# VB
for n in range(nbv):
ax1v.scatter(distances,eigenvalues[:,n],s=400*occ_v[t,:,n],color='blue',alpha=0.5)
# CB
for n in range(nbc):
ax1c.scatter(distances,eigenvalues[:,nbv+n],s=400*occ_c[t,:,n],color='red',alpha=0.5)
# text and labels
fig.text(0.05,0.6,'Energy (eV)',size=16,rotation='vertical')
fig.text(0.50,0.91, '%d fs'%times[t],size=16)
######## field plot
ax2 = plt.subplot(gs[-1,:])
# remove ticks and labels
ax2.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
ax2.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')
# text
ax2.set_ylabel('Field')
# frame size
ax2.set_xlim((0,times[-1]))
ax2.set_ylim((-1.3,1.3))
ax2.plot(field[:int(times[t])])
## Plot of the occupation as a function of energy (rotated to match the band structure)
ax3 = plt.subplot(gs[0:4,-2:],sharey=ax1c)
ax4 = plt.subplot(gs[4:8,-2:],sharey=ax1v)
# plot the data
try: # does not break if fit is not found
fit,cov = curve_fit(fermi_dirac,occ_e[i,:,0],occ_e[i,:,1])
except RuntimeError:
fit=np.array([0,0])
ax3.fill_betweenx(occ_e[i,:,0],0,occ_e[i,:,1],color='red')
ax3.plot(fermi_dirac(xeng,fit[0],fit[1]),xeng,'k-')
ax3.text(0.5,0.9,'Electrons\nT = %d K'%(fit[1]/KtoeV),transform=ax3.transAxes,ha='center',va='center')
try:
fit,cov = curve_fit(fermi_dirac,occ_h[i,:,0],occ_h[i,:,1])
except RuntimeError:
fit=np.array([0,0])
ax4.fill_betweenx(-occ_h[i,:,0],0,occ_h[i,:,1],color='blue')
ax4.plot(fermi_dirac(xeng,fit[0],fit[1]),-xeng,'k-')
ax4.text(0.5,0.1,'Holes\nT = %d K'%(fit[1]/KtoeV),transform=ax4.transAxes,ha='center',va='center')
# set x and y range
ax4.set_xlim(-0.1*max_occ,1.1*max_occ)
ax3.set_xlim(-0.1*max_occ,1.1*max_occ)
ax3.set_ylim(( ymin_c,ymax_c ))
ax4.set_ylim(( ymin_v,ymax_v ))
# hide some ticks/labels
ax3.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
ax3.tick_params(axis='y',labelleft='off',labelright='off')
ax4.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
ax4.tick_params(axis='y',labelleft='off',labelright='off')
plt.savefig( name ,transparent=False,dpi=300)
print(name)
#plt.show()
plt.close(fig)
|
alexandremorlet/yambopy
|
scripts/realtime/plot_occ2.py
|
Python
|
bsd-3-clause
| 7,553
|
[
"DIRAC"
] |
edf26ecc4492eab6cb8656914ac8484cdb5de57c254c4206b6aa3a97db1e9f0f
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test module for MPI-IO.
"""
import espressomd
import espressomd.io
from espressomd.interactions import AngleHarmonic
import numpy
import unittest as ut
import random
import os
from argparse import Namespace
# Number of particles
npart = 10
# Number of different bond types
nbonds = 100
filename = "testdata.mpiio"
exts = ["head", "pref", "id", "type", "pos", "vel", "boff", "bond"]
filenames = [filename + "." + ext for ext in exts]
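# MPI-IO dumps one file per field; check_files_exist() below verifies that
# a write produced every one of these files.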
def clean_files():
for f in filenames:
if os.path.isfile(f):
os.remove(f)
def randint_different_from(a, b, n):
"""Returns a random integer in [a, b) that is not n."""
r = n
while r == n:
r = random.randint(a, b - 1)
return r
def random_particles():
"""Returns a list of random particle descriptions."""
parts = []
for i in range(npart):
p = Namespace()
p.id = i
p.type = random.randint(0, 100)
p.pos = numpy.random.rand(3)
p.v = numpy.random.rand(3)
p.bonds = []
# Up to 20 bonds; otherwise this test will take ages
for _ in range(random.randint(0, 20)):
btype = random.randint(0, nbonds - 1)
# Don't create loops, i.e. exclude "i" itself
p1 = randint_different_from(0, npart, i)
p2 = randint_different_from(0, npart, i)
p.bonds.append((btype, p1, p2))
parts.append(p)
return parts
class MPIIOTest(ut.TestCase):
"""
Test class for the MPI-IO core functionality.
    Generates random particles, dumps them, reads them back in,
    and then checks the result against the initially created random
    particles.
"""
s = espressomd.system.System(box_l=[1, 1, 1])
# Just a bunch of random interactions such that add_bond does not throw
for i in range(nbonds):
s.bonded_inter[i] = AngleHarmonic(bend=i, phi0=i)
test_particles = random_particles()
def setUp(self):
"""Sets up a system from test_particles and prepares environment
for the tests."""
clean_files() # Prior call might not have completed successfully
for p in self.test_particles:
self.s.part.add(id=p.id, type=p.type, pos=p.pos, v=p.v)
for b in p.bonds:
self.s.part[p.id].add_bond(b)
def tearDown(self):
clean_files()
def check_files_exist(self):
"""Checks if all necessary files have been written."""
for fn in filenames:
self.assertTrue(os.path.isfile(fn))
def check_sample_system(self):
"""Checks the particles in the ESPResSo system "self.s" against the
true values in "self.test_particles"."""
for p, q in zip(self.s.part, self.test_particles):
self.assertEqual(p.id, q.id)
self.assertEqual(p.type, q.type)
numpy.testing.assert_array_equal(numpy.copy(p.pos), q.pos)
numpy.testing.assert_array_equal(numpy.copy(p.v), q.v)
self.assertEqual(len(p.bonds), len(q.bonds))
# Check all bonds
for bp, bq in zip(p.bonds, q.bonds):
# Bond type - "bend" stores the index of the bond
self.assertEqual(bp[0].params["bend"], bq[0])
# Bond partners
numpy.testing.assert_array_equal(bp[1:], bq[1:])
def test_mpiio(self):
espressomd.io.mpiio.mpiio.write(
filename, types=True, positions=True, velocities=True, bonds=True)
self.check_files_exist()
self.s.part.clear() # Clear to be on the safe side
espressomd.io.mpiio.mpiio.read(
filename, types=True, positions=True, velocities=True, bonds=True)
self.check_sample_system()
if __name__ == '__main__':
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/mpiio.py
|
Python
|
gpl-3.0
| 4,479
|
[
"ESPResSo"
] |
8d7a0c73123414891197720ebb4e58d8e55d4709f0bd5ad52433306d0741402d
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRnaseqmap(RPackage):
"""rnaSeq secondary analyses
The rnaSeqMap library provides classes and functions to analyze the RNA-
sequencing data using the coverage profiles in multiple samples at a
time"""
homepage = "https://bioconductor.org/packages/rnaSeqMap"
git = "https://git.bioconductor.org/packages/rnaSeqMap.git"
version('2.48.0', commit='a8c515e518cebf571d1524c3a8a986ba7d1557db')
version('2.42.0', commit='3a3a1030cc38d79d04536e0ab16114e4fa6721cf')
version('2.40.1', commit='c122d645b3503fb1a061f5515e4f8cf2863b3ba3')
version('2.38.0', commit='5eb9583bfacd375161739a8ae6057204487f8b9e')
version('2.36.0', commit='69c46fa467be0ac30776ede85a521f7622539b7e')
version('2.34.0', commit='7881bc00600ed824ac437edf3cfba35573261e46')
depends_on('r@2.11.0:', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-genomicalignments', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-edger', type=('build', 'run'))
depends_on('r-deseq', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-rnaseqmap/package.py
|
Python
|
lgpl-2.1
| 1,493
|
[
"Bioconductor"
] |
30804a63f179094568dc0faab60bf8698228424a772b615fe953c6008e7c6aa8
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import datetime
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
from nova.tests import uuidsentinel
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
'parent_addr': '0000:00:01.0',
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
'parent_addr': '0000:00:01.0',
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_PF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
},
{
'label': 'label_8086_7891',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None,
'parent_addr': '0000:08:01.0',
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_VF
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_PF
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None,
'dev_type': fields.PciDeviceType.SRIOV_VF
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
@mock.patch('stevedore.enabled.EnabledExtensionManager')
def setUp(self, _mock_ext_mgr):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self._set_pci_passthrough_whitelist()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _set_pci_passthrough_whitelist(self):
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"uuid": uuidsentinel.fake_compute_node,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'cpu_allocation_ratio': None,
'ram_allocation_ratio': None,
'disk_allocation_ratio': None,
}
if values:
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
'forced_down': False,
'version': 0,
}
return service
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
instance_type = objects.Flavor(**instance_type)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_by_host_and_node(self, context, host, nodename,
expected_attrs=None):
return objects.InstanceList(
objects=[i for i in self._instances.values() if i['host'] == host])
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stub_out('nova.db.compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stub_out('nova.db.compute_node_update',
self._fake_compute_node_update)
self.stub_out('nova.db.compute_node_delete',
self._fake_compute_node_delete)
self.stub_out('nova.db.migration_update',
self._fake_migration_update)
self.stub_out('nova.db.migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
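        # migrations in these terminal states are no longer "in progress"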
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = list(self._migrations.values())[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = getattr(tracker.compute_node, field)
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
        disk_free = driver.local_gb - driver.local_gb_used
        self.assertEqual(disk_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(len(expected),
len(self.tracker.compute_node.pci_device_pools))
for expected_pool, actual_pool in zip(
expected, self.tracker.compute_node.pci_device_pools):
self.assertEqual(expected_pool, actual_pool)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
def test_unset_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(inst)
self.tracker._unset_instance_host_and_node(inst)
self.assertEqual(2, mock_save.call_count)
self.assertIsNone(inst.host)
self.assertIsNone(inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look
        # for differences rather than to rely on the initial state being
        # the same as an update
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look
        # for differences rather than to rely on the initial state being
        # the same as an update
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instances_with_live_migrations(self, mock_instance_list,
mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
mock_instance_list.return_value = [instance]
with mock.patch.object(self.tracker, '_pair_instances_to_migrations'
) as mock_pair:
self.tracker.update_available_resource(self.context)
self.assertTrue(mock_pair.called)
self.assertEqual(
instance.uuid,
mock_pair.call_args_list[0][0][0][0].instance_uuid)
self.assertEqual(instance.uuid,
mock_pair.call_args_list[0][0][1][0].uuid)
self.assertEqual(
['system_metadata', 'numa_topology', 'flavor',
'migration_context'],
mock_instance_list.call_args_list[0][1]['expected_attrs'])
self.assertEqual(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(ROOT_GB + EPHEMERAL_GB,
self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
def test_pair_instances_to_migrations(self):
migrations = [objects.Migration(instance_uuid=uuidsentinel.instance1),
objects.Migration(instance_uuid=uuidsentinel.instance2)]
instances = [objects.Instance(uuid=uuidsentinel.instance2),
objects.Instance(uuid=uuidsentinel.instance1)]
self.tracker._pair_instances_to_migrations(migrations, instances)
order = [uuidsentinel.instance1, uuidsentinel.instance2]
for i, migration in enumerate(migrations):
self.assertEqual(order[i], migration.instance.uuid)
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_sets_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class _MoveClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(_MoveClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
self.claim_method = self.tracker._move_claim
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get, mock_save):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.claim_method(
self.context, self.instance, self.instance_type, limits=limits)
mock_save.assert_called_once_with()
mock_save.reset_mock()
instance2 = self._fake_instance_obj()
self.claim_method(
self.context, instance2, self.instance_type, limits=limits)
mock_save.assert_called_once_with()
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_move_type_not_tracked(self, mock_get, mock_save):
self.claim_method(self.context, self.instance, self.instance_type,
limits=self.limits, move_type="live-migration")
mock_save.assert_called_once_with()
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.Instance.save')
@mock.patch.object(objects.Migration, 'save')
def test_existing_migration(self, save_mock, save_inst_mock):
migration = objects.Migration(self.context, id=42,
instance_uuid=self.instance.uuid,
source_compute='fake-other-compute',
source_node='fake-other-node',
status='accepted',
migration_type='evacuation')
self.claim_method(self.context, self.instance, self.instance_type,
migration=migration)
self.assertEqual(self.tracker.host, migration.dest_compute)
self.assertEqual(self.tracker.nodename, migration.dest_node)
self.assertEqual("pre-migrating", migration.status)
self.assertEqual(1, len(self.tracker.tracked_migrations))
save_mock.assert_called_once_with()
save_inst_mock.assert_called_once_with()
class ResizeClaimTestCase(_MoveClaimTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.claim_method = self.tracker.resize_claim
    def test_move_type_not_tracked(self):
        self.skipTest("Resize_claim already sets the move_type.")
    def test_existing_migration(self):
        self.skipTest("Resize_claim does not support an existing "
                      "migration record.")
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance_obj()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
monitor = mock.MagicMock()
monitor.add_metrics_to_list.side_effect = Exception
self.tracker.monitors = [monitor]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %(mon)s; error: %(exc)s', mock.ANY)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
class FakeCPUMonitor(monitor_base.MonitorBase):
NOW_TS = timeutils.utcnow()
def __init__(self, *args):
super(FakeCPUMonitor, self).__init__(*args)
self.source = 'FakeCPUMonitor'
def get_metric_names(self):
return set(["cpu.frequency"])
def get_metrics(self):
return [("cpu.frequency", 100, self.NOW_TS)]
self.tracker.monitors = [FakeCPUMonitor(None)]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [
{
'timestamp': FakeCPUMonitor.NOW_TS.isoformat(),
'name': 'cpu.frequency',
'value': 100,
'source': 'FakeCPUMonitor'
},
]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = self._create_compute_node()
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.items():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class UpdateUsageFromMigrationsTestCase(BaseTrackerTestCase):
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
def test_no_migrations(self, mock_update_usage):
migrations = []
self.tracker._update_usage_from_migrations(self.context, migrations)
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_instance_not_found(self, mock_get_instance, mock_update_usage):
mock_get_instance.side_effect = exception.InstanceNotFound(
instance_id='some_id',
)
migration = objects.Migration(
context=self.context,
instance_uuid='some_uuid',
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, 'some_uuid')
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_update_usage_called(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_flavor_not_found(self, mock_get_instance, mock_update_usage):
mock_update_usage.side_effect = exception.FlavorNotFound(flavor_id='')
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_not_resizing_state(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.SUSPENDING
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_use_most_recent(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration_2002 = objects.Migration(
id=2002,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2002, 1, 1, 0, 0, 0),
)
migration_2003 = objects.Migration(
id=2003,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2003, 1, 1, 0, 0, 0),
)
migration_2001 = objects.Migration(
id=2001,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2001, 1, 1, 0, 0, 0),
)
self.tracker._update_usage_from_migrations(
self.context, [migration_2002, migration_2003, migration_2001])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration_2003)
|
zhimin711/nova
|
nova/tests/unit/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 56,861
|
[
"exciting"
] |
0e23503459e6743a27ea11a4ad3e2e57b66d7119e25a4b840e1efb477d0cdd89
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Any
from typing import Dict
from ORCA.vars.Links import TriggerLinkActions
from ORCA.vars.Replace import ReplaceVars
from ORCA.utils.TypeConvert import ToUnicode
import ORCA.vars.Globals
import ORCA.Globals as Globals
__all__ = ['DelVar',
'GetVar',
'SetDefVar',
'SetVar',
'ExistLVar',
'ExistVar',
'SetVarWithOutVarTranslation'
]
def _SetVarSub(uVarName:str, oVarValue:Any) -> None:
"""
    Subroutine to set a value for a variable. Internal use only. Triggers will be executed on a change
:param string uVarName: Variable name to use
:param Any oVarValue: Value to set, usually a unicode string, can be any other object
"""
if "$var(" in uVarName:
uVarName = ReplaceVars(uVarName)
NewValue = oVarValue
OldValue = ORCA.vars.Globals.dUserVars.get(uVarName)
ORCA.vars.Globals.dUserVars[uVarName] = NewValue
if OldValue != NewValue:
TriggerLinkActions(uVarName=uVarName)
def SetDefVar(uVarName:str, uVarValue:str, dArray:Dict[str,str]) -> None:
"""
Sets a definition variable
:param str uVarName: The name of the definition variable
:param str uVarValue: The value for the definition variable
:param dict dArray: The array which holds all definition vars for this context
"""
dArray[uVarName] = uVarValue
def SetVar(uVarName:str, oVarValue:Any, uContext:str=u'') -> None:
"""
Sets a specific variable with a given value.
:param str uVarName: Variable name to use. This can be a variable as well
    :param object oVarValue: Value to set, usually a unicode string, can be any other object.
                             If you pass a dict, a variable with its value will get assigned for each dict member (the variable name and the dict key are separated by an underscore)
                             If you pass a list or tuple, a variable with its value will get assigned for each list member as an array element [x], where x starts at 0
:param str uContext: The context for the variable. Internally the context will be added as a prefix to the variable name
"""
SetVarWithOutVarTranslation(ReplaceVars(uVarName), oVarValue, uContext)
def SetVarWithOutVarTranslation(uVarName:str, oVarValue:Any, uContext:str=u'') -> None:
"""
Sets a specific variable with a given value.
:param str uVarName: Variable name to use. This can't be a variable
    :param oVarValue: Value to set, usually a unicode string, can be any other object.
                      If you pass a dict, a variable with its value will get assigned for each dict member (the variable name and the dict key are separated by an underscore)
                      If you pass a list or tuple, a variable with its value will get assigned for each list member as an array element [x], where x starts at 0
:param str uContext: The context for the variable. Internally the context will be added as a prefix to the variable name
"""
uRealVarName:str = uContext + uVarName
if isinstance(oVarValue, str):
_SetVarSub(uVarName = uRealVarName, oVarValue = oVarValue)
return
if type(oVarValue) == dict:
for uKey in oVarValue:
_SetVarSub(uVarName = uRealVarName + u'_' + uKey, oVarValue = oVarValue[uKey])
return
    if type(oVarValue) == list or type(oVarValue) == tuple:
        i = 0
        for uValue in oVarValue:
            # assign each list/tuple member itself under its indexed name
            _SetVarSub(uVarName = uRealVarName + u'_[' + ToUnicode(i) + u']', oVarValue = uValue)
            i += 1
        return
return
_SetVarSub(uVarName = uRealVarName, oVarValue = oVarValue)
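# Illustrative sketch (not part of the original module): how dict and list
# values expand into suffixed variable names, assuming an empty var store.
#
#   SetVarWithOutVarTranslation(u'POS', {u'x': u'1', u'y': u'2'})
#   # -> dUserVars[u'POS_x'] == u'1' and dUserVars[u'POS_y'] == u'2'
#   SetVarWithOutVarTranslation(u'ITEMS', [u'a', u'b'])
#   # -> dUserVars[u'ITEMS_[0]'] == u'a' and dUserVars[u'ITEMS_[1]'] == u'b'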
def DelVar(uVarName:str, uContext:str=u'') -> None:
"""
    Deletes a variable from the internal storage. This intentionally does not trigger a var link action. No error or warning is raised if the var does not exist
:param str uVarName: The variable, which should be deleted
:param str uContext: The context for the variable. Internally the context will be added as a prefix to the variable name
"""
if uContext + uVarName in ORCA.vars.Globals.dUserVars:
del ORCA.vars.Globals.dUserVars[uContext + uVarName]
def GetVar(uVarName:str, uContext:str=u'') -> str:
"""
    Returns the value of a variable. Returns an empty string if the variable does not exist
    :param str uVarName: The variable name from where the value should get returned. Can be a variable name itself
:param str uContext: The context for the variable. Internally the context will be added as a prefix to the variable name
:return: The variable value assigned to the variable. Usually a string, but can be an object as well
"""
if uVarName==u"":
return u''
return ORCA.vars.Globals.dUserVars.get(uContext + ReplaceVars(uVarName),u'')
def ExistLVar(uVarName:str) -> bool:
"""
    Checks if a language var exists
    :param str uVarName: The language variable name to check
    :return: True/False, depending on whether the language var exists
"""
return Globals.oLanguage.dIDToString.get(uVarName) is not None
def ExistVar(uVarName:str, uContext:str=u'') -> bool:
"""
    Checks if a var exists
    :param str uVarName: The variable name to check
    :param str uContext: The context for the variable. Internally the context will be added as a prefix to the variable name
    :return: True/False, depending on whether the var exists
"""
return ORCA.vars.Globals.dUserVars.get(uContext + ReplaceVars(uVarName)) is not None
|
thica/ORCA-Remote
|
src/ORCA/vars/Access.py
|
Python
|
gpl-3.0
| 6,419
|
[
"ORCA"
] |
f6ebcca25569c8deb21f28b8650c7a5d8d22d3c91cb7d9f69571c7a6ffabb963
|
import tkSimpleDialog
import tkMessageBox
from pymol.wizard import Wizard
from pymol import cmd, preset
import os,sys
cwd = os.getcwd() #ensure pymol can find libraries
sys.path.append(cwd)
import selector_prot
import p3d.protein
import p3d.geo
cmd.set_wizard(selector_prot.selector('{name}','{chain}','{resid}','{resid2}'))
cmd.load('{prot}_all.pdb')
#initial view
preset.publication('all')
cmd.show("lines",'resid {resid}')
cmd.hide("sticks")
if {resid} < {resid2}:
cmd.zoom('resid {resid}:{resid2}')
else:
cmd.zoom('resid {resid2}:{resid}')
print {resid},{resid2}
#tkMessageBox.showerror('Testing','Testing')
#new = tkSimpleDialog.askstring('Testing:','TT')
#model = p3d.protein.Protein('2jg4a.pdb')
#print model.output()
#print new
|
tmorrell/SamStruct
|
inputs/pymol_view_prot.py
|
Python
|
gpl-2.0
| 749
|
[
"PyMOL"
] |
a1ad37ad94d59216bfa33ecf871962b4fb4dc6d97961369b4ab71e282d2808d5
|
# -*- coding: utf-8 -*-
#
# ase-espresso documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 25 10:09:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
import sphinx_bootstrap_theme
if sys.version_info.major == 3:
from unittest.mock import MagicMock
else:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['argparse', 'hostlist', 'seaborn',
'matplotlib', 'matplotlib.pyplot', 'matplotlib.colors',
'matplotlib.cm', 'scipy', 'scipy.optimize',
'scipy.interpolate', 'pandas', 'path', 'pexpect']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
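# The mocks above let autodoc import the package without installing these
# heavy dependencies; any attribute access on a mocked module simply
# returns another MagicMock.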
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
module_dir = os.path.normpath(os.path.join(__location__, "../../"))
sys.path.insert(0, os.path.abspath(module_dir))
autosummary_generate = True
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ase-espresso'
copyright = u'2017, Lukasz Mentel'
author = u'Lukasz Mentel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.4'
# The full version, including alpha/beta/rc tags.
release = '0.3.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'bootswatch_theme': 'yeti'}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
try:
from espresso import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ase-espressodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ase-espresso.tex', u'ase-espresso Documentation',
u'Lukasz Mentel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ase-espresso', u'ase-espresso Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ase-espresso', u'ase-espresso Documentation',
author, 'ase-espresso', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
lmmentel/ase-espresso
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 10,437
|
[
"ASE",
"ESPResSo"
] |
8319fe091dbe45e0193de264396c1b2a0ac95544ad8e023ef1a606e436dd5141
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for categorical diffusion and training loop."""
import functools
from absl import logging
import flax
import jax
import jax.numpy as jnp
import ml_collections
import numpy as onp
import PIL
import tensorflow.compat.v2 as tf
# for left-multiplication for RGB -> Y'PbPr
RGB_TO_YUV = onp.array([[0.29900, -0.16874, 0.50000],
[0.58700, -0.33126, -0.41869],
[0.11400, 0.50000, -0.08131]])
def normalize_data(x, mode=None):
if mode is None or mode == 'rgb':
return x / 127.5 - 1.
elif mode == 'rgb_unit_var':
return 2. * normalize_data(x, mode='rgb')
elif mode == 'yuv':
return (x / 127.5 - 1.).dot(RGB_TO_YUV)
else:
raise NotImplementedError(mode)
def log_min_exp(a, b, epsilon=1.e-6):
"""Computes the log(exp(a) - exp(b)) (b<a) in a numerically stable fashion."""
y = a + jnp.log1p(-jnp.exp(b - a) + epsilon)
return y
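# Worked example: log_min_exp(jnp.log(3.), jnp.log(1.)) is approximately
# jnp.log(2.), since exp(log 3) - exp(log 1) = 2 (up to the epsilon term).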
def sample_categorical(logits, uniform_noise):
"""Samples from a categorical distribution.
Args:
logits: logits that determine categorical distributions. Shape should be
broadcastable under addition with noise shape, and of the form (...,
num_classes).
uniform_noise: uniform noise in range [0, 1). Shape: (..., num_classes).
Returns:
samples: samples.shape == noise.shape, with samples.shape[-1] equal to
num_classes.
"""
# For numerical precision clip the noise to a minimum value
uniform_noise = jnp.clip(
uniform_noise, a_min=jnp.finfo(uniform_noise.dtype).tiny, a_max=1.)
gumbel_noise = -jnp.log(-jnp.log(uniform_noise))
sample = jnp.argmax(logits + gumbel_noise, axis=-1)
return jax.nn.one_hot(sample, num_classes=logits.shape[-1])
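# Illustrative sketch (not part of the original module): Gumbel-max sampling
# with the helper above.
#
#   rng = jax.random.PRNGKey(0)
#   logits = jnp.log(jnp.array([[0.1, 0.6, 0.3]]))
#   noise = jax.random.uniform(rng, logits.shape)
#   onehot = sample_categorical(logits, noise)  # one-hot, shape (1, 3)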
def categorical_kl_logits(logits1, logits2, eps=1.e-6):
"""KL divergence between categorical distributions.
Distributions parameterized by logits.
Args:
logits1: logits of the first distribution. Last dim is class dim.
logits2: logits of the second distribution. Last dim is class dim.
eps: float small number to avoid numerical issues.
Returns:
KL(C(logits1) || C(logits2)): shape: logits1.shape[:-1]
"""
out = (
jax.nn.softmax(logits1 + eps, axis=-1) *
(jax.nn.log_softmax(logits1 + eps, axis=-1) -
jax.nn.log_softmax(logits2 + eps, axis=-1)))
return jnp.sum(out, axis=-1)
def categorical_kl_probs(probs1, probs2, eps=1.e-6):
"""KL divergence between categorical distributions.
  Distributions parameterized by probabilities.
Args:
probs1: probs of the first distribution. Last dim is class dim.
probs2: probs of the second distribution. Last dim is class dim.
eps: float small number to avoid numerical issues.
Returns:
    KL(C(probs1) || C(probs2)): shape: probs1.shape[:-1]
"""
out = probs1 * (jnp.log(probs1 + eps) - jnp.log(probs2 + eps))
return jnp.sum(out, axis=-1)
def categorical_log_likelihood(x, logits):
"""Log likelihood of a discretized Gaussian specialized for image data.
Assumes data `x` consists of integers [0, num_classes-1].
Args:
x: where to evaluate the distribution. shape = (bs, ...), dtype=int32/int64
logits: logits, shape = (bs, ..., num_classes)
Returns:
log likelihoods
"""
log_probs = jax.nn.log_softmax(logits)
x_onehot = jax.nn.one_hot(x, logits.shape[-1])
return jnp.sum(log_probs * x_onehot, axis=-1)
def meanflat(x):
"""Take the mean over all axes except the first batch dimension."""
return x.mean(axis=tuple(range(1, len(x.shape))))
def global_norm(pytree):
return jnp.sqrt(
jnp.sum(
jnp.asarray([jnp.sum(jnp.square(x)) for x in jax.tree_leaves(pytree)
])))
@functools.partial(jax.jit, static_argnums=(2,))
def _foldin_and_split(rng, foldin_data, num):
return jax.random.split(jax.random.fold_in(rng, foldin_data), num)
def jax_randint(key, minval=0, maxval=2**20):
return int(jax.random.randint(key, shape=(), minval=minval, maxval=maxval))
class RngGen(object):
"""Random number generator state utility for Jax."""
def __init__(self, init_rng):
self._base_rng = init_rng
self._counter = 0
def __iter__(self):
return self
def __next__(self):
return self.advance(1)
def advance(self, count):
self._counter += count
return jax.random.fold_in(self._base_rng, self._counter)
def split(self, num):
self._counter += 1
return _foldin_and_split(self._base_rng, self._counter, num)
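# Illustrative sketch (not part of the original module): typical RngGen use.
#
#   rngs = RngGen(jax.random.PRNGKey(0))
#   step_rng = next(rngs)                    # fresh key per call via fold_in
#   dropout_rng, noise_rng = rngs.split(2)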
def clip_by_global_norm(pytree, clip_norm, use_norm=None):
if use_norm is None:
use_norm = global_norm(pytree)
# assert use_norm.shape == ()
assert not use_norm.shape
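  # Equivalently, scale == min(clip_norm / use_norm, 1.0): values are only
  # rescaled when the global norm exceeds clip_norm.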
scale = clip_norm * jnp.minimum(1.0 / use_norm, 1.0 / clip_norm)
return jax.tree_map(lambda x: x * scale, pytree), use_norm
def apply_ema(decay, avg, new):
return jax.tree_multimap(lambda a, b: decay * a + (1. - decay) * b, avg, new)
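# Worked example: with decay=0.999, each call moves the average 0.1% of the
# way toward the new values: avg <- 0.999 * avg + 0.001 * new.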
def count_params(pytree):
return sum([x.size for x in jax.tree_leaves(pytree)])
def copy_pytree(pytree):
return jax.tree_map(jnp.array, pytree)
def dist(fn, accumulate, axis_name='batch'):
"""Wrap a function in pmap and device_get(unreplicate(.)) its return value."""
if accumulate == 'concat':
accumulate_fn = functools.partial(
allgather_and_reshape, axis_name=axis_name)
elif accumulate == 'mean':
accumulate_fn = functools.partial(jax.lax.pmean, axis_name=axis_name)
elif accumulate == 'none':
accumulate_fn = None
else:
raise NotImplementedError(accumulate)
@functools.partial(jax.pmap, axis_name=axis_name)
def pmapped_fn(*args, **kwargs):
out = fn(*args, **kwargs)
return out if accumulate_fn is None else jax.tree_map(accumulate_fn, out)
def wrapper(*args, **kwargs):
return jax.device_get(
flax.jax_utils.unreplicate(pmapped_fn(*args, **kwargs)))
return wrapper
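# Illustrative sketch (not part of the original module; loss_fn and the
# replicated arguments are hypothetical names):
#
#   eval_step = dist(loss_fn, accumulate='mean')   # pmapped, pmean-reduced
#   loss = eval_step(replicated_params, replicated_batch)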
def allgather_and_reshape(x, axis_name='batch'):
"""Allgather and merge the newly inserted axis w/ the original batch axis."""
y = jax.lax.all_gather(x, axis_name=axis_name)
assert y.shape[1:] == x.shape
return y.reshape(y.shape[0] * x.shape[0], *x.shape[1:])
def write_config_json(config, path):
if tf.io.gfile.exists(path):
return
with tf.io.gfile.GFile(path, 'w') as f:
f.write(config.to_json_best_effort(sort_keys=True, indent=4) + '\n')
def tf_to_numpy(tf_batch):
"""TF to NumPy, using ._numpy() to avoid copy."""
# pylint: disable=protected-access
return jax.tree_map(
lambda x: x._numpy() if hasattr(x, '_numpy') else x,
tf_batch)
def numpy_iter(tf_dataset):
return map(tf_to_numpy, iter(tf_dataset))
@functools.partial(jax.pmap, axis_name='batch')
def _check_synced(pytree):
mins = jax.lax.pmin(pytree, axis_name='batch')
equals = jax.tree_multimap(jnp.array_equal, pytree, mins)
return jnp.all(jnp.asarray(jax.tree_leaves(equals)))
def assert_synced(pytree):
"""Check that `pytree` is the same across all replicas.
Args:
pytree: the pytree to check (should be replicated)
Raises:
RuntimeError: if sync check failed
"""
# assert_synced.problem = pytree
# raise NotImplementedError()
equals = _check_synced(pytree)
assert equals.shape == (jax.local_device_count(),)
equals = all(jax.device_get(equals)) # no unreplicate
logging.info('Sync check result: %d', equals)
if not equals:
raise RuntimeError('Sync check failed!')
@functools.partial(jax.pmap, axis_name='i')
def _barrier(x):
return jax.lax.psum(x, axis_name='i')
def barrier():
"""MPI-like barrier."""
jax.device_get(_barrier(jnp.ones((jax.local_device_count(),))))
def np_tile_imgs(imgs, *, pad_pixels=1, pad_val=255, num_col=0):
"""NumPy utility: tile a batch of images into a single image.
Args:
imgs: np.ndarray: a uint8 array of images of shape [n, h, w, c]
pad_pixels: int: number of pixels of padding to add around each image
pad_val: int: padding value
num_col: int: number of columns in the tiling; defaults to a square
Returns:
np.ndarray: one tiled image: a uint8 array of shape [H, W, c]
"""
if pad_pixels < 0:
raise ValueError('Expected pad_pixels >= 0')
if not 0 <= pad_val <= 255:
raise ValueError('Expected pad_val in [0, 255]')
imgs = onp.asarray(imgs)
if imgs.dtype != onp.uint8:
raise ValueError('Expected uint8 input')
# if imgs.ndim == 3:
# imgs = imgs[..., None]
n, h, w, c = imgs.shape
if c not in [1, 3]:
raise ValueError('Expected 1 or 3 channels')
if num_col <= 0:
# Make a square
ceil_sqrt_n = int(onp.ceil(onp.sqrt(float(n))))
num_row = ceil_sqrt_n
num_col = ceil_sqrt_n
else:
# Make an n/num_col x num_col grid
assert n % num_col == 0
num_row = int(onp.ceil(n / num_col))
imgs = onp.pad(
imgs,
pad_width=((0, num_row * num_col - n), (pad_pixels, pad_pixels),
(pad_pixels, pad_pixels), (0, 0)),
mode='constant',
constant_values=pad_val)
h, w = h + 2 * pad_pixels, w + 2 * pad_pixels
imgs = imgs.reshape(num_row, num_col, h, w, c)
imgs = imgs.transpose(0, 2, 1, 3, 4)
imgs = imgs.reshape(num_row * h, num_col * w, c)
if pad_pixels > 0:
imgs = imgs[pad_pixels:-pad_pixels, pad_pixels:-pad_pixels, :]
if c == 1:
imgs = imgs[Ellipsis, 0]
return imgs
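# Usage sketch (hypothetical `batch` of 16 uint8 images, shape [16, h, w, 3]):
#   grid = np_tile_imgs(batch, pad_pixels=1, pad_val=255, num_col=4)
# yields a single 4x4 image grid with 1px white borders.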
def save_tiled_imgs(filename, imgs, pad_pixels=1, pad_val=255, num_col=0):
PIL.Image.fromarray(
np_tile_imgs(
imgs, pad_pixels=pad_pixels, pad_val=pad_val,
num_col=num_col)).save(filename)
|
google-research/google-research
|
d3pm/images/utils.py
|
Python
|
apache-2.0
| 10,056
|
[
"Gaussian"
] |
85bb0023b21d980ef2a5a545910941e7dd531c16f2cec63899678162a44cced0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
'''
Created on Mar 5, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 5, 2012"
import unittest
import os
from pymatgen.alchemy.transmuters import CifTransmuter, PoscarTransmuter
from pymatgen.alchemy.filters import ContainsSpecieFilter
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, RemoveSpeciesTransformation, \
OrderDisorderedStructureTransformation
from pymatgen.transformations.advanced_transformations import \
SuperTransformation
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class CifTransmuterTest(unittest.TestCase):
def test_init(self):
trans = []
trans.append(SubstitutionTransformation({"Fe": "Mn", "Fe2+": "Mn2+"}))
tsc = CifTransmuter.from_filenames([os.path.join(test_dir,
"MultiStructure.cif")],
trans)
self.assertEqual(len(tsc), 2)
expected_ans = set(["Mn", "O", "Li", "P"])
for s in tsc:
els = set([el.symbol
for el in s.final_structure.composition.elements])
self.assertEqual(expected_ans, els)
class PoscarTransmuterTest(unittest.TestCase):
def test_init(self):
trans = []
trans.append(SubstitutionTransformation({"Fe": "Mn"}))
tsc = PoscarTransmuter.from_filenames([os.path.join(test_dir,
"POSCAR"),
os.path.join(test_dir,
"POSCAR")],
trans)
self.assertEqual(len(tsc), 2)
expected_ans = set(["Mn", "O", "P"])
for s in tsc:
els = set([el.symbol
for el in s.final_structure.composition.elements])
self.assertEqual(expected_ans, els)
def test_transmuter(self):
tsc = PoscarTransmuter.from_filenames(
[os.path.join(test_dir, "POSCAR")])
tsc.append_transformation(RemoveSpeciesTransformation('O'))
self.assertEqual(len(tsc[0].final_structure), 8)
tsc.append_transformation(SubstitutionTransformation({"Fe":
{"Fe2+": 0.25,
"Mn3+": .75},
"P": "P5+"}))
tsc.append_transformation(OrderDisorderedStructureTransformation(),
extend_collection=50)
self.assertEqual(len(tsc), 4)
t = SuperTransformation([SubstitutionTransformation({"Fe2+": "Mg2+"}),
SubstitutionTransformation({"Fe2+": "Zn2+"}),
SubstitutionTransformation({"Fe2+": "Be2+"})])
tsc.append_transformation(t, extend_collection=True)
self.assertEqual(len(tsc), 12)
for x in tsc:
self.assertEqual(len(x), 5, 'something might be wrong with the number of transformations in the history') #should be 4 trans + starting structure
#test the filter
tsc.apply_filter(ContainsSpecieFilter(['Zn2+', 'Be2+', 'Mn4+'],
strict_compare=True, AND=False))
self.assertEqual(len(tsc), 8)
self.assertEqual(tsc.get_transformed_structures()[0].as_dict()[
'history'][-1]['@class'], 'ContainsSpecieFilter')
tsc.apply_filter(ContainsSpecieFilter(['Be2+']))
self.assertEqual(len(tsc), 4)
#Test set_parameter and add_tag.
tsc.set_parameter("para1", "hello")
self.assertEqual(tsc.transformed_structures[0]
.as_dict()['other_parameters']['para1'], 'hello')
tsc.add_tags(["world", "universe"])
self.assertEqual(tsc.transformed_structures[0]
.as_dict()['other_parameters']['tags'],
["world", "universe"])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
sonium0/pymatgen
|
pymatgen/alchemy/tests/test_transmuters.py
|
Python
|
mit
| 4,528
|
[
"pymatgen"
] |
3bfa497c09a950bedfb00a3d32a7386fac3e37d7ea87fedf02457a4d9a17d492
|
# -*- coding: utf-8 -*-
""" Chemical Signalling model loaded into moose can be save into Genesis-Kkit format """
__author__ = "Harsha Rani"
__copyright__ = "Copyright 2017, Harsha Rani and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Harsha Rani"
__email__ = "hrani@ncbs.res.in"
__status__ = "Development"
__updated__ = "Jan 08 2020"
#Jan 8: added a line to add compartment info
# permeability from moose is in uM which to be converted to mM for genesis
#2020
# 2018
# Dec 08: using restoreXreacs from fixXreacs
# Nov 22: searched for _xfer_ instead of xfer
# Nov 21: xfer pool are not written and cleaned up if part of Reaction/Enzyme or notes,
# group are checked under all the mesh
# Oct 16: Channels are written back to genesis
# only zeroth element is taken for written back to genesis, this is true for CubeMesh and CylMesh
# 2017
# Aug 8 : All moose objects which don't have a compartment are not written. Error messages are appended
import sys
import random
import re
import moose
from moose.chemUtil.chemConnectUtil import *
from moose.chemUtil.graphUtils import *
from moose.fixXreacs import restoreXreacs
foundmatplotlib_ = False
try:
import matplotlib
foundmatplotlib_ = True
except Exception as e:
pass
GENESIS_COLOR_SEQUENCE = ((248, 0, 255), (240, 0, 255), (232, 0, 255), (224, 0, 255), (216, 0, 255), (208, 0, 255),
(200, 0, 255), (192, 0, 255), (184, 0, 255), (176, 0, 255), (168, 0, 255), (160, 0, 255), (152, 0, 255), (144, 0, 255),
(136, 0, 255), (128, 0, 255), (120, 0, 255), (112, 0, 255), (104, 0, 255), (96, 0, 255), (88, 0, 255), (80, 0, 255),
(72, 0, 255), (64, 0, 255), (56, 0, 255), (48, 0, 255), (40, 0, 255), (32, 0, 255), (24, 0, 255), (16, 0, 255),
(8, 0, 255), (0, 0, 255), (0, 8, 248), (0, 16, 240), (0, 24, 232), (0, 32, 224), (0, 40, 216), (0, 48, 208),
(0, 56, 200), (0, 64, 192), (0, 72, 184), (0, 80, 176), (0, 88, 168), (0, 96, 160), (0, 104, 152), (0, 112, 144),
(0, 120, 136), (0, 128, 128), (0, 136, 120), (0, 144, 112), (0, 152, 104), (0, 160, 96), (0, 168, 88), (0, 176, 80),
(0, 184, 72), (0, 192, 64), (0, 200, 56), (0, 208, 48), (0, 216, 40), (0, 224, 32), (0, 232, 24), (0, 240, 16), (0, 248, 8),
(0, 255, 0), (8, 255, 0), (16, 255, 0), (24, 255, 0), (32, 255, 0), (40, 255, 0), (48, 255, 0), (56, 255, 0), (64, 255, 0),
(72, 255, 0), (80, 255, 0), (88, 255, 0), (96, 255, 0), (104, 255, 0), (112, 255, 0), (120, 255, 0), (128, 255, 0),
(136, 255, 0), (144, 255, 0), (152, 255, 0), (160, 255, 0), (168, 255, 0), (176, 255, 0), (184, 255, 0), (192, 255, 0),
(200, 255, 0), (208, 255, 0), (216, 255, 0), (224, 255, 0), (232, 255, 0), (240, 255, 0), (248, 255, 0), (255, 255, 0),
(255, 248, 0), (255, 240, 0), (255, 232, 0), (255, 224, 0), (255, 216, 0), (255, 208, 0), (255, 200, 0), (255, 192, 0),
(255, 184, 0), (255, 176, 0), (255, 168, 0), (255, 160, 0), (255, 152, 0), (255, 144, 0), (255, 136, 0), (255, 128, 0),
(255, 120, 0), (255, 112, 0), (255, 104, 0), (255, 96, 0), (255, 88, 0), (255, 80, 0), (255, 72, 0), (255, 64, 0),
(255, 56, 0), (255, 48, 0), (255, 40, 0), (255, 32, 0), (255, 24, 0), (255, 16, 0), (255, 8, 0), (255, 0, 0))
#Todo : To be written
# --StimulusTable
def mooseWriteKkit( modelpath, filename, sceneitems={}):
global foundmatplotlib_
if not foundmatplotlib_:
print('No matplotlib found.'
'\nThis module can be installed with the following command in a terminal:'
'\n\t sudo apt install python-matplotlib', "")
return False
else:
error = ""
if filename.rfind('.') != -1:
filename = filename[:filename.rfind('.')]
else:
filename = filename[:len(filename)]
filename = filename+'.g'
global NA
NA = 6.0221415e23
global cmin,cmax,xmin,xmax,ymin,ymax
cmin, xmin, ymin = 0, 0, 0
cmax, xmax, ymax = 1, 1, 1
moose.fixXreacs.restoreXreacs(modelpath)
compt = moose.wildcardFind(modelpath+'/##[0][ISA=ChemCompt]')
maxVol = estimateDefaultVol(compt)
positionInfoExist = True
if compt:
if bool(sceneitems):
cmin,cmax,xmin1,xmax1,ymin1,ymax1 = findMinMax(sceneitems)
elif not bool(sceneitems):
srcdesConnection = {}
setupItem(modelpath,srcdesConnection)
meshEntry,xmin,xmax,ymin,ymax,positionInfoExist,sceneitems = setupMeshObj(modelpath)
if not positionInfoExist:
#cmin,cmax,sceneitems = autoCoordinates(meshEntry,srcdesConnection)
sceneitems = autoCoordinates(meshEntry,srcdesConnection)
if not positionInfoExist:
# if position are not from kkit, then zoom factor is applied while
# writing to genesis. Like if position is from pyqtSceneItem or auto-coordinates
cmin,cmax,xmin1,xmax1,ymin1,ymax1 = findMinMax(sceneitems)
for k,v in list(sceneitems.items()):
anno = moose.element(k.path+'/info')
#x1 = calPrime(v['x'])
#y1 = calPrime(v['y'])
#sceneitems[k]['x'] = x1
#sceneitems[k]['y'] = y1
f = open(filename, 'w')
writeHeader (f,maxVol)
gtId_vol = writeCompartment(modelpath,compt,f)
errors = ""
error = writePool(modelpath,f,gtId_vol,sceneitems)
errors = errors+error
reacList,error= writeReac(modelpath,f,sceneitems)
errors = errors+error
enzList,error = writeEnz(modelpath,f,sceneitems)
errors = errors+error
chanList, error = writeConcChan(modelpath,f,sceneitems)
errors = errors+error
error = writeStimulus(modelpath,f)
errors = errors+error
error = writeSumtotal(modelpath,f)
errors = errors+error
#writeSumtotal(modelpath,f)
f.write("simundump xgraph /graphs/conc1 0 0 99 0.001 0.999 0\n"
"simundump xgraph /graphs/conc2 0 0 100 0 1 0\n")
tgraphs = moose.wildcardFind(modelpath+'/##[0][ISA=Table2]')
first, second = " ", " "
if tgraphs:
first,second = writeplot(tgraphs,f)
if first:
f.write(first)
f.write("simundump xgraph /moregraphs/conc3 0 0 100 0 1 0\n"
"simundump xgraph /moregraphs/conc4 0 0 100 0 1 0\n")
if second:
f.write(second)
f.write("simundump xcoredraw /edit/draw 0 -6 4 -2 6\n"
"simundump xtree /edit/draw/tree 0 \\\n"
" /kinetics/#[],/kinetics/#[]/#[],/kinetics/#[]/#[]/#[][TYPE!=proto],/kinetics/#[]/#[]/#[][TYPE!=linkinfo]/##[] \"edit_elm.D <v>; drag_from_edit.w <d> <S> <x> <y> <z>\" auto 0.6\n"
"simundump xtext /file/notes 0 1\n")
storeReacMsg(reacList,f)
storeEnzMsg(enzList,f)
storeChanMsg(chanList,f)
if tgraphs:
storePlotMsgs(tgraphs,f)
writeFooter1(f)
writeNotes(modelpath,f)
writeFooter2(f)
print('Written to file '+filename)
return errors, True
else:
print("Warning: writeKkit:: No model found on " , modelpath)
return False
def findMinMax(sceneitems):
cmin = 0.0
cmax = 1.0
xmin,xymin = 0.0,0.0
xmax,xymax = 1.0,1.0
xycord = []
xcord = []
ycord = []
for k,v in list(sceneitems.items()):
xycord.append(v['x'])
xycord.append(v['y'])
xcord.append(v['x'])
ycord.append(v['y'])
xmin = min(xcord)
xmax = max(xcord)
ymin = min(ycord)
ymax = max(ycord)
cmin = min(xycord)
cmax = max(xycord)
return cmin,cmax,xmin,xmax,ymin,ymax
def calPrime(x):
prime = int((20*(float(x-cmin)/float(cmax-cmin)))-10)
return prime
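# Example: with cmin=0 and cmax=100, calPrime(50) returns 0, mapping the
# midpoint of the scene onto Genesis's fixed [-10, 10] coordinate range.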
def storeCplxEnzMsgs( enz, f ):
for sub in enz.neighbors["subOut"]:
s = "addmsg /kinetics/" + trimPath( moose.element(sub) ) + " /kinetics/" + trimPath(enz) + " SUBSTRATE n \n";
s = s+ "addmsg /kinetics/" + trimPath( enz ) + " /kinetics/" + trimPath( sub ) + " REAC sA B \n";
f.write(s)
for prd in enz.neighbors["prd"]:
s = "addmsg /kinetics/" + trimPath( enz ) + " /kinetics/" + trimPath(prd) + " MM_PRD pA\n";
f.write( s )
for enzOut in enz.neighbors["enzOut"]:
s = "addmsg /kinetics/" + trimPath( enzOut ) + " /kinetics/" + trimPath(enz) + " ENZYME n\n";
s = s+ "addmsg /kinetics/" + trimPath( enz ) + " /kinetics/" + trimPath(enzOut) + " REAC eA B\n";
f.write( s )
def storeMMenzMsgs( enz, f):
subList = enz.neighbors["subOut"]
prdList = enz.neighbors["prd"]
enzDestList = enz.neighbors["enzDest"]
for esub in subList:
es = "addmsg /kinetics/" + trimPath(moose.element(esub)) + " /kinetics/" + trimPath(enz) + " SUBSTRATE n \n";
es = es+"addmsg /kinetics/" + trimPath(enz) + " /kinetics/" + trimPath(moose.element(esub)) + " REAC sA B \n";
f.write(es)
for eprd in prdList:
es = "addmsg /kinetics/" + trimPath( enz ) + " /kinetics/" + trimPath( moose.element(eprd)) + " MM_PRD pA \n";
f.write(es)
for eenzDest in enzDestList:
enzDest = "addmsg /kinetics/" + trimPath( moose.element(eenzDest)) + " /kinetics/" + trimPath( enz ) + " ENZYME n \n";
f.write(enzDest)
def storeEnzMsg( enzList, f):
for enz in enzList:
enzClass = enz.className
if (enzClass == "ZombieMMenz" or enzClass == "MMenz"):
storeMMenzMsgs(enz, f)
else:
storeCplxEnzMsgs( enz, f )
def storeChanMsg(chanList,f):
for channel in chanList:
for chanOL in channel.neighbors['out']:
eo = "addmsg /kinetics/" + trimPath(moose.element(channel)) + " /kinetics/" + trimPath(moose.element(chanOL))+ " REAC B A \n";
eo = eo +"addmsg /kinetics/" + trimPath(moose.element(chanOL)) + " /kinetics/"+trimPath(moose.element(channel))+" PRODUCT n vol \n";
f.write(eo)
for chanIL in channel.neighbors['in']:
ei = "addmsg /kinetics/" + trimPath(moose.element(channel)) + " /kinetics/" + trimPath(moose.element(chanIL))+ " REAC A B \n";
ei = ei +"addmsg /kinetics/" + trimPath(moose.element(chanIL)) + " /kinetics/"+trimPath(moose.element(channel))+" SUBSTRATE n vol \n";
f.write(ei)
for chanSNC in channel.neighbors['setNumChan']:
cff = "addmsg /kinetics/"+trimPath(moose.element(chanSNC))+ " /kinetics/"+trimPath(moose.element(channel))+ " NUMCHAN n \n"
f.write(cff)
def writeConcChan(modelpath,f,sceneitems):
error = ""
concChanList = moose.wildcardFind(modelpath+'/##[0][ISA=ConcChan]')
for cChan in concChanList:
if findCompartment(cChan) == moose.element('/'):
error = error + " \n "+cChan.path+ " doesn't have compartment ignored to write to genesis"
else:
x = random.randrange(0,10)
y = random.randrange(0,10)
textcolor = ""
color = ""
if len(moose.element(cChan).neighbors['setNumChan']) == 1:
chanParent = moose.element(moose.element(cChan).neighbors['setNumChan'][0])
if not chanParent.isA['PoolBase']:
print(" raise exception Channel doesn't have pool as parent %s",moose.element(cChan).path)
return False,"raise exception Channel doesn't have pool as parent"
else:
vol = chanParent.volume * NA * 1e-3;
cinfo = cChan.path+'/info'
if moose.exists(cinfo):
x = moose.Annotator(cinfo).getField('x')
y = moose.Annotator(cinfo).getField('y')
#x = sceneitems[cChan]['x']
#y = sceneitems[cChan]['y']
color = moose.Annotator(cinfo).getField('color')
color = getColorCheck(color,GENESIS_COLOR_SEQUENCE)
textcolor = moose.Annotator(cinfo).getField('textColor')
textcolor = getColorCheck(textcolor,GENESIS_COLOR_SEQUENCE)
else:
error = error + "\n x and y co-ordinates are not specified for `" + cChan.name+ "` zero will be assigned \n "
if color == "" or color == " ":
color = getRandomColor()
if textcolor == "" or textcolor == " ":
textcolor = getRandomColor()
f.write("simundump kchan /kinetics/" + trimPath(cChan)+ " " + str(int(1)) + " " + str(cChan.permeability/1000.0 )+ " " +
str(int(0)) + " " +
str(int(0)) + " " +
str(int(0)) + " " +
str(int(0)) + " " +
str("") + " " +
str(textcolor) + " " + str(color) + " \"\"" +
" " + str(int(x)) + " " + str(int(y)) + " "+str(int(0))+"\n")
return concChanList,error
def writeEnz( modelpath,f,sceneitems):
error = ""
enzList = moose.wildcardFind(modelpath+'/##[0][ISA=EnzBase]')
for enz in enzList:
if findCompartment(enz) == moose.element('/'):
error = error + " \n "+enz.path+ " doesn't have compartment ignored to write to genesis"
else:
x = random.randrange(0,10)
y = random.randrange(0,10)
textcolor = ""
color = ""
k1 = 0;
k2 = 0;
k3 = 0;
nInit = 0;
concInit = 0;
n = 0;
conc = 0;
if len(moose.element(enz).neighbors['enzDest']) == 1:
enzParent = moose.element(moose.element(enz).neighbors['enzDest'][0])
if not enzParent.isA['PoolBase']:
print(" raise exception enz doesn't have pool as parent %s",moose.element(enz).path)
return False
else:
vol = enzParent.volume * NA * 1e-3;
isMichaelisMenten = 0;
enzClass = enz.className
if (enzClass == "ZombieMMenz" or enzClass == "MMenz"):
k1 = enz.numKm
k3 = enz.kcat
k2 = 4.0*k3;
k1 = (k2 + k3) / k1;
isMichaelisMenten = 1;
elif (enzClass == "ZombieEnz" or enzClass == "Enz"):
k1 = enz.k1
k2 = enz.k2
k3 = enz.k3
if enz.neighbors['cplx']:
cplx = enz.neighbors['cplx'][0]
nInit = cplx.nInit;
else:
cplx = moose.Pool(enz.path+"/cplx")
moose.Annotator(cplx.path+'/info')
moose.connect( enz, 'cplx', cplx, 'reac' )
nInit = cplx.nInit
einfo = enz.path+'/info'
if moose.exists(einfo):
x = sceneitems[enz]['x']
y = sceneitems[enz]['y']
color = moose.Annotator(einfo).getField('color')
color = getColorCheck(color,GENESIS_COLOR_SEQUENCE)
textcolor = moose.Annotator(einfo).getField('textColor')
textcolor = getColorCheck(textcolor,GENESIS_COLOR_SEQUENCE)
else:
error = error + "\n x and y co-ordinates are not specified for `" + enz.name+ "` zero will be assigned \n "
if color == "" or color == " ":
color = getRandomColor()
if textcolor == "" or textcolor == " ":
textcolor = getRandomColor()
f.write("simundump kenz /kinetics/" + trimPath(enz) + " " + str(int(0))+ " " +
str(concInit) + " " +
str(conc) + " " +
str(nInit) + " " +
str(n) + " " +
str(vol) + " " +
str(k1) + " " +
str(k2) + " " +
str(k3) + " " +
str(0) + " " +
str(isMichaelisMenten) + " " +
"\"\"" + " " +
str(textcolor) + " " + str(color) + " \"\"" +
" " + str(int(x)) + " " + str(int(y)) + " "+str(int(0))+"\n")
return enzList,error
def nearestColorIndex(color, color_sequence):
#Trying to find the index to closest color map from the rainbow pickle file for matching the Genesis color map
distance = [ (color[0] - temp[0]) ** 2 + (color[1] - temp[1]) ** 2 + (color[2] - temp[2]) ** 2
for temp in color_sequence]
minindex = 0
for i in range(1, len(distance)):
if distance[minindex] > distance[i] : minindex = i
return minindex
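# Example: nearestColorIndex((10, 0, 250), GENESIS_COLOR_SEQUENCE) picks the
# index of (8, 0, 255), the entry with the smallest squared RGB distance.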
def storeReacMsg(reacList,f):
for reac in reacList:
reacPath = trimPath( reac);
sublist = reac.neighbors["subOut"]
prdlist = reac.neighbors["prd"]
for sub in sublist:
s = "addmsg /kinetics/" + trimPath( sub ) + " /kinetics/" + reacPath + " SUBSTRATE n \n";
s = s + "addmsg /kinetics/" + reacPath + " /kinetics/" + trimPath( sub ) + " REAC A B \n";
f.write(s)
for prd in prdlist:
s = "addmsg /kinetics/" + trimPath( prd ) + " /kinetics/" + reacPath + " PRODUCT n \n";
s = s + "addmsg /kinetics/" + reacPath + " /kinetics/" + trimPath( prd ) + " REAC B A\n";
f.write( s)
def writeReac(modelpath,f,sceneitems):
error = ""
reacList = moose.wildcardFind(modelpath+'/##[0][ISA=Reac]')
for reac in reacList:
if findCompartment(reac) == moose.element('/'):
error = error + " \n "+reac.path+ " doesn't have compartment ignored to write to genesis"
else:
color = ""
textcolor = ""
kf = reac.numKf
kb = reac.numKb
# if sceneitems != None:
# value = sceneitems[reac]
# x = calPrime(value['x'])
# y = calPrime(value['y'])
rinfo = reac.path+'/info'
if moose.exists(rinfo):
x = sceneitems[reac]['x']
y = sceneitems[reac]['y']
color = moose.Annotator(rinfo).getField('color')
color = getColorCheck(color,GENESIS_COLOR_SEQUENCE)
textcolor = moose.Annotator(rinfo).getField('textColor')
textcolor = getColorCheck(textcolor,GENESIS_COLOR_SEQUENCE)
else:
x = 0
y = 0
error = error + "\n x and y co-ordinates are not specified for `" + reac.name+ "` zero will be assigned \n "
if color == "" or color == " ":
color = getRandomColor()
if textcolor == "" or textcolor == " ":
textcolor = getRandomColor()
f.write("simundump kreac /kinetics/" + trimPath(reac) + " " +str(0) +" "+ str(kf) + " " + str(kb) + " \"\" " +
str(color) + " " + str(textcolor) + " " + str(int(x)) + " " + str(int(y)) + " "+ str(0)+"\n")
return reacList,error
def trimPath(mobj):
mobj = moose.element(mobj)
original = mobj
while not mobj.isA['ChemCompt'] and mobj.path != "/":
mobj = moose.element(mobj.parent)
if mobj.path == "/":
print("%s object doesn't have a parent compartment as a parent." % original)
return
# Other than the kinetics compartment, all other names are converted to groups in Genesis, which are placed under /kinetics.
# For any moose object under /kinetics, the path one level down is taken,
# e.g. /group/poolObject or /Reac
if mobj.name != "kinetics":# and ( (mobj.className != "CubeMesh") and (mobj.className != "CylMesh") and (mobj.className != "EndoMesh") and (mobj.className != "NeuroMesh")):
splitpath = original.path[(original.path.find(mobj.name)):len(original.path)]
else:
pos = original.path.find(mobj.name)
slash = original.path.find('/',pos+1)
splitpath = original.path[slash+1:len(original.path)]
splitpath = re.sub(r"\[[0-9]+\]", "", splitpath)
s = splitpath.replace("_dash_",'-')
s = splitpath.replace("_space_","_")
return s
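# Example (assuming '/model/kinetics' is the ChemCompt): a pool at
# '/model/kinetics/group1/A[0]' trims to 'group1/A' -- the compartment
# prefix and array indices are stripped.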
# def writeSumtotal( modelpath,f):
# funclist = moose.wildcardFind(modelpath+'/##[ISA=Function]')
# for func in funclist:
# funcInputs = moose.element(func.path+'/x[0]')
# s = ""
# for funcInput in funcInputs.neighbors["input"]:
# s = s+ "addmsg /kinetics/" + trimPath(funcInput)+ " /kinetics/" + trimPath(moose.element(func.parent)) + " SUMTOTAL n nInit\n"
# f.write(s)
def writeSumtotal( modelpath,f):
error = ""
funclist = moose.wildcardFind(modelpath+'/##[0][ISA=Function]')
s = ""
for func in funclist:
fInfound = True
fOutfound = True
funcInputs = moose.element(func.path+'/x[0]')
if not len(funcInputs.neighbors["input"]):
fInfound = False
error = error +' \n /'+ (moose.element(func)).parent.name+ '/'+moose.element(func).name + ' function doesn\'t have an input, which is not allowed in genesis. \n This function is not written into the genesis file\n'
if not len(func.neighbors["valueOut"]):
error = error +'Function '+func.path+' has not been connected to any output; this function is not written to the genesis file'
fOutfound = False
else:
for srcfunc in func.neighbors["valueOut"]:
if srcfunc.className in ["ZombiePool","ZombieBufPool","Pool","BufPool"]:
functionOut = moose.element(srcfunc)
else:
error = error +' \n Function output connected to '+srcfunc.name+ ' which is a '+ srcfunc.className+' which is not allowed in genesis, this function '+(moose.element(func)).path+' is not written to file'
fOutfound = False
if fInfound and fOutfound:
srcPool = []
for funcInput in funcInputs.neighbors["input"]:
if funcInput not in srcPool:
srcPool.append(funcInput)
if trimPath(funcInput) != None and trimPath(functionOut) != None:
s = "addmsg /kinetics/" + trimPath(funcInput)+ " /kinetics/"+ trimPath(functionOut)+ " SUMTOTAL n nInit\n"
f.write(s)
else:
error = error + '\n Genesis doesn\'t allow the same molecule to connect to a function multiple times. \n Pool \''+ moose.element(funcInput).name + '\' connected to '+ (moose.element(func)).path
return error
def writeStimulus(modelpath,f):
error = ""
if len(moose.wildcardFind(modelpath+'/##[0][ISA=StimulusTable]')):
error = error +'\n StimulusTable is not written into genesis. This is in Todo List'
return error
def storePlotMsgs( tgraphs,f):
s = ""
if tgraphs:
for graph in tgraphs:
slash = graph.path.find('graphs')
if not slash > -1:
slash = graph.path.find('graph')
if slash > -1:
foundConc = True
if not ( (graph.path.find('conc1') > -1 ) or
(graph.path.find('conc2') > -1 ) or
(graph.path.find('conc3') > -1 ) or
(graph.path.find('conc4') > -1) ):
foundConc = False
#conc = graph.path.find('conc')
# if conc > -1 :
# tabPath = graph.path[slash:len(graph.path)]
# else:
# slash1 = graph.path.find('/',slash)
# tabPath = "/graphs/conc1" +graph.path[slash1:len(graph.path)]
if foundConc:
tabPath = "/"+graph.path[slash:len(graph.path)]
else:
slash1 = graph.path.find('/',slash)
tabPath = "/graphs/conc1" +graph.path[slash1:len(graph.path)]
if len(moose.element(graph).msgOut):
poolPath = (moose.element(graph).msgOut)[0].e2.path
poolEle = moose.element(poolPath)
poolName = poolEle.name
bgPath = (poolEle.path+'/info')
bg = moose.Annotator(bgPath).color
bg = getColorCheck(bg,GENESIS_COLOR_SEQUENCE)
tabPath = re.sub(r"\[[0-9]+\]", "", tabPath)
s = s+"addmsg /kinetics/" + trimPath( poolEle ) + " " + tabPath + \
" PLOT Co *" + poolName + " *" + str(bg) +"\n";
f.write(s)
def writeplot( tgraphs,f ):
first, second = " ", " "
if tgraphs:
for graphs in tgraphs:
slash = graphs.path.find('graphs')
if not slash > -1:
slash = graphs.path.find('graph')
if slash > -1:
foundConc = True
if not ( (graphs.path.find('conc1') > -1 ) or
(graphs.path.find('conc2') > -1 ) or
(graphs.path.find('conc3') > -1 ) or
(graphs.path.find('conc4') > -1) ):
foundConc = False
if foundConc:
tabPath = "/"+graphs.path[slash:len(graphs.path)]
else:
slash1 = graphs.path.find('/',slash)
tabPath = "/graphs/conc1" +graphs.path[slash1:len(graphs.path)]
if len(moose.element(graphs).msgOut):
poolPath = (moose.element(graphs).msgOut)[0].e2.path
poolEle = moose.element(poolPath)
poolAnno = (poolEle.path+'/info')
fg = moose.Annotator(poolAnno).textColor
fg = getColorCheck(fg,GENESIS_COLOR_SEQUENCE)
tabPath = re.sub(r"\[[0-9]+\]", "", tabPath)
if tabPath.find("conc1") >= 0 or tabPath.find("conc2") >= 0:
first = first + "simundump xplot " + tabPath + " 3 524288 \\\n" + "\"delete_plot.w <s> <d>; edit_plot.D <w>\" " + str(fg) + " 0 0 1\n"
if tabPath.find("conc3") >= 0 or tabPath.find("conc4") >= 0:
second = second + "simundump xplot " + tabPath + " 3 524288 \\\n" + "\"delete_plot.w <s> <d>; edit_plot.D <w>\" " + str(fg) + " 0 0 1\n"
return first,second
def writePool(modelpath,f,volIndex,sceneitems):
error = ""
color = ""
textcolor = ""
for p in moose.wildcardFind(modelpath+'/##[0][ISA=PoolBase]'):
if findCompartment(p) == moose.element('/'):
error += "\n%s doesn't have compartment ignored to write to genesis" % p
else:
slave_enable = 0
if p.isA["BufPool"] or p.isA["ZombieBufPool"]:
pool_children = p.children
if len(pool_children) == 0:  # p.children is a list; compare its length
slave_enable = 4
else:
for pchild in pool_children:
if (not pchild.isA["ZombieFunction"]) and (not pchild.isA["Function"]):
slave_enable = 4
else:
slave_enable = 0
break
pp = moose.element(p.parent)
if (not pp.isA['Enz']) and (not pp.isA['ZombieEnz']):
# Assuming "p.parent.className !=Enzyme is cplx which is not written to genesis"
# x = sceneitems[p]['x']
# y = sceneitems[p]['y']
# if sceneitems != None:
# value = sceneitems[p]
# x = calPrime(value['x'])
# y = calPrime(value['y'])
pinfo = p.path+'/info'
if moose.exists(pinfo):
x = sceneitems[p]['x']
y = sceneitems[p]['y']
else:
x = 0
y = 0
error = error + " \n x and y co-ordinates are not specified for `" + p.name+ "` zero will be assigned"+ " \n "
if moose.exists(pinfo):
color = moose.Annotator(pinfo).getField('color')
color = getColorCheck(color,GENESIS_COLOR_SEQUENCE)
textcolor = moose.Annotator(pinfo).getField('textColor')
textcolor = getColorCheck(textcolor,GENESIS_COLOR_SEQUENCE)
poolsCmpt = findCompartment(p)
geometryName = volIndex[float(poolsCmpt.volume)]
volume = p.volume * NA * 1e-3
if color == "" or color == " ":
color = getRandomColor()
if textcolor == "" or textcolor == " ":
textcolor = getRandomColor()
f.write("simundump kpool /kinetics/" + trimPath(p) + " 0 " +
str(p.diffConst) + " " +
str(0) + " " +
str(0) + " " +
str(0) + " " +
str(p.nInit) + " " +
str(0) + " " + str(0) + " " +
str(volume)+ " " +
str(slave_enable) +
" /kinetics"+ geometryName + " " +
str(color) +" " + str(textcolor) + " " + str(int(x)) + " " + str(int(y)) + " "+ str(0)+"\n")
return error
def getColorCheck(color,GENESIS_COLOR_SEQUENCE):
if isinstance(color, str):
if color.startswith("#"):
color = ( int(color[1:3], 16)
, int(color[3:5], 16)
, int(color[5:7], 16)
)
index = nearestColorIndex(color, GENESIS_COLOR_SEQUENCE)
index = index//2  # Genesis expects an integer color index at half scale
return index
elif color.startswith("("):
color = eval(color)[0:3]
index = nearestColorIndex(color, GENESIS_COLOR_SEQUENCE)
# Genesis stores the color index at half scale; keep it an integer
index = index//2
return index
else:
index = color
return index
elif isinstance(color, tuple):
color = [int(x) for x in color[0:3]]
#color = map(int,color)
index = nearestColorIndex(color, GENESIS_COLOR_SEQUENCE)
return index
elif isinstance(color, int):
index = color
return index
else:
raise Exception("Invalid Color Value!")
def getRandomColor():
ignoreColor= ["mistyrose","antiquewhite","aliceblue","azure","bisque","black","blanchedalmond","blue","cornsilk","darkolivegreen","darkslategray","dimgray","floralwhite","gainsboro","ghostwhite","honeydew","ivory","lavender","lavenderblush","lemonchiffon","lightcyan","lightgoldenrodyellow","lightgray","lightyellow","linen","mediumblue","mintcream","navy","oldlace","papayawhip","saddlebrown","seashell","snow","wheat","white","whitesmoke","aquamarine","lightsalmon","moccasin","limegreen","snow","sienna","beige","dimgrey","lightsage"]
matplotcolor = {}
for name,hexno in matplotlib.colors.cnames.items():
matplotcolor[name]=hexno
k = random.choice(list(matplotcolor.keys()))
if k in ignoreColor:
return getRandomColor()
else:
return k
def writeCompartment(modelpath,compts,f):
index = 0
volIndex = {}
for compt in compts:
if compt.name != "kinetics":
x = xmin+6
y = ymax+1
f.write("simundump group /kinetics/" + compt.name + " 0 " + "blue" + " " + "green" + " x 0 0 \"\" defaultfile \\\n" )
f.write( " defaultfile.g 0 0 0 " + str(int(x)) + " " + str(int(y)) + " 0\n")
i = 0
l = len(compts)
geometry = ""
for compt in compts:
# if isinstance(compt,moose.CylMesh):
# print " 1 "
# size = (compt.volume/compt.numDiffCompts)
# else:
size = compt.volume
ndim = compt.numDimensions
vecIndex = l-i-1
i = i+1
x = xmin+4
y = ymax+1
#geometryname = compt.name
if vecIndex > 0:
geometry = geometry+"simundump geometry /kinetics" + "/geometry[" + str(vecIndex) +"] 0 " + str(size) + " " + str(ndim) + " sphere " +" \"\" white black "+ str(int(x)) + " " +str(int(y)) +" 0\n";
volIndex[float(size)] = "/geometry["+str(vecIndex)+"]"
#geometry = geometry+"simundump geometry /kinetics/" + str(geometryname) + " " +str(size) + " " + str(ndim) + " sphere " +" \"\" white black " + str(int(x)) + " "+str(int(y))+ " 0\n";
#volIndex[float(size)] = geometryname
else:
#geometry = geometry+"simundump geometry /kinetics/" + str(geometryname) + " " + str(size) + " " + str(ndim) + " sphere " +" \"\" white black " + str(int(x)) + " "+str(int(y))+ " 0\n";
geometry = geometry+"simundump geometry /kinetics" + "/geometry 0 " + str(size) + " " + str(ndim) + " sphere " +" \"\" white black " + str(int(x)) + " "+str(int(y))+ " 0\n";
volIndex[float(size)] = "/geometry"
#volIndex[float(size)] = geometryname
f.write(geometry)
writeGroup(modelpath,f)
return volIndex
def writeGroup(modelpath,f):
ignore = ["graphs","moregraphs","geometry","groups","conc1","conc2","conc3","conc4","model","data","graph_0","graph_1","graph_2","graph_3","graph_4","graph_5"]
for g in moose.wildcardFind(modelpath+'/##[0][TYPE=Neutral]'):
if not g.name in ignore:
if trimPath(g) != None:
x = xmin+1
y = ymax+1
f.write("simundump group /kinetics/" + trimPath(g) + " 0 " + "blue" + " " + "green" + " x 0 0 \"\" defaultfile \\\n")
f.write(" defaultfile.g 0 0 0 " + str(int(x)) + " " + str(int(y)) + " 0\n")
def writeHeader(f,maxVol):
simdt = 0.001
plotdt = 0.1
rawtime = 100
maxtime = 100
defaultVol = maxVol
f.write("//genesis\n"
"// kkit Version 11 flat dumpfile\n\n"
"// Saved on " + str(rawtime)+"\n"
"include kkit {argv 1}\n"
"FASTDT = " + str(simdt)+"\n"
"SIMDT = " +str(simdt)+"\n"
"CONTROLDT = " +str(plotdt)+"\n"
"PLOTDT = " +str(plotdt)+"\n"
"MAXTIME = " +str(maxtime)+"\n"
"TRANSIENT_TIME = 2"+"\n"
"VARIABLE_DT_FLAG = 0"+"\n"
"DEFAULT_VOL = " +str(defaultVol)+"\n"
"VERSION = 11.0 \n"
"setfield /file/modpath value ~/scripts/modules\n"
"kparms\n\n"
)
f.write( "//genesis\n"
"initdump -version 3 -ignoreorphans 1\n"
"simobjdump table input output alloced step_mode stepsize x y z\n"
"simobjdump xtree path script namemode sizescale\n"
"simobjdump xcoredraw xmin xmax ymin ymax\n"
"simobjdump xtext editable\n"
"simobjdump xgraph xmin xmax ymin ymax overlay\n"
"simobjdump xplot pixflags script fg ysquish do_slope wy\n"
"simobjdump group xtree_fg_req xtree_textfg_req plotfield expanded movealone \\\n"
" link savename file version md5sum mod_save_flag x y z\n"
"simobjdump geometry size dim shape outside xtree_fg_req xtree_textfg_req x y z\n"
"simobjdump kpool DiffConst CoInit Co n nInit mwt nMin vol slave_enable \\\n"
" geomname xtree_fg_req xtree_textfg_req x y z\n"
"simobjdump kreac kf kb notes xtree_fg_req xtree_textfg_req x y z\n"
"simobjdump kenz CoComplexInit CoComplex nComplexInit nComplex vol k1 k2 k3 \\\n"
" keepconc usecomplex notes xtree_fg_req xtree_textfg_req link x y z\n"
"simobjdump stim level1 width1 delay1 level2 width2 delay2 baselevel trig_time \\\n"
" trig_mode notes xtree_fg_req xtree_textfg_req is_running x y z\n"
"simobjdump xtab input output alloced step_mode stepsize notes editfunc \\\n"
" xtree_fg_req xtree_textfg_req baselevel last_x last_y is_running x y z\n"
"simobjdump kchan perm gmax Vm is_active use_nernst notes xtree_fg_req \\\n"
" xtree_textfg_req x y z\n"
"simobjdump transport input output alloced step_mode stepsize dt delay clock \\\n"
" kf xtree_fg_req xtree_textfg_req x y z\n"
"simobjdump proto x y z\n"
)
def estimateDefaultVol(compts):
maxVol = 0
vol = []
for compt in compts:
vol.append(compt.volume)
if len(vol) > 0:
return max(vol)
return maxVol
def writeNotes(modelpath,f):
notes = ""
#items = moose.wildcardFind(modelpath+"/##[ISA=ChemCompt],/##[ISA=Reac],/##[ISA=PoolBase],/##[ISA=EnzBase],/##[ISA=Function],/##[ISA=StimulusTable]")
items = []
items = moose.wildcardFind(modelpath+"/##[0][ISA=ChemCompt]") +\
moose.wildcardFind(modelpath+"/##[0][ISA=PoolBase]") +\
moose.wildcardFind(modelpath+"/##[0][ISA=Reac]") +\
moose.wildcardFind(modelpath+"/##[0][ISA=EnzBase]") +\
moose.wildcardFind(modelpath+"/##[0][ISA=Function]") +\
moose.wildcardFind(modelpath+"/##[0][ISA=StimulusTable]")
for item in items:
if not re.search(r"xfer",item.name):
if not moose.exists(item.path+'/info'):
continue
info = item.path+'/info'
notes = moose.Annotator(info).getField('notes')
if not notes:
continue
# The below form fails in python3 because the \n gets
# printed as regular text rather than a carriage return.
#m = r'call /kinetics/{0}/notes LOAD \ \n"{1}"\n'.format(
#trimPath(item), moose.Annotator(info).getField('notes')
#)
m = 'call /kinetics/{0}/notes LOAD \\\n'.format(trimPath(item) )
f.write(m)
m = '"{}"\n'.format(moose.Annotator(info).getField('notes'))
f.write(m)
# f.write("call /kinetics/"+trimPath(item)+"/notes LOAD \ \n\""+moose.Annotator(info).getField('notes')+"\"\n")
def writeFooter1(f):
f.write("\nenddump\n // End of dump\n")
def writeFooter2(f):
f.write("complete_loading\n")
if __name__ == "__main__":
import os
filename = sys.argv[1]
filepath, filenameWithext = os.path.split(filename)
if filenameWithext.find('.') != -1:
modelpath = filenameWithext[:filenameWithext.find('.')]
else:
modelpath = filenameWithext
moose.loadModel(filename,'/'+modelpath,"gsl")
output = modelpath+"_.g"
written = mooseWriteKkit('/'+modelpath,output)
if written:
print(" file written to ",output)
else:
print(" could be written to kkit format")
|
BhallaLab/moose-core
|
python/moose/genesis/writeKkit.py
|
Python
|
gpl-3.0
| 39,455
|
[
"MOOSE"
] |
70293b9dbb9c2ce00638f0c317e72255bb6f7650be9c2a0462c293f16d188c29
|
# qsub -- utilities for batch submission systems
# Copyright (c) 2010 Oliver Beckstein <orbeckst@gmail.com>
# Made available under GNU Public License v3.
"""
:mod:`gromacs.qsub` -- utilities for batch submission systems
=============================================================
The module helps writing submission scripts for various batch submission
queuing systems. The known ones are listed stored as
:class:`~gromacs.qsub.QueuingSystem` instances in
:data:`~gromacs.qsub.queuing_systems`; append new ones to this list.
The working paradigm is that template scripts are provided (see
:data:`gromacs.config.templates`) and only a few place holders are substituted
(using :func:`gromacs.cbook.edit_txt`).
*User-supplied template scripts* can be stored in
:data:`gromacs.config.qscriptdir` (by default ``~/.gromacswrapper/qscripts``)
and they will be picked up before the package-supplied ones.
At the moment, some of the functions in :mod:`gromacs.setup` use this module
but it is fairly independent and could conceivably be used for a wider range of
projects.
Queuing system templates
------------------------
The queuing system scripts are highly specific and you will need to add
your own. Templates should be shell scripts. Some parts of the
templates are modified by the
:func:`~gromacs.qsub.generate_submit_scripts` function. The "place
holders" that can be replaced are shown in the table below. Typically,
the place holders are either shell variable assignments or batch
submission system commands. The table shows SGE_ commands but PBS_ and
LoadLeveler_ have similar constructs; e.g. PBS commands start with
``#PBS`` and LoadLeveler uses ``#@`` with its own command keywords.
.. Table:: Substitutions in queuing system templates.
=============== =========== ================ ================= =====================================
place holder default replacement description regex
=============== =========== ================ ================= =====================================
#$ -N GMX_MD *sgename* job name `/^#.*(-N|job_name)/`
#$ -l walltime= 00:20:00 *walltime* max run time `/^#.*(-l walltime|wall_clock_limit)/`
#$ -A BUDGET *budget* account `/^#.*(-A|account_no)/`
DEFFNM= md *deffnm* default gmx name `/^ *DEFFNM=/`
STARTDIR= . *startdir* remote jobdir `/^ *STARTDIR=/`
WALL_HOURS= 0.33 *walltime* h mdrun's -maxh `/^ *WALL_HOURS=/`
NPME= *npme* PME nodes `/^ *NPME=/`
MDRUN_OPTS= "" *mdrun_opts* more options `/^ *MDRUN_OPTS=/`
=============== =========== ================ ================= =====================================
Lines with place holders should not have any white space at the
beginning. The regular expression pattern ("regex") is used to find
the lines for the replacement and the literal default values
("default") are replaced. (Exception: any value that follows an equals
sign "=" is replaced, regardless of the default value in the table
*except* for ``MDRUN_OPTS`` where *only "" will be replaced*.) Not all
place holders have to occur in a template; for instance, if a queue
has no run time limitation then one would probably not include
*walltime* and *WALL_HOURS* place holders.
The line ``# JOB_ARRAY_PLACEHOLDER`` can be replaced by
:func:`~gromacs.qsub.generate_submit_array` to produce a "job array"
(also known as a "task array") script that runs a large number of
related simulations under the control of a single queuing system
job. The individual array tasks are run from different sub
directories. Only queuing system scripts that are using the
:program:`bash` shell are supported for job arrays at the moment.
A queuing system script *must* have the appropriate suffix to be properly
recognized, as shown in the table below.
.. Table:: Suffixes for queuing system templates. Pure shell scripts are only used to run locally.
============================== =========== ===========================
Queuing system suffix notes
============================== =========== ===========================
Sun Gridengine .sge Sun's `Sun Gridengine`_
Portable Batch queuing system .pbs OpenPBS_ and `PBS Pro`_
LoadLeveler .ll IBM's `LoadLeveler`_
bash script .bash, .sh `Advanced bash scripting`_
csh script .csh avoid_ csh_
============================== =========== ===========================
.. _OpenPBS: http://www.mcs.anl.gov/research/projects/openpbs/
.. _PBS: OpenPBS_
.. _PBS Pro: http://www.pbsworks.com/Product.aspx?id=1
.. _Sun Gridengine: http://gridengine.sunsource.net/
.. _SGE: `Sun Gridengine`_
.. _LoadLeveler: http://www-03.ibm.com/systems/software/loadleveler/index.html
.. _Advanced bash scripting: http://tldp.org/LDP/abs/html/
.. _avoid: http://www.grymoire.com/Unix/CshTop10.txt
.. _csh: http://www.faqs.org/faqs/unix-faq/shell/csh-whynot/
Example queuing system script template for PBS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following script is a usable PBS_ script for a super computer. It
contains almost all of the replacement tokens listed in the table
(indicated by ++++++). ::
#!/bin/bash
# File name: ~/.gromacswrapper/qscripts/supercomputer.somewhere.fr_64core.pbs
#PBS -N GMX_MD
# ++++++
#PBS -j oe
#PBS -l select=8:ncpus=8:mpiprocs=8
#PBS -l walltime=00:20:00
# ++++++++
# host: supercomputer.somewhere.fr
# queuing system: PBS
# set this to the same value as walltime; mdrun will stop cleanly
# at 0.99 * WALL_HOURS
WALL_HOURS=0.33
# ++++
# deffnm line is possibly modified by gromacs.setup
# (leave it as it is in the template)
DEFFNM=md
# ++
TPR=${DEFFNM}.tpr
OUTPUT=${DEFFNM}.out
PDB=${DEFFNM}.pdb
MDRUN_OPTS=""
# ++
# If you always want to add additional MDRUN options in this script then
# you can either do this directly in the mdrun commandline below or by
# constructs such as the following:
## MDRUN_OPTS="-npme 24 $MDRUN_OPTS"
# JOB_ARRAY_PLACEHOLDER
#++++++++++++++++++++++ leave the full commented line intact!
# avoids some failures
export MPI_GROUP_MAX=1024
# use hard coded path for time being
GMXBIN="/opt/software/SGI/gromacs/4.0.3/bin"
MPIRUN=/usr/pbs/bin/mpiexec
APPLICATION=$GMXBIN/mdrun_mpi
$MPIRUN $APPLICATION -stepout 1000 -deffnm ${DEFFNM} -s ${TPR} -c ${PDB} -cpi \
$MDRUN_OPTS \
-maxh ${WALL_HOURS} > $OUTPUT
rc=$?
# dependent jobs will only start if rc == 0
exit $rc
Save the above script in ``~/.gromacswrapper/qscripts`` under the name
``supercomputer.somewhere.fr_64core.pbs``. This will make the script
immediately usable. For example, in order to set up a production MD run with
:func:`gromacs.setup.MD` for this super computer one would use ::
gromacs.setup.MD(..., qscripts=['supercomputer.somewhere.fr_64core.pbs', 'local.sh'])
This will generate submission scripts based on
``supercomputer.somewhere.fr_64core.pbs`` and also the default ``local.sh``
that is provided with *GromacsWrapper*.
In order to modify ``MDRUN_OPTS`` one would use the additional *mdrun_opts*
argument, for instance::
gromacs.setup.MD(..., qscripts=['supercomputer.somewhere.fr_64core.pbs', 'local.sh'],
mdrun_opts="-v -npme 20 -dlb yes -nosum")
Currently there is no good way to specify the number of processors when
creating run scripts. You will need to provide scripts with different numbers
of cores hard coded or set them when submitting the scripts with command line
options to :program:`qsub`.
Classes and functions
---------------------
.. autoclass:: QueuingSystem
:members:
.. autofunction:: generate_submit_scripts
.. autofunction:: generate_submit_array
.. autofunction:: detect_queuing_system
.. autodata:: queuing_systems
"""
from __future__ import absolute_import, with_statement
import os
import errno
from os.path import relpath
import warnings
from . import config
from . import cbook
from .utilities import asiterable, Timedelta
from .exceptions import AutoCorrectionWarning
import logging
logger = logging.getLogger('gromacs.qsub')
class QueuingSystem(object):
"""Class that represents minimum information about a batch submission system."""
def __init__(self, name, suffix, qsub_prefix, array_variable=None, array_option=None):
"""Define a queuing system's functionality
:Arguments:
*name*
name of the queuing system, e.g. 'Sun Gridengine'
*suffix*
suffix of input files, e.g. 'sge'
*qsub_prefix*
prefix string that starts a qsub flag in a script, e.g. '#$'
:Keywords:
*array_variable*
environment variable exported for array jobs, e.g.
'SGE_TASK_ID'
*array_option*
qsub option format string to launch an array (e.g. '-t %d-%d')
"""
self.name = name
self.suffix = suffix
self.qsub_prefix = qsub_prefix
self.array_variable = array_variable
self.array_option = array_option
def flag(self, *args):
"""Return string for qsub flag *args* prefixed with appropriate inscript prefix."""
return " ".join((self.qsub_prefix,)+args)
def has_arrays(self):
"""True if known how to do job arrays."""
return self.array_variable is not None
def array_flag(self, directories):
"""Return string to embed the array launching option in the script."""
return self.flag(self.array_option % (1,len(directories)))
def array(self, directories):
"""Return multiline string for simple array jobs over *directories*.
.. Warning:: The string is in ``bash`` and hence the template must also
be ``bash`` (and *not* ``csh`` or ``sh``).
"""
if not self.has_arrays():
raise NotImplementedError('Do not know how to make array jobs for '
'queuing system %(name)s' % vars(self))
hrule = '#'+60*'-'
lines = [
'',
hrule,
'# job array:',
self.array_flag(directories),
hrule,
'# directories for job tasks',
'declare -a jobdirs']
for i,dirname in enumerate(asiterable(directories)):
idx = i+1 # job array indices are 1-based
lines.append('jobdirs[{idx:d}]={dirname!r}'.format(**vars()))
lines.extend([
'# Switch to the current tasks directory:',
'wdir="${{jobdirs[${{{array_variable!s}}}]}}"'.format(**vars(self)),
'cd "$wdir" || { echo "ERROR: failed to enter $wdir."; exit 1; }',
hrule,
''
])
return "\n".join(lines)
def isMine(self, scriptname):
"""Primitive queuing system detection; only looks at suffix at the moment."""
suffix = os.path.splitext(scriptname)[1].lower()
if suffix.startswith('.'):
suffix = suffix[1:]
return self.suffix == suffix
def __repr__(self):
return "<"+self.name+" QueuingSystem instance>"
#: Pre-defined queuing systems (SGE, PBS). Add your own here.
queuing_systems = [
QueuingSystem('Sun Gridengine', 'sge', '#$', array_variable='SGE_TASK_ID', array_option='-t %d-%d'),
QueuingSystem('PBS', 'pbs', '#PBS', array_variable='PBS_ARRAY_INDEX', array_option='-J %d-%d'),
QueuingSystem('LoadLeveler', 'll', '#@'), # no idea how to do arrays in LL
]
def detect_queuing_system(scriptfile):
"""Return the queuing system for which *scriptfile* was written."""
for qs in queuing_systems:
if qs.isMine(scriptfile):
return qs
return None
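# Example: detect_queuing_system('run_md.sge') returns the 'Sun Gridengine'
# instance from `queuing_systems`; an unrecognized suffix returns None.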
def generate_submit_scripts(templates, prefix=None, deffnm='md', jobname='MD', budget=None,
mdrun_opts=None, walltime=1.0, jobarray_string=None, startdir=None,
npme=None, **kwargs):
"""Write scripts for queuing systems.
This sets up queuing system run scripts with a simple search and replace in
templates. See :func:`gromacs.cbook.edit_txt` for details. Shell scripts
are made executable.
:Arguments:
*templates*
Template file or list of template files. The "files" can also be names
or symbolic names for templates in the templates directory. See
:mod:`gromacs.config` for details and rules for writing templates.
*prefix*
Prefix for the final run script filename; by default the filename will be
the same as the template. [None]
*dirname*
Directory in which to place the submit scripts. [.]
*deffnm*
Default filename prefix for :program:`mdrun` ``-deffnm`` [md]
*jobname*
Name of the job in the queuing system. [MD]
*budget*
Which budget to book the runtime on [None]
*startdir*
Explicit path on the remote system (for run scripts that need to `cd`
into this directory at the beginning of execution) [None]
*mdrun_opts*
String of additional options for :program:`mdrun`.
*walltime*
Maximum runtime of the job in hours. [1]
*npme*
number of PME nodes
*jobarray_string*
Multi-line string that is spliced in for job array functionality
(see :func:`gromacs.qsub.generate_submit_array`; do not use manually)
*kwargs*
all other kwargs are ignored
:Returns: list of generated run scripts
"""
if not jobname[0].isalpha():
jobname = 'MD_'+jobname
wmsg = "To make the jobname legal it must start with a letter: changed to {0!r}".format(jobname)
logger.warn(wmsg)
warnings.warn(wmsg, category=AutoCorrectionWarning)
if prefix is None:
prefix = ""
if mdrun_opts is not None:
mdrun_opts = '"'+str(mdrun_opts)+'"' # TODO: could test if quotes already present
dirname = kwargs.pop('dirname', os.path.curdir)
wt = Timedelta(hours=walltime)
walltime = wt.strftime("%h:%M:%S")
wall_hours = wt.ashours
def write_script(template):
submitscript = os.path.join(dirname, prefix + os.path.basename(template))
logger.info("Setting up queuing system script {submitscript!r}...".format(**vars()))
# These substitution rules are documented for the user in the module doc string
cbook.edit_txt(template,
[('^ *DEFFNM=','(?<==)(.*)', deffnm),
('^#.*(-N|job_name)', '((?<=-N\s)|(?<=job_name\s))\s*\w+', jobname),
('^#.*(-A|account_no)', '((?<=-A\s)|(?<=account_no\s))\s*\w+', budget),
('^#.*(-l walltime|wall_clock_limit)', '(?<==)(\d+:\d+:\d+)', walltime),
('^ *WALL_HOURS=', '(?<==)(.*)', wall_hours),
('^ *STARTDIR=', '(?<==)(.*)', startdir),
('^ *NPME=', '(?<==)(.*)', npme),
('^ *MDRUN_OPTS=', '(?<==)("")', mdrun_opts), # only replace literal ""
('^# JOB_ARRAY_PLACEHOLDER', '^.*$', jobarray_string),
],
newname=submitscript)
ext = os.path.splitext(submitscript)[1]
if ext in ('.sh', '.csh', '.bash'):
os.chmod(submitscript, 0o755)
return submitscript
return [write_script(template) for template in config.get_templates(templates)]
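# Usage sketch (the walltime/jobname values are arbitrary; 'local.sh' is the
# default template shipped with GromacsWrapper, per the module docstring):
#   scripts = generate_submit_scripts(['local.sh'], deffnm='md',
#                                     jobname='prod', walltime=2.0)
# returns the list of written script paths; .sh/.csh/.bash are made executable.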
def generate_submit_array(templates, directories, **kwargs):
"""Generate a array job.
For each ``work_dir`` in *directories*, the array job will
1. cd into ``work_dir``
2. run the job as detailed in the template
It will use all the queuing system directives found in the
template. If more complicated set ups are required, then this
function cannot be used.
:Arguments:
*templates*
Basic template for a single job; the job array logic is spliced into
the position of the line ::
# JOB_ARRAY_PLACEHOLDER
The appropriate commands for common queuing systems (Sun Gridengine, PBS)
are hard coded here. The queuing system is detected from the suffix of
the template.
*directories*
List of directories under *dirname*. One task is set up for each
directory.
*dirname*
The array script will be placed in this directory. The *directories*
**must** be located under *dirname*.
*kwargs*
See :func:`gromacs.setup.generate_submit_script` for details.
"""
dirname = kwargs.setdefault('dirname', os.path.curdir)
reldirs = [relpath(p, start=dirname) for p in asiterable(directories)]
missing = [p for p in (os.path.join(dirname, subdir) for subdir in reldirs)
if not os.path.exists(p)]
if len(missing) > 0:
logger.debug("template=%(template)r: dirname=%(dirname)r reldirs=%(reldirs)r", vars())
logger.error("Some directories are not accessible from the array script: "
"%(missing)r", vars())
def write_script(template):
qsystem = detect_queuing_system(template)
if qsystem is None or not qsystem.has_arrays():
logger.warning("Not known how to make a job array for %(template)r; skipping...", vars())
return None
kwargs['jobarray_string'] = qsystem.array(reldirs)
return generate_submit_scripts(template, **kwargs)[0] # returns list of length 1
# must use config.get_templates() because we need to access the file for detecting
return [write_script(template) for template in config.get_templates(templates)]
|
PicoCentauri/GromacsWrapper
|
gromacs/qsub.py
|
Python
|
gpl-3.0
| 18,105
|
[
"Gromacs"
] |
a3cc64931cb15cc1d35a27289ab8b7f46287ed07a351a558bb616d6206599bc7
|
#+
#
# This file is part of h5py, a low-level Python interface to the HDF5 library.
#
# Copyright (C) 2008 Andrew Collette
# http://h5py.alfven.org
# License: BSD (See LICENSE.txt for full license)
#
# $Date$
#
#-
"""
Provides high-level Python objects for HDF5 files, groups, and datasets.
Objects in this module are designed to provide a friendly, Python-style
interface to native HDF5 concepts like files, datasets, groups and
attributes. The module is written in pure Python and uses the standard
h5py low-level interface exclusively.
Most components defined here are re-exported into the root h5py package
namespace, because they are the most straightforward and intuitive
way to interact with HDF5.
"""
from __future__ import with_statement
import os
import sys
import weakref
import threading
import warnings
import os.path as op
import posixpath as pp
import numpy
from h5py import h5, h5f, h5g, h5s, h5t, h5d, h5a, \
h5p, h5r, h5z, h5i, h5fd, h5o, h5l, \
version, filters, _extras
import h5py.selections as sel
from h5py.h5e import register_thread
config = h5.get_config()
phil = threading.RLock()
def is_hdf5(fname):
""" Determine if a file is valid HDF5 (False if it doesn't exist). """
register_thread()
fname = os.path.abspath(fname)
if os.path.isfile(fname):
try:
fname = fname.encode(sys.getfilesystemencoding())
except (UnicodeError, LookupError):
pass
return h5f.is_hdf5(fname)
return False
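# Example: is_hdf5('data.h5') is True only for an existing, valid HDF5 file;
# a missing path returns False instead of raising.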
def _guess_dtype(data):
""" Attempt to guess an appropriate dtype for the object, returning None
if nothing is appropriate (or if it should be left up to the array
constructor to figure out)
"""
if isinstance(data, h5r.RegionReference):
return h5t.special_dtype(ref=h5r.RegionReference)
if isinstance(data, h5r.Reference):
return h5t.special_dtype(ref=h5r.Reference)
return None
# === Base classes ============================================================
class HLObject(object):
"""
Base class for high-level interface objects.
All objects of this class support the following properties:
id: Low-level identifier, compatible with the h5py.h5* modules.
name: Name of this object in the HDF5 file. May not be unique.
attrs: HDF5 attributes of this object. See AttributeManager class.
file: The File instance associated with this object
parent: (A) parent of this object, according to dirname(obj.name)
ref: An HDF5 reference to this object.
Equality comparison and hashing are based on native HDF5 object
identity.
"""
@property
def file(self):
"""Return a File instance associated with this object"""
return self._file
@property
def _lapl(self):
"""Default link access property list (1.8)"""
lapl = h5p.create(h5p.LINK_ACCESS)
fapl = h5p.create(h5p.FILE_ACCESS)
fapl.set_fclose_degree(h5f.CLOSE_STRONG)
lapl.set_elink_fapl(fapl)
return lapl
@property
def _lcpl(self):
"""Default link creation property list (1.8)"""
lcpl = h5p.create(h5p.LINK_CREATE)
lcpl.set_create_intermediate_group(True)
return lcpl
@property
def id(self):
""" Low-level identifier appropriate for this object """
return self._id
@property
def name(self):
"""Name of this object in the HDF5 file. Not necessarily unique."""
register_thread()
return h5i.get_name(self.id)
@property
def attrs(self):
"""Provides access to HDF5 attributes. See AttributeManager."""
return AttributeManager(self)
@property
def parent(self):
"""Return the parent group of this object.
This is always equivalent to file[posixpath.dirname(obj.name)].
"""
if self.name is None:
raise ValueError("Parent of an anonymous object is undefined")
return self.file[pp.dirname(self.name)]
@property
def ref(self):
""" An (opaque) HDF5 reference to this object """
register_thread()
return h5r.create(self.id, '.', h5r.OBJECT)
def __init__(self, oid):
""" Setup this object, given its low-level identifier """
register_thread()
self._id = oid
if not isinstance(self, File):
fid = h5i.get_file_id(oid)
self._file = File(None, bind=fid)
def __nonzero__(self):
register_thread()
return self.id.__nonzero__()
def __hash__(self):
register_thread()
return hash(self.id)
def __eq__(self, other):
register_thread()
if hasattr(other, 'id'):
return self.id == other.id
return False
def __ne__(self, other):
return not self.__eq__(other)
class _DictCompat(object):
"""
Contains dictionary-style compatibility methods for groups and
attributes.
"""
def keys(self):
""" Get a list containing member names """
with phil:
return list(self)
def iterkeys(self):
""" Get an iterator over member names """
with phil:
return iter(self)
def values(self):
""" Get a list containing member objects """
with phil:
return [self[x] for x in self]
def itervalues(self):
""" Get an iterator over member objects """
with phil:
for x in self:
yield self[x]
def items(self):
""" Get a list of tuples containing (name, object) pairs """
with phil:
return [(x, self[x]) for x in self]
def iteritems(self):
""" Get an iterator over (name, object) pairs """
with phil:
for x in self:
yield (x, self[x])
def get(self, name, default=None):
""" Retrieve the member, or return default if it doesn't exist """
with phil:
if name in self:
return self[name]
return default
# Compatibility methods
def listnames(self):
""" Deprecated alias for keys() """
warnings.warn("listnames() is deprecated; use keys() instead", DeprecationWarning)
return self.keys()
def iternames(self):
""" Deprecated alias for iterkeys() """
warnings.warn("iternames() is deprecated; use iterkeys() instead", DeprecationWarning)
return self.iterkeys()
def listobjects(self):
""" Deprecated alias for values() """
warnings.warn("listobjects() is deprecated; use values() instead", DeprecationWarning)
return self.values()
def iterobjects(self):
""" Deprecated alias for itervalues() """
warnings.warn("iterobjects() is deprecated; use itervalues() instead", DeprecationWarning)
return self.itervalues()
def listitems(self):
""" Deprecated alias for items() """
warnings.warn("listitems() is deprecated; use items() instead", DeprecationWarning)
return self.items()
class Group(HLObject, _DictCompat):
""" Represents an HDF5 group.
It's recommended to use the Group/File method create_group to create
these objects, rather than trying to create them yourself.
Groups implement a basic dictionary-style interface, supporting
__getitem__, __setitem__, __len__, __contains__, keys(), values()
and others.
They also contain the necessary methods for creating new groups and
datasets. Group attributes can be accessed via <group>.attrs.
"""
def __init__(self, parent_object, name, create=False, _rawid=None):
""" Create a new Group object, from a parent object and a name.
If "create" is False (default), try to open the given group,
raising an exception if it doesn't exist. If "create" is True,
create a new HDF5 group and link it into the parent group.
It's recommended to use __getitem__ or create_group() rather than
calling the constructor directly.
"""
register_thread()
with phil:
if _rawid is not None:
id = _rawid
elif create:
if config.API_18:
id = h5g.create(parent_object.id, name, lcpl=self._lcpl)
else:
id = h5g.create(parent_object.id, name)
else:
id = h5g.open(parent_object.id, name)
HLObject.__init__(self, id)
def _set18(self, name, obj):
""" HDF5 1.8 __setitem__. PHIL should already be held.
Distinct from 1.6 version in that it uses the proper link creation
and access property lists, which enable creation of intermediate
groups and proper handling of external links.
"""
plists = {'lcpl': self._lcpl, 'lapl': self._lapl}
if isinstance(obj, HLObject):
h5o.link(obj.id, self.id, name, **plists)
elif isinstance(obj, SoftLink):
self.id.links.create_soft(name, obj.path, **plists)
elif isinstance(obj, ExternalLink):
self.id.links.create_external(name, obj.filename, obj.path, **plists)
elif isinstance(obj, numpy.dtype):
htype = h5t.py_create(obj)
htype.commit(self.id, name, lcpl=self._lcpl)
else:
ds = self.create_dataset(None, data=obj, dtype=_guess_dtype(obj))
h5o.link(ds.id, self.id, name, **plists)
def _set16(self, name, obj):
""" HDF5 1.6 __setitem__. PHIL should already be held. """
if isinstance(obj, HLObject):
self.id.link(h5i.get_name(obj.id), name, link_type=h5g.LINK_HARD)
elif isinstance(obj, SoftLink):
self.id.link(obj.path, name, link_type=h5g.LINK_SOFT)
elif isinstance(obj, numpy.dtype):
htype = h5t.py_create(obj)
htype.commit(self.id, name)
else:
self.create_dataset(name, data=obj)
def __setitem__(self, name, obj):
""" Add an object to the group. The name must not already be in use.
The action taken depends on the type of object assigned:
Named HDF5 object (Dataset, Group, Datatype)
A hard link is created at "name" which points to the
given object.
SoftLink or ExternalLink
Create the corresponding link.
Numpy ndarray
The array is converted to a dataset object, with default
settings (contiguous storage, etc.).
Numpy dtype
Commit a copy of the datatype as a named datatype in the file.
Anything else
Attempt to convert it to an ndarray and store it. Scalar
values are stored as scalar datasets. Raise ValueError if we
can't understand the resulting array dtype.
"""
register_thread()
with phil:
if config.API_18:
self._set18(name, obj)
else:
self._set16(name, obj)
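# Illustrative sketch (editor's addition): the dispatch above means plain
# assignment covers several cases. Assuming an open writable File f:
#     >>> import numpy
#     >>> f["data"] = numpy.arange(10)        # ndarray -> new dataset
#     >>> f["alias"] = SoftLink("/data")      # link object -> soft link
#     >>> f["mytype"] = numpy.dtype("=f4")    # dtype -> named datatype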
def _get18(self, name):
""" HDF5 1.8 __getitem__
Works with string names. Respects link access properties.
"""
objinfo = h5o.get_info(self.id, name, lapl=self._lapl)
cls = {h5o.TYPE_GROUP: Group, h5o.TYPE_DATASET: Dataset,
h5o.TYPE_NAMED_DATATYPE: Datatype}.get(objinfo.type)
if cls is None:
raise TypeError("Unknown object type")
oid = h5o.open(self.id, name, lapl=self._lapl)
return cls(self, None, _rawid=oid)
def _get16(self, name):
""" HDF5 1.6 __getitem__ """
objinfo = h5g.get_objinfo(self.id, name)
cls = {h5g.DATASET: Dataset, h5g.GROUP: Group,
h5g.TYPE: Datatype}.get(objinfo.type)
if cls is None:
raise TypeError("Unknown object type")
return cls(self, name)
def _getref(self, ref):
""" Dereference and open (1.6 and 1.8) """
cls = {h5g.DATASET: Dataset, h5g.GROUP: Group,
h5g.TYPE: Datatype}.get(h5r.get_obj_type(ref, self.id))
if cls is None:
raise ValueError("Unrecognized object type")
return cls(self, None, _rawid=h5r.dereference(ref, self.id))
def __getitem__(self, name):
""" Open an object attached to this group.
"""
register_thread()
with phil:
if isinstance(name, h5r.Reference):
return self._getref(name)
elif config.API_18:
return self._get18(name)
else:
return self._get16(name)
def __delitem__(self, name):
""" Delete (unlink) an item from this group. """
register_thread()
self.id.unlink(name)
def __len__(self):
""" Number of members attached to this group """
register_thread()
return self.id.get_num_objs()
def __contains__(self, name):
""" Test if a member name exists """
register_thread()
return name in self.id
def __iter__(self):
""" Iterate over member names """
register_thread()
return self.id.__iter__()
def create_group(self, name):
""" Create and return a subgroup. Fails if the group already exists.
"""
return Group(self, name, create=True)
def require_group(self, name):
""" Check if a group exists, and create it if not. TypeError if an
incompatible object exists.
"""
with phil:
if name not in self:
return self.create_group(name)
grp = self[name]
if not isinstance(grp, Group):
raise TypeError("Incompatible object (%s) already exists" % grp.__class__.__name__)
return grp
def create_dataset(self, name, *args, **kwds):
""" Create and return a new dataset. Fails if "name" already exists.
create_dataset(name, shape, [dtype=<Numpy dtype>], **kwds)
create_dataset(name, data=<Numpy array>, **kwds)
The default dtype is '=f4' (single-precision float).
Additional keywords ("*" is default):
chunks
Tuple of chunk dimensions or None*
maxshape
None* or a tuple giving maximum dataset size. An element of None
indicates an unlimited dimension. Dataset can be expanded by
calling resize()
compression
Compression strategy; None*, 'gzip', 'szip' or 'lzf'. An integer
is interpreted as a gzip level.
compression_opts
Optional compression settings; for gzip, this may be an int. For
szip, it should be a 2-tuple ('ec'|'nn', int(0-32)).
shuffle
Use the shuffle filter (increases compression performance for
gzip and LZF). True/False*.
fletcher32
Enable error-detection. True/False*.
"""
return Dataset(self, name, *args, **kwds)
def require_dataset(self, name, shape, dtype, exact=False, **kwds):
"""Open a dataset, or create it if it doesn't exist.
Checks if a dataset with compatible shape and dtype exists, and
creates one if it doesn't. Raises TypeError if an incompatible
dataset (or group) already exists.
By default, datatypes are compared for loss-of-precision only.
To require an exact match, set keyword "exact" to True. Shapes
are always compared exactly.
Keyword arguments are only used when creating a new dataset; they
are ignored if a dataset with matching shape and dtype already
exists. See create_dataset for a list of legal keywords.
"""
dtype = numpy.dtype(dtype)
with phil:
if name not in self:
return self.create_dataset(name, *(shape, dtype), **kwds)
dset = self[name]
if not isinstance(dset, Dataset):
raise TypeError("Incompatible object (%s) already exists" % dset.__class__.__name__)
if not shape == dset.shape:
raise TypeError("Shapes do not match (existing %s vs new %s)" % (dset.shape, shape))
if exact:
if not dtype == dset.dtype:
raise TypeError("Datatypes do not exactly match (existing %s vs new %s)" % (dset.dtype, dtype))
elif not numpy.can_cast(dtype, dset.dtype):
raise TypeError("Datatypes cannot be safely cast (existing %s vs new %s)" % (dset.dtype, dtype))
return dset
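# Illustrative sketch (editor's addition): require_dataset is idempotent
# for a compatible shape/dtype. Assuming an open writable File f:
#     >>> d1 = f.require_dataset("x", (10,), "=f4")  # created
#     >>> d2 = f.require_dataset("x", (10,), "=f4")  # reopened silently
#     >>> f.require_dataset("x", (5,), "=f4")        # raises TypeError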
def get(self, name, default=None, getclass=False, getlink=False):
""" Retrieve item "name", or "default" if it's not in this group.
getclass
If True, returns the class of object (Group, Dataset, etc.)
instead of the object itself.
getlink
If True, return SoftLink and ExternalLink instances instead
of the objects they point to.
"""
register_thread()
with phil:
if name not in self:
return default
if config.API_18:
linkinfo = self.id.links.get_info(name)
if linkinfo.type == h5l.TYPE_HARD or not getlink:
objinfo = h5o.get_info(self.id, name)
cls = {h5o.TYPE_GROUP: Group, h5o.TYPE_DATASET: Dataset,
h5o.TYPE_NAMED_DATATYPE: Datatype}.get(objinfo.type)
if cls is None:
raise TypeError("Unknown object type")
return cls if getclass else cls(self, name)
else:
if linkinfo.type == h5l.TYPE_SOFT:
return SoftLink if getclass else SoftLink(self.id.links.get_val(name))
elif linkinfo.type == h5l.TYPE_EXTERNAL:
return ExternalLink if getclass else ExternalLink(*self.id.links.get_val(name))
raise TypeError("Unknown link class")
# API 1.6
info = h5g.get_objinfo(self.id, name, follow_link=(not getlink))
cls = {h5g.DATASET: Dataset, h5g.GROUP: Group,
h5g.TYPE: Datatype}.get(info.type)
if cls is not None:
return cls if getclass else cls(self, name)
if getlink and info.type == h5g.LINK:
return SoftLink if getclass else SoftLink(self.id.get_linkval(name))
raise TypeError("Unknown object type")
# New 1.8.X methods
def copy(self, source, dest, name=None):
""" Copy an object or group (Requires HDF5 1.8).
The source can be a path, Group, Dataset, or Datatype object. The
destination can be either a path or a Group object. The source and
destinations need not be in the same file.
If the source is a Group object, all objects contained in that group
will be copied recursively.
When the destination is a Group object, by default the target will
be created in that group with its current name (basename of obj.name).
You can override that by setting "name" to a string.
Example:
>>> f = File('myfile.hdf5')
>>> f.listnames()
['MyGroup']
>>> f.copy('MyGroup', 'MyCopy')
>>> f.listnames()
['MyGroup', 'MyCopy']
"""
register_thread()
if not config.API_18:
raise NotImplementedError("This feature is only available with HDF5 1.8.0 and later")
with phil:
if isinstance(source, HLObject):
source_path = '.'
else:
# Interpret source as a path relative to this group
source_path = source
source = self
if isinstance(dest, Group):
if name is not None:
dest_path = name
else:
dest_path = pp.basename(h5i.get_name(source[source_path].id))
elif isinstance(dest, HLObject):
raise TypeError("Destination must be path or Group object")
else:
# Interpret destination as a path relative to this group
dest_path = dest
dest = self
h5o.copy(source.id, source_path, dest.id, dest_path)
def visit(self, func):
""" Recursively visit all names in this group and subgroups (HDF5 1.8).
You supply a callable (function, method or callable object); it
will be called exactly once for each link in this group and every
group below it. Your callable must conform to the signature:
func(<member name>) => <None or return value>
Returning None continues iteration, returning anything else stops
and immediately returns that value from the visit method. No
particular order of iteration within groups is guaranteed.
Example:
>>> # List the entire contents of the file
>>> f = File("foo.hdf5")
>>> list_of_names = []
>>> f.visit(list_of_names.append)
"""
register_thread()
if not config.API_18:
raise NotImplementedError("This feature is only available with HDF5 1.8.0 and later")
with phil:
return h5o.visit(self.id, func)
def visititems(self, func):
""" Recursively visit names and objects in this group (HDF5 1.8).
You supply a callable (function, method or callable object); it
will be called exactly once for each link in this group and every
group below it. Your callable must conform to the signature:
func(<member name>, <object>) => <None or return value>
Returning None continues iteration, returning anything else stops
and immediately returns that value from the visit method. No
particular order of iteration within groups is guaranteed.
Example:
# Get a list of all datasets in the file
>>> mylist = []
>>> def func(name, obj):
... if isinstance(obj, Dataset):
... mylist.append(name)
...
>>> f = File('foo.hdf5')
>>> f.visititems(func)
"""
register_thread()
if not config.API_18:
raise NotImplementedError("This feature is only available with HDF5 1.8.0 and later")
with phil:
def call_proxy(name):
return func(name, self[name])
return h5o.visit(self.id, call_proxy)
def __repr__(self):
if not self:
return "<Closed HDF5 group>"
namestr = '"%s"' % self.name if self.name is not None else "(anonymous)"
return '<HDF5 group %s (%d members)>' % \
(namestr, len(self))
class File(Group):
""" Represents an HDF5 file on disk.
File(name, mode=None, driver=None, **driver_kwds)
Legal modes: r, r+, w, w-, a (default)
File objects inherit from Group objects; Group-like methods all
operate on the HDF5 root group ('/'). Like Python file objects, you
must close the file ("obj.close()") when you're done with it. File
objects may also be used as context managers in Python "with" blocks.
The HDF5 file driver may also be specified:
None
Use the standard HDF5 driver appropriate for the current platform.
On UNIX, this is the H5FD_SEC2 driver; on Windows, it is
H5FD_WINDOWS.
'sec2'
Unbuffered, optimized I/O using standard POSIX functions.
'stdio'
Buffered I/O using functions from stdio.h.
'core'
Memory-map the entire file; all operations are performed in
memory and written back out when the file is closed. Keywords:
backing_store: If True (default), save changes to a real file
when closing. If False, the file exists purely
in memory and is discarded when closed.
block_size: Increment (in bytes) by which memory is extended.
Default is 64k.
'family'
Store the file on disk as a series of fixed-length chunks. Useful
if the file system doesn't allow large files. Note: the filename
you provide *must* contain a printf-style integer format code
(e.g. %d"), which will be replaced by the file sequence number.
Keywords:
memb_size: Maximum file size (default is 2**31-1).
"""
_modes = weakref.WeakKeyDictionary()
@property
def filename(self):
"""File name on disk"""
register_thread()
name = h5f.get_name(self.fid)
# Note the exception can happen in one of two ways:
# 1. The name doesn't comply with the file system encoding;
# return the raw byte string
# 2. The name can't be encoded down to ASCII; return it as
# a Unicode string object
try:
name = name.decode(sys.getfilesystemencoding())
return name.encode('ascii')
except (UnicodeError, LookupError):
return name
@property
def file(self):
return self
@property
def mode(self):
"""Python mode used to open file"""
register_thread()
mode = self._modes.get(self)
if mode is None and config.API_18:
mode = {h5f.ACC_RDONLY: 'r', h5f.ACC_RDWR: 'r+'}.get(self.fid.get_intent())
return mode
@property
def driver(self):
"""Low-level HDF5 file driver used to open file"""
register_thread()
drivers = {h5fd.SEC2: 'sec2', h5fd.STDIO: 'stdio',
h5fd.CORE: 'core', h5fd.FAMILY: 'family',
h5fd.WINDOWS: 'windows'}
return drivers.get(self.fid.get_access_plist().get_driver(), 'unknown')
# --- Public interface (File) ---------------------------------------------
def __init__(self, name, mode=None, driver=None, **kwds):
""" Create a new file object.
Valid modes (like Python's file() modes) are:
- r Readonly, file must exist
- r+ Read/write, file must exist
- w Create file, truncate if exists
- w- Create file, fail if exists
- a Read/write if exists, create otherwise (default)
Valid drivers are:
- None Use default driver ('sec2' on UNIX, 'windows' on Win32)
- 'sec2' Standard UNIX driver
- 'stdio' Stdio (buffered) driver
- 'core' mmap driver
- 'family' Multi-part file driver
"""
register_thread()
if "bind" in kwds:
self.fid = kwds["bind"]
else:
if driver == 'core' and mode == 'w-' and version.hdf5_version_tuple[0:2] == (1, 6):
raise NotImplementedError("w- flag does not work on 1.6 for CORE driver")
try:
# If the byte string doesn't match the default encoding, just
# pass it on as-is. Note Unicode objects can always be encoded.
name = name.encode(sys.getfilesystemencoding())
except (UnicodeError, LookupError):
pass
plist = self._generate_access_plist(driver, **kwds)
self.fid = self._generate_fid(name, mode, plist)
self._modes[self] = mode
if config.API_18:
gid = h5o.open(self.fid, '/', lapl=self._lapl)
else:
gid = h5g.open(self.fid, '/')
Group.__init__(self, None, None, _rawid=gid)
def _generate_access_plist(self, driver, **kwds):
""" Set up file access property list """
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_fclose_degree(h5f.CLOSE_STRONG)
if driver is None or (driver == 'windows' and sys.platform == 'win32'):
return plist
if driver == 'sec2':
plist.set_fapl_sec2(**kwds)
elif driver == 'stdio':
plist.set_fapl_stdio(**kwds)
elif driver == 'core':
plist.set_fapl_core(**kwds)
elif driver == 'family':
plist.set_fapl_family(memb_fapl=plist.copy(), **kwds)
else:
raise ValueError('Unknown driver type "%s"' % driver)
return plist
def _generate_fid(self, name, mode, plist):
""" Get a new FileID by opening or creating a file.
Also validates mode argument."""
if mode == 'r':
fid = h5f.open(name, h5f.ACC_RDONLY, fapl=plist)
elif mode == 'r+':
fid = h5f.open(name, h5f.ACC_RDWR, fapl=plist)
elif mode == 'w-':
fid = h5f.create(name, h5f.ACC_EXCL, fapl=plist)
elif mode == 'w':
fid = h5f.create(name, h5f.ACC_TRUNC, fapl=plist)
elif mode == 'a' or mode is None:
try:
fid = h5f.open(name, h5f.ACC_RDWR, fapl=plist)
except IOError:
fid = h5f.create(name, h5f.ACC_EXCL, fapl=plist)
else:
raise ValueError("Invalid mode; must be one of r, r+, w, w-, a")
return fid
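# Illustrative sketch (editor's addition): driver keywords are routed to
# the matching set_fapl_* call above. For example, a purely in-memory
# file that is discarded on close:
#     >>> f = File("scratch.hdf5", "w", driver="core", backing_store=False)
#     >>> f.close()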
def close(self):
""" Close this HDF5 file. All open objects will be invalidated.
"""
register_thread()
with phil:
while self.fid:
self.fid.close()
def flush(self):
""" Tell the HDF5 library to flush its buffers.
"""
register_thread()
h5f.flush(self.fid)
def __enter__(self):
return self
def __exit__(self, *args):
register_thread()
with phil:
if self.id._valid:
self.close()
def __repr__(self):
register_thread()
if not self:
return "<Closed HDF5 file>"
return '<HDF5 file "%s" (mode %s, %s)>' % \
(os.path.basename(self.filename), self.mode,
_extras.sizestring(self.fid.get_filesize()))
def __hash__(self):
register_thread()
return hash(self.fid)
def __eq__(self, other):
# Python requires that objects which compare equal hash the same.
# Therefore comparison to generic Group objects is impossible
register_thread()
if hasattr(other, 'fid'):
return self.fid == other.fid
return False
class _RegionProxy(object):
def __init__(self, dset):
self.id = dset.id
def __getitem__(self, args):
register_thread()
selection = sel.select(self.id.shape, args, dsid=self.id)
return h5r.create(self.id, '.', h5r.DATASET_REGION, selection._id)
class Dataset(HLObject):
""" High-level interface to an HDF5 dataset.
Datasets can be opened via the syntax Group[<dataset name>], and
created with the method Group.create_dataset().
Datasets behave superficially like Numpy arrays. NumPy "simple"
slicing is fully supported, along with a subset of fancy indexing
and indexing by field names (dataset[0:10, "fieldname"]).
The standard NumPy properties "shape" and "dtype" are also available.
"""
# Internal properties
def _g_shape(self):
"""Numpy-style shape tuple giving dataset dimensions"""
register_thread()
return self.id.shape
def _s_shape(self, shape):
self.resize(shape)
shape = property(_g_shape, _s_shape)
@property
def dtype(self):
"""Numpy dtype representing the datatype"""
register_thread()
return self.id.dtype
@property
def value(self):
""" Deprecated alias for dataset[...] and dataset[()] """
with phil:
arr = self[...]
#if arr.shape == ():
# return numpy.asscalar(arr)
return arr
@property
def _dcpl(self):
return self.id.get_create_plist()
@property
def _filters(self):
return filters.get_filters(self._dcpl)
@property
def chunks(self):
"""Dataset chunks (or None)"""
register_thread()
dcpl = self._dcpl
if dcpl.get_layout() == h5d.CHUNKED:
return dcpl.get_chunk()
return None
@property
def compression(self):
"""Compression strategy (or None)"""
for x in ('gzip','lzf','szip'):
if x in self._filters:
return x
return None
@property
def compression_opts(self):
""" Compression setting. Int(0-9) for gzip, 2-tuple for szip. """
return self._filters.get(self.compression, None)
@property
def shuffle(self):
"""Shuffle filter present (T/F)"""
return 'shuffle' in self._filters
@property
def fletcher32(self):
"""Fletcher32 filter is present (T/F)"""
return 'fletcher32' in self._filters
@property
def maxshape(self):
register_thread()
with phil:
space = self.id.get_space()
dims = space.get_simple_extent_dims(True)
return tuple(x if x != h5s.UNLIMITED else None for x in dims)
@property
def regionref(self):
return _RegionProxy(self)
def __init__(self, group, name,
shape=None, dtype=None, data=None,
chunks=None, compression=None, shuffle=None,
fletcher32=None, maxshape=None, compression_opts=None,
_rawid = None):
""" Open or create a new dataset in the file.
It's recommended you use the Group methods (open via Group["name"],
create via Group.create_dataset), rather than calling the constructor.
There are two modes of operation for this constructor:
1. Open an existing dataset:
Dataset(group, name)
2. Create a dataset:
Dataset(group, name, shape, [dtype=<Numpy dtype>], **kwds)
or
Dataset(group, name, data=<Numpy array>, **kwds)
If "dtype" is not specified, the default is single-precision
floating point, with native byte order ("=f4").
Creating a dataset will fail if another of the same name already
exists. Also, chunks/compression/shuffle/fletcher32 may only be
specified when creating a dataset.
Creation keywords (* is default):
chunks: Tuple of chunk dimensions, True, or None*
compression: "gzip", "lzf", or "szip" (if available)
shuffle: Use the shuffle filter? (requires compression) T/F*
fletcher32: Enable Fletcher32 error detection? T/F*
maxshape: Tuple giving dataset maximum dimensions or None*.
You can grow each axis up to this limit using
resize(). For each unlimited axis, provide None.
compression_opts: Optional setting for the compression filter
All these options require chunking. If a chunk tuple is not
provided, the constructor will guess an appropriate chunk shape.
Please note none of these are allowed for scalar datasets.
"""
register_thread()
with phil:
if _rawid is not None:
id = _rawid
elif data is None and shape is None:
if any((dtype,chunks,compression,shuffle,fletcher32)):
raise ValueError('You cannot specify keywords when opening a dataset.')
id = h5d.open(group.id, name)
else:
# Convert data to a C-contiguous ndarray
if data is not None:
data = numpy.asarray(data, order="C")
# Validate shape
if shape is None:
if data is None:
raise TypeError("Either data or shape must be specified")
shape = data.shape
else:
shape = tuple(shape)
if data is not None and (numpy.product(shape) != numpy.product(data.shape)):
raise ValueError("Shape tuple is incompatible with data")
# Validate dtype
if dtype is None and data is None:
dtype = numpy.dtype("=f4")
elif dtype is None and data is not None:
dtype = data.dtype
else:
dtype = numpy.dtype(dtype)
# Legacy
if any((compression, shuffle, fletcher32, maxshape)):
if chunks is False:
raise ValueError("Chunked format required for given storage options")
# Legacy
if compression in range(10) or compression is True:
if compression_opts is None:
if compression is True:
compression_opts = 4
else:
compression_opts = compression
else:
raise TypeError("Conflict in compression options")
compression = 'gzip'
# Generate the dataset creation property list
# This also validates the keyword arguments
plist = filters.generate_dcpl(shape, dtype, chunks, compression,
compression_opts, shuffle, fletcher32, maxshape)
if maxshape is not None:
maxshape = tuple(x if x is not None else h5s.UNLIMITED for x in maxshape)
space_id = h5s.create_simple(shape, maxshape)
type_id = h5t.py_create(dtype, logical=True)
if config.API_18:
id = h5d.create(group.id, name, type_id, space_id, dcpl=plist, lcpl=self._lcpl)
else:
id = h5d.create(group.id, name, type_id, space_id, dcpl=plist)
if data is not None:
id.write(h5s.ALL, h5s.ALL, data)
HLObject.__init__(self, id)
def resize(self, size, axis=None):
""" Resize the dataset, or the specified axis (HDF5 1.8 only).
The dataset must be stored in chunked format; it can be resized up to
the "maximum shape" (keyword maxshape) specified at creation time.
The rank of the dataset cannot be changed.
"Size" should be a shape tuple, or if an axis is specified, an integer.
BEWARE: This functions differently than the NumPy resize() method!
The data is not "reshuffled" to fit in the new shape; each axis is
grown or shrunk independently. The coordinates of existing data are
fixed.
"""
register_thread()
with phil:
if not config.API_18:
raise NotImplementedError("Resizing is only available with HDF5 1.8.")
if self.chunks is None:
raise TypeError("Only chunked datasets can be resized")
if axis is not None:
if not (axis >= 0 and axis < self.id.rank):
raise ValueError("Invalid axis (0 to %s allowed)" % (self.id.rank - 1))
try:
newlen = int(size)
except TypeError:
raise TypeError("Argument must be a single int if axis is specified")
size = list(self.shape)
size[axis] = newlen
size = tuple(size)
self.id.set_extent(size)
h5f.flush(self.id) # THG recommends
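# Illustrative sketch (editor's addition): growing an unlimited axis,
# assuming an open writable File f built against HDF5 1.8:
#     >>> d = f.create_dataset("grow", (10,), maxshape=(None,))
#     >>> d.resize((20,))         # whole-shape form
#     >>> d.resize(30, axis=0)    # single-axis form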
def __len__(self):
""" The size of the first axis. TypeError if scalar.
Limited to 2**32 on 32-bit systems; Dataset.len() is preferred.
"""
size = self.len()
if size > sys.maxint:
raise OverflowError("Value too big for Python's __len__; use Dataset.len() instead.")
return size
def len(self):
""" The size of the first axis. TypeError if scalar.
Use of this method is preferred to len(dset), as Python's built-in
len() cannot handle values greater than 2**32 on 32-bit systems.
"""
shape = self.shape
if len(shape) == 0:
raise TypeError("Attempt to take len() of scalar dataset")
return shape[0]
def __iter__(self):
""" Iterate over the first axis. TypeError if scalar.
BEWARE: Modifications to the yielded data are *NOT* written to file.
"""
shape = self.shape
if len(shape) == 0:
raise TypeError("Can't iterate over a scalar dataset")
for i in xrange(shape[0]):
yield self[i]
def __getitem__(self, args):
""" Read a slice from the HDF5 dataset.
Takes slices and recarray-style field names (more than one is
allowed!) in any order. Obeys basic NumPy rules, including
broadcasting.
Also supports:
* Boolean "mask" array indexing
* Advanced dataspace selection via the "selections" module
"""
register_thread()
with phil:
args = args if isinstance(args, tuple) else (args,)
# Sort field indices from the rest of the args.
names = tuple(x for x in args if isinstance(x, str))
args = tuple(x for x in args if not isinstance(x, str))
# Create NumPy datatype for read, using only the named fields
# as specified by the user.
basetype = self.id.dtype
if len(names) == 0:
new_dtype = basetype
elif basetype.names is None:
raise ValueError("Field names only allowed for compound types")
else:
for name in names:
if name not in basetype.names:
raise ValueError("Field %s does not appear in this type." % name)
new_dtype = numpy.dtype([(name, basetype.fields[name][0]) for name in names])
# Perform the dataspace selection.
selection = sel.select(self.shape, args, dsid=self.id)
if selection.nselect == 0:
return numpy.ndarray((0,), dtype=new_dtype)
# Create the output array using information from the selection.
arr = numpy.ndarray(selection.mshape, new_dtype, order='C')
# This is necessary because in the case of array types, NumPy
# discards the array information at the top level.
mtype = h5t.py_create(new_dtype)
# HDF5 has a bug where if the memory shape has a different rank
# than the dataset, the read is very slow
mshape = selection.mshape
if len(mshape) < len(self.shape):
# pad with ones
mshape = (1,)*(len(self.shape)-len(mshape)) + mshape
# Perform the actual read
mspace = h5s.create_simple(mshape)
fspace = selection._id
self.id.read(mspace, fspace, arr, mtype)
# Patch up the output for NumPy
if len(names) == 1:
arr = arr[names[0]] # Single-field recarray convention
if arr.shape == ():
arr = numpy.asscalar(arr)
return arr
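# Illustrative sketch (editor's addition): slices and field names mix
# freely on read. Assuming a compound dataset d with a field "temp":
#     >>> d[0:10, "temp"]     # first ten values of one field
#     >>> d[...]              # whole dataset as a NumPy array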
def __setitem__(self, args, val):
""" Write to the HDF5 dataset from a Numpy array.
NumPy's broadcasting rules are honored, for "simple" indexing
(slices and integers). For advanced indexing, the shapes must
match.
Classes from the "selections" module may also be used to index.
"""
register_thread()
with phil:
args = args if isinstance(args, tuple) else (args,)
# Sort field indices from the slicing
names = tuple(x for x in args if isinstance(x, str))
args = tuple(x for x in args if not isinstance(x, str))
if len(names) != 0:
raise TypeError("Field name selections are not allowed for write.")
# Generally we try to avoid converting the arrays on the Python
# side. However, for compound literals this is unavoidable.
if self.dtype.kind == 'V' and \
(not isinstance(val, numpy.ndarray) or val.dtype.kind != 'V'):
val = numpy.asarray(val, dtype=self.dtype, order='C')
else:
val = numpy.asarray(val, order='C')
# Check for array dtype compatibility and convert
if self.dtype.subdtype is not None:
shp = self.dtype.subdtype[1]
if val.shape[-len(shp):] != shp:
raise TypeError("Can't broadcast to array dimension %s" % (shp,))
mtype = h5t.py_create(numpy.dtype((val.dtype, shp)))
mshape = val.shape[0:len(val.shape)-len(shp)]
else:
mshape = val.shape
mtype = None
# Perform the dataspace selection
selection = sel.select(self.shape, args, dsid=self.id)
if selection.nselect == 0:
return
# Broadcast scalars if necessary.
if (mshape == () and selection.mshape != ()):
if self.dtype.subdtype is not None:
raise NotImplementedError("Scalar broadcasting is not supported for array dtypes")
val2 = numpy.empty(selection.mshape[-1], dtype=val.dtype)
val2[...] = val
val = val2
mshape = val.shape
# Perform the write, with broadcasting
# Be careful to pad memory shape with ones to avoid HDF5 chunking
# glitch, which kicks in for mismatched memory/file selections
if len(mshape) < len(self.shape):
mshape_pad = (1,)*(len(self.shape)-len(mshape)) + mshape
else:
mshape_pad = mshape
mspace = h5s.create_simple(mshape_pad, (h5s.UNLIMITED,)*len(mshape_pad))
for fspace in selection.broadcast(mshape):
self.id.write(mspace, fspace, val, mtype)
def read_direct(self, dest, source_sel=None, dest_sel=None):
""" Read data directly from HDF5 into an existing NumPy array.
The destination array must be C-contiguous and writable.
Selections may be any operator class (HyperSelection, etc) in
h5py.selections, or the output of numpy.s_[<args>].
Broadcasting is supported for simple indexing.
"""
register_thread()
if source_sel is None:
source_sel = sel.SimpleSelection(self.shape)
else:
source_sel = sel.select(self.shape, source_sel, self.id) # for numpy.s_
fspace = source_sel._id
if dest_sel is None:
dest_sel = sel.SimpleSelection(dest.shape)
else:
dest_sel = sel.select(dest.shape, dest_sel, self.id)
for mspace in dest_sel.broadcast(source_sel.mshape):
self.id.read(mspace, fspace, dest)
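# Illustrative sketch (editor's addition): reading a slab into a
# preallocated, C-contiguous buffer, with numpy.s_ for both selections:
#     >>> import numpy
#     >>> buf = numpy.empty((10,), dtype=dset.dtype)
#     >>> dset.read_direct(buf, numpy.s_[0:10], numpy.s_[0:10])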
def write_direct(self, source, source_sel=None, dest_sel=None):
""" Write data directly to HDF5 from a NumPy array.
The source array must be C-contiguous. Selections may be any
operator class (HyperSelection, etc) in h5py.selections, or
the output of numpy.s_[<args>].
Broadcasting is supported for simple indexing.
"""
register_thread()
if source_sel is None:
source_sel = sel.SimpleSelection(source.shape)
else:
source_sel = sel.select(source.shape, source_sel, self.id) # for numpy.s_
mspace = source_sel._id
if dest_sel is None:
dest_sel = sel.SimpleSelection(self.shape)
else:
dest_sel = sel.select(self.shape, dest_sel, self.id)
for fspace in dest_sel.broadcast(source_sel.mshape):
self.id.write(mspace, fspace, source)
def __array__(self, dtype=None):
with phil:
arr = numpy.empty(self.shape, dtype=self.dtype if dtype is None else dtype)
self.read_direct(arr)
return arr
def __repr__(self):
if not self:
return "<Closed HDF5 dataset>"
namestr = '"%s"' % _extras.basename(self.name) if self.name is not None else "(anonymous)"
return '<HDF5 dataset %s: shape %s, type "%s">' % \
(namestr, self.shape, self.dtype.str)
class AttributeManager(_DictCompat):
"""
Allows dictionary-style access to an HDF5 object's attributes.
These are created exclusively by the library and are available as
a Python attribute at <object>.attrs
Like the members of groups, attributes provide a minimal dictionary-
style interface. Anything which can be reasonably converted to a
Numpy array or Numpy scalar can be stored.
Attributes are automatically created on assignment with the
syntax <obj>.attrs[name] = value, with the HDF5 type automatically
deduced from the value. Existing attributes are overwritten.
To modify an existing attribute while preserving its type, use the
method modify(). To specify an attribute of a particular type and
shape (or to create an empty attribute), use create().
"""
def __init__(self, parent):
""" Private constructor.
"""
self._id = parent.id
def __getitem__(self, name):
""" Read the value of an attribute.
"""
register_thread()
with phil:
attr = h5a.open(self._id, name)
arr = numpy.ndarray(attr.shape, dtype=attr.dtype, order='C')
attr.read(arr)
if len(arr.shape) == 0:
return numpy.asscalar(arr)
return arr
def __setitem__(self, name, value):
""" Set a new attribute, overwriting any existing attribute.
The type and shape of the attribute are determined from the data. To
use a specific type or shape, or to preserve the type of an attribute,
use the methods create() and modify().
Broadcasting isn't supported for attributes.
"""
with phil:
self.create(name, data=value, dtype=_guess_dtype(value))
def __delitem__(self, name):
""" Delete an attribute (which must already exist). """
register_thread()
h5a.delete(self._id, name)
def create(self, name, data, shape=None, dtype=None):
""" Create a new attribute, overwriting any existing attribute.
name
Name of the new attribute (required)
data
An array to initialize the attribute (required)
shape
Shape of the attribute. Overrides data.shape if both are
given. The total number of points must be unchanged.
dtype
Data type of the attribute. Overrides data.dtype if both
are given. Must be conversion-compatible with data.dtype.
"""
register_thread()
with phil:
if data is not None:
data = numpy.asarray(data, order='C', dtype=dtype)
if shape is None:
shape = data.shape
elif numpy.product(shape) != numpy.product(data.shape):
raise ValueError("Shape of new attribute conflicts with shape of data")
if dtype is None:
dtype = data.dtype
if dtype is None:
dtype = numpy.dtype('f')
if shape is None:
raise ValueError('At least one of "shape" or "data" must be given')
data = data.reshape(shape)
space = h5s.create_simple(shape)
htype = h5t.py_create(dtype, logical=True)
if name in self:
h5a.delete(self._id, name)
attr = h5a.create(self._id, name, htype, space)
if data is not None:
attr.write(data)
def modify(self, name, value):
""" Change the value of an attribute while preserving its type.
Differs from __setitem__ in that the type of an existing attribute
is preserved. Useful for interacting with externally generated files.
If the attribute doesn't exist, it will be automatically created.
"""
register_thread()
with phil:
if name not in self:
self[name] = value
else:
value = numpy.asarray(value, order='C')
attr = h5a.open(self._id, name)
# Allow the case of () <-> (1,)
if (value.shape != attr.shape) and not \
(numpy.product(value.shape)==1 and numpy.product(attr.shape)==1):
raise TypeError("Shape of data is incompatible with existing attribute")
attr.write(value)
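# Illustrative sketch (editor's addition): __setitem__ replaces an
# attribute outright (possibly changing its type), while modify()
# keeps the stored type. Assuming any open object obj:
#     >>> obj.attrs["version"] = 1          # create (type deduced)
#     >>> obj.attrs.modify("version", 2)    # overwrite, type preserved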
def __len__(self):
""" Number of attributes attached to the object. """
# I expect we will not have more than 2**32 attributes
register_thread()
return h5a.get_num_attrs(self._id)
def __iter__(self):
""" Iterate over the names of attributes. """
register_thread()
with phil:
attrlist = []
def iter_cb(name, *args):
attrlist.append(name)
h5a.iterate(self._id, iter_cb)
for name in attrlist:
yield name
def __contains__(self, name):
""" Determine if an attribute exists, by name. """
register_thread()
return h5a.exists(self._id, name)
def __repr__(self):
if not self._id:
return "<Attributes of closed HDF5 object>"
return "<Attributes of HDF5 object at %s>" % id(self._id)
class Datatype(HLObject):
"""
Represents an HDF5 named datatype stored in a file.
To store a datatype, simply assign it to a name in a group:
>>> MyGroup["name"] = numpy.dtype("f")
>>> named_type = MyGroup["name"]
>>> assert named_type.dtype == numpy.dtype("f")
"""
@property
def dtype(self):
"""Numpy dtype equivalent for this datatype"""
register_thread()
return self.id.dtype
def __init__(self, grp, name, _rawid=None):
""" Private constructor.
"""
register_thread()
with phil:
id = _rawid if _rawid is not None else h5t.open(grp.id, name)
HLObject.__init__(self, id)
def __repr__(self):
if not self.id:
return "<Closed HDF5 named type>"
namestr = '"%s"' % _extras.basename(self.name) if self.name is not None else "(anonymous)"
return '<HDF5 named type %s (dtype %s)>' % \
(namestr, self.dtype.str)
class SoftLink(object):
"""
Represents a symbolic ("soft") link in an HDF5 file. The path
may be absolute or relative. No checking is performed to ensure
that the target actually exists.
"""
@property
def path(self):
return self._path
def __init__(self, path):
self._path = str(path)
def __repr__(self):
return '<SoftLink to "%s">' % self.path
class ExternalLink(object):
"""
Represents an HDF5 external link. Paths may be absolute or relative.
No checking is performed to ensure either the target or file exists.
"""
@property
def path(self):
return self._path
@property
def filename(self):
return self._filename
def __init__(self, filename, path):
if not config.API_18:
raise NotImplementedError("External links are only available as of HDF5 1.8")
self._filename = str(filename)
self._path = str(path)
def __repr__(self):
return '<ExternalLink to "%s" in file "%s">' % (self.path, self.filename)
|
qsnake/h5py
|
h5py/highlevel.py
|
Python
|
bsd-3-clause
| 56,170
|
[
"VisIt"
] |
c9b97e1c6921fb3063710570386492b4add31678832bf722465dad82d4c67225
|
import tempfile
import numpy as np
from pygmin.systems import AtomicCluster
from pygmin.potentials.ljpshiftfast import LJpshift
from pygmin.utils.xyz import write_xyz
from pygmin.mindist import CoMToOrigin
__all__ = ["BLJCluster"]
class BLJCluster(AtomicCluster):
"""
define the System class for a binary Lennard-Jones cluster
Parameters
----------
natoms : int
ntypeA : int
number of type-A (big) particles
**kwargs : other keyword parameters
these are passed on to the potential
See Also
--------
BaseSystem, AtomicCluster
"""
def __init__(self, natoms, ntypeA="default", **potential_kwargs):
super(BLJCluster, self).__init__()
self.natoms = natoms
if ntypeA == "default":
self.ntypeA = int(self.natoms * 0.8)
else:
self.ntypeA = ntypeA
self.potential_kwargs = potential_kwargs
self.params["database"]["accuracy"] = 1e-3
def get_potential(self):
return LJpshift(self.natoms, self.ntypeA, **self.potential_kwargs)
def get_permlist(self):
return [range(self.ntypeA), range(self.ntypeA, self.natoms)]
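# Illustrative sketch (editor's addition): with the default 80/20
# composition, a 15-atom cluster has 12 type-A atoms, so the two
# permutable index groups are [0..11] and [12..14]:
#     >>> BLJCluster(15).get_permlist()
#     [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12, 13, 14]]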
#
# stuff for the gui below here
#
def draw(self, coordslinear, index):
"""
tell the gui how to represent your system using openGL objects
Parameters
----------
coordslinear : array
index : int
we can have more than one molecule on the screen at one time. index tells
which one to draw. They are viewed at the same time, so they should be
visually distinct, e.g. different colors. accepted values are 1 or 2
"""
# index = 1 or 2
from OpenGL import GL,GLUT
coords = coordslinear.reshape(coordslinear.size/3, 3)
com=np.mean(coords, axis=0)
size = 0.5
if index == 1:
color = [0.65, 0.0, 0.0, 1.]
else:
color = [0.00, 0.65, 0., 1.]
GL.glMaterialfv(GL.GL_FRONT_AND_BACK, GL.GL_DIFFUSE, color)
for i,xx in enumerate(coords):
if i == self.ntypeA:
size *= 0.88  # this should depend on the LJ parameters
if index == 1:
color = [0.25, 0.00, 0., 1.]
else:
color = [0.00, 0.25, 0., 1.]
GL.glMaterialfv(GL.GL_FRONT_AND_BACK, GL.GL_DIFFUSE, color)
x=xx-com
GL.glPushMatrix()
GL.glTranslate(x[0],x[1],x[2])
GLUT.glutSolidSphere(size,30,30)
GL.glPopMatrix()
def load_coords_pymol(self, coordslist, oname, index=1):
"""load the coords into pymol
the new object must be named oname so we can manipulate it later
Parameters
----------
coordslist : list of arrays
oname : str
the new pymol object must be named oname so it can be manipulated
later
index : int
we can have more than one molecule on the screen at one time. index tells
which one to draw. They are viewed at the same time, so should be
visually distinct, e.g. different colors. accepted values are 1 or 2
Notes
-----
the implementation here is a bit hacky. we create a temporary xyz file from coords
and load the molecule in pymol from this file.
"""
#pymol is imported here so you can do, e.g. basinhopping without installing pymol
import pymol
#create the temporary file
suffix = ".xyz"
f = tempfile.NamedTemporaryFile(mode="w", suffix=suffix)
fname = f.name
#write the coords into the xyz file
from pygmin.mindist import CoMToOrigin
labels = ["LA" for i in range(self.ntypeA)] + \
["LB" for i in range(self.natoms - self.ntypeA)]
for coords in coordslist:
coords = CoMToOrigin(coords.copy())
write_xyz(f, coords, title=oname, atomtypes=labels)
f.flush()
# self.f = f # so the file is not deleted
# print fname
#load the molecule from the temporary file
pymol.cmd.load(fname)
#get the name of the object just created and change it to oname
objects = pymol.cmd.get_object_list()
objectname = objects[-1]
pymol.cmd.set_name(objectname, oname)
#set the representation
pymol.cmd.hide("everything", oname)
pymol.cmd.show("spheres", oname)
#make the B atoms smaller
seleA = "%s and name LA" % (oname)
seleB = "%s and name LB" % (oname)
pymol.cmd.set("sphere_scale", value=1.0, selection=seleA)
pymol.cmd.set("sphere_scale", value=0.8, selection=seleB)
#set the color according to index
if index == 1:
pymol.cmd.color("red", selection=seleA)
pymol.cmd.color("firebrick", selection=seleB)
else:
pymol.cmd.color("deepolive", selection=seleA)
pymol.cmd.color("smudge", selection=seleB)
#
#only for testing below here
#
def run():
#create the system object
sys = BLJCluster(15)
#create a database
db = sys.create_database()
#do a short basinhopping run
bh = sys.get_basinhopping(database=db, outstream=None)
while len(db.minima()) < 2:
bh.run(100)
#try to connect the lowest two minima
min1, min2 = db.minima()[:2]
connect = sys.get_double_ended_connect(min1, min2, db)
connect.connect()
if __name__ == "__main__":
run()
|
js850/PyGMIN
|
pygmin/systems/bljcluster.py
|
Python
|
gpl-3.0
| 5,755
|
[
"PyMOL"
] |
dd4dc35f61c0c4b6788b606e011c5278eaa0cc30ffaadff2b1bf23c0353ef094
|
"""
This module contains solvers for all kinds of equations:
- algebraic or transcendental, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from __future__ import print_function, division
from sympy.core.compatibility import (iterable, is_sequence, ordered,
default_sort_key, range)
from sympy.core.sympify import sympify
from sympy.core import S, Add, Symbol, Equality, Dummy, Expr, Mul, Pow
from sympy.core.exprtools import factor_terms
from sympy.core.function import (expand_mul, expand_multinomial, expand_log,
Derivative, AppliedUndef, UndefinedFunction, nfloat,
Function, expand_power_exp, Lambda, _mexpand)
from sympy.integrals.integrals import Integral
from sympy.core.numbers import ilcm, Float
from sympy.core.relational import Relational, Ge
from sympy.core.logic import fuzzy_not
from sympy.logic.boolalg import And, Or, BooleanAtom
from sympy.core.basic import preorder_traversal
from sympy.functions import (log, exp, LambertW, cos, sin, tan, acos, asin, atan,
Abs, re, im, arg, sqrt, atan2)
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.simplify import (simplify, collect, powsimp, posify, powdenest,
nsimplify, denom, logcombine)
from sympy.simplify.sqrtdenest import sqrt_depth
from sympy.simplify.fu import TR1
from sympy.matrices import Matrix, zeros
from sympy.polys import roots, cancel, factor, Poly, together, degree
from sympy.polys.polyerrors import GeneratorsNeeded, PolynomialError
from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise
from sympy.utilities.lambdify import lambdify
from sympy.utilities.misc import filldedent
from sympy.utilities.iterables import uniq, generate_bell, flatten
from sympy.utilities.decorator import conserve_mpmath_dps
from mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from sympy.solvers.inequalities import reduce_inequalities
from types import GeneratorType
from collections import defaultdict
import warnings
def _ispow(e):
"""Return True if e is a Pow or is exp."""
return isinstance(e, Expr) and (e.is_Pow or e.func is exp)
def _simple_dens(f, symbols):
# when checking if a denominator is zero, we can just check the
# base of powers with nonzero exponents since if the base is zero
# the power will be zero, too. To keep it simple and fast, we
# limit simplification to exponents that are Numbers
dens = set()
for d in denoms(f, symbols):
if d.is_Pow and d.exp.is_Number:
if d.exp.is_zero:
continue # foo**0 is never 0
d = d.base
dens.add(d)
return dens
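# Illustrative sketch (editor's note, not part of the library): a power in
# a denominator is reduced to its base before the zero test, e.g.
#     >>> from sympy.abc import x
#     >>> _simple_dens(1/x**2, [x])
#     {x}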
def denoms(eq, symbols=None):
"""Return (recursively) set of all denominators that appear in eq
that contain any symbol in iterable ``symbols``; if ``symbols`` is
None (default) then all denominators will be returned.
Examples
========
>>> from sympy.solvers.solvers import denoms
>>> from sympy.abc import x, y, z
>>> from sympy import sqrt
>>> denoms(x/y)
{y}
>>> denoms(x/(y*z))
{y, z}
>>> denoms(3/x + y/z)
{x, z}
>>> denoms(x/2 + y/z)
{2, z}
"""
pot = preorder_traversal(eq)
dens = set()
for p in pot:
den = denom(p)
if den is S.One:
continue
for d in Mul.make_args(den):
dens.add(d)
if not symbols:
return dens
rv = []
for d in dens:
free = d.free_symbols
if any(s in free for s in symbols):
rv.append(d)
return set(rv)
def checksol(f, symbol, sol=None, **flags):
"""Checks whether sol is a solution of equation f == 0.
Input can be either a single symbol and corresponding value
or a dictionary of symbols and values. When given as a dictionary
and flag ``simplify=True``, the values in the dictionary will be
simplified. ``f`` can be a single equation or an iterable of equations.
A solution must satisfy all equations in ``f`` to be considered valid;
if a solution does not satisfy any equation, False is returned; if one or
more checks are inconclusive (and none are False) then None
is returned.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers import checksol
>>> x, y = symbols('x,y')
>>> checksol(x**4 - 1, x, 1)
True
>>> checksol(x**4 - 1, x, 0)
False
>>> checksol(x**2 + y**2 - 5**2, {x: 3, y: 4})
True
To check if an expression is zero using checksol, pass it
as ``f`` and send an empty dictionary for ``symbol``:
>>> checksol(x**2 + x - x*(x + 1), {})
True
None is returned if checksol() could not conclude.
flags:
'numerical=True (default)'
do a fast numerical check if ``f`` has only one symbol.
'minimal=True (default is False)'
a very fast, minimal testing.
'warn=True (default is False)'
show a warning if checksol() could not conclude.
'simplify=True (default)'
simplify solution before substituting into function and
simplify the function before trying specific simplifications
'force=True (default is False)'
make positive all symbols without assumptions regarding sign.
"""
from sympy.physics.units import Unit
minimal = flags.get('minimal', False)
if sol is not None:
sol = {symbol: sol}
elif isinstance(symbol, dict):
sol = symbol
else:
msg = 'Expecting (sym, val) or ({sym: val}, None) but got (%s, %s)'
raise ValueError(msg % (symbol, sol))
if iterable(f):
if not f:
raise ValueError('no functions to check')
rv = True
for fi in f:
check = checksol(fi, sol, **flags)
if check:
continue
if check is False:
return False
rv = None # don't return, wait to see if there's a False
return rv
if isinstance(f, Poly):
f = f.as_expr()
elif isinstance(f, Equality):
f = f.lhs - f.rhs
if not f:
return True
if sol and not f.has(*list(sol.keys())):
# if f(y) == 0, x=3 does not set f(y) to zero...nor does it not
return None
illegal = set([S.NaN,
S.ComplexInfinity,
S.Infinity,
S.NegativeInfinity])
if any(sympify(v).atoms() & illegal for k, v in sol.items()):
return False
was = f
attempt = -1
numerical = flags.get('numerical', True)
while 1:
attempt += 1
if attempt == 0:
val = f.subs(sol)
if isinstance(val, Mul):
val = val.as_independent(Unit)[0]
if val.atoms() & illegal:
return False
elif attempt == 1:
if val.free_symbols:
if not val.is_constant(*list(sol.keys()), simplify=not minimal):
return False
# there are free symbols -- simple expansion might work
_, val = val.as_content_primitive()
val = expand_mul(expand_multinomial(val))
elif attempt == 2:
if minimal:
return
if flags.get('simplify', True):
for k in sol:
sol[k] = simplify(sol[k])
# start over without the failed expanded form, possibly
# with a simplified solution
val = simplify(f.subs(sol))
if flags.get('force', True):
val, reps = posify(val)
# expansion may work now, so try again and check
exval = expand_mul(expand_multinomial(val))
if exval.is_number or not exval.free_symbols:
# we can decide now
val = exval
else:
# if there are no radicals and no functions then this can't be
# zero anymore -- can it?
pot = preorder_traversal(expand_mul(val))
seen = set()
saw_pow_func = False
for p in pot:
if p in seen:
continue
seen.add(p)
if p.is_Pow and not p.exp.is_Integer:
saw_pow_func = True
elif p.is_Function:
saw_pow_func = True
elif isinstance(p, UndefinedFunction):
saw_pow_func = True
if saw_pow_func:
break
if saw_pow_func is False:
return False
if flags.get('force', True):
# don't do a zero check with the positive assumptions in place
val = val.subs(reps)
nz = fuzzy_not(val.is_zero)
if nz is not None:
# issue 5673: nz may be True even when False
# so these are just hacks to keep a false positive
# from being returned
# HACK 1: LambertW (issue 5673)
if val.is_number and val.has(LambertW):
# don't eval this to verify solution since if we got here,
# numerical must be False
return None
# add other HACKs here if necessary, otherwise we assume
# the nz value is correct
return not nz
break
if val == was:
continue
elif val.is_Rational:
return val == 0
if numerical and not val.free_symbols:
return bool(abs(val.n(18).n(12, chop=True)) < 1e-9)
was = val
if flags.get('warn', False):
warnings.warn("\n\tWarning: could not verify solution %s." % sol)
# returns None if it can't conclude
# TODO: improve solution testing
def check_assumptions(expr, against=None, **assumptions):
"""Checks whether expression `expr` satisfies all assumptions.
`assumptions` is a dict of assumptions: {'assumption': True|False, ...}.
Examples
========
>>> from sympy import Symbol, pi, I, exp, check_assumptions
>>> check_assumptions(-5, integer=True)
True
>>> check_assumptions(pi, real=True, integer=False)
True
>>> check_assumptions(pi, real=True, negative=True)
False
>>> check_assumptions(exp(I*pi/7), real=False)
True
>>> x = Symbol('x', real=True, positive=True)
>>> check_assumptions(2*x + 1, real=True, positive=True)
True
>>> check_assumptions(-2*x - 5, real=True, positive=True)
False
To check assumptions of ``expr`` against another variable or expression,
pass the expression or variable as ``against``.
>>> check_assumptions(2*x + 1, x)
True
`None` is returned if check_assumptions() could not conclude.
>>> check_assumptions(2*x - 1, real=True, positive=True)
>>> z = Symbol('z')
>>> check_assumptions(z, real=True)
"""
if against is not None:
assumptions = against.assumptions0
expr = sympify(expr)
result = True
for key, expected in assumptions.items():
if expected is None:
continue
test = getattr(expr, 'is_' + key, None)
if test is expected:
continue
elif test is not None:
return False
result = None  # Can't conclude, unless another test fails.
return result
def solve(f, *symbols, **flags):
"""
Algebraically solves equations and systems of equations.
Currently supported are:
- polynomial,
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions.
Input is formed as:
* f
- a single Expr or Poly that must be zero,
- an Equality
- a Relational expression or boolean
- iterable of one or more of the above
* symbols (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
e.g. solve(f, x, y)
- ordered iterable of symbols
e.g. solve(f, [x, y])
* flags
'dict'=True (default is False)
return list (perhaps empty) of solution mappings
'set'=True (default is False)
return list of symbols and set of tuple(s) of solution(s)
'exclude=[] (default)'
don't try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
'check=True (default)'
If False, don't do any testing of solutions. This can be
useful if one wants to include solutions that make any
denominator zero.
'numerical=True (default)'
do a fast numerical check if ``f`` has only one symbol.
'minimal=True (default is False)'
a very fast, minimal testing.
'warn=True (default is False)'
show a warning if checksol() could not conclude.
'simplify=True (default)'
simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero
'force=True (default is False)'
make positive all symbols without assumptions regarding sign.
'rational=True (default)'
recast Floats as Rational; if this option is not used, the
system containing floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
'manual=True (default is False)'
do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually"
'implicit=True (default is False)'
allows solve to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
like cos, exp, ....
'particular=True (default is False)'
instructs solve to try to find a particular solution to a linear
system with as many zeros as possible; this is very expensive
'quick=True (default is False)'
when using particular=True, use a fast heuristic instead to find a
solution with many zeros (instead of using the very slow method
guaranteed to find the largest number of zeros possible)
'cubics=True (default)'
return explicit solutions when cubic expressions are encountered
'quartics=True (default)'
return explicit solutions when quartic expressions are encountered
'quintics=True (default)'
return explicit solutions (if possible) when quintic expressions
are encountered
Examples
========
The output varies according to the input and can be seen by example::
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
* boolean or univariate Relational
>>> solve(x < 3)
And(-oo < x, x < 3)
* to always get a list of solution mappings, use flag dict=True
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> solve([x - 3, y - 1], dict=True)
[{x: 3, y: 1}]
* to get a list of symbols and set of solution(s) use flag set=True
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
* single expression and single symbol that is in the expression
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
* single expression with no symbol that is in the expression
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
* single expression with no symbol given
In this case, all free symbols will be selected as potential
symbols to solve for. If the equation is univariate then a list
of solutions is returned; otherwise -- as is the case when symbols are
given as an iterable of length > 1 -- a list of mappings will be returned.
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
* when an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save one from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method.
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a *symbol* implicitly, use 'implicit=True':
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: (-2*x - 6 + sqrt(3))/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* if you attempt to solve for a number, remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use dsolve.
* single expression and more than 1 symbol
* when there is a linear solution
>>> solve(x - y**2, x, y)
[{x: y**2}]
>>> solve(x**2 - y, x, y)
[{y: x**2}]
* when undetermined coefficients are identified
* that are linear
>>> solve((a + b)*x - b + 2, a, b)
{a: -2, b: 2}
* that are nonlinear
>>> solve((a + b)*x - b**2 + 2, a, b, set=True)
([a, b], {(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))})
* if there is no linear solution then the first successful
attempt for a nonlinear solution will be returned
>>> solve(x**2 - y**2, x, y)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y)
[{x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[{y: -x*sqrt(exp(x))}, {y: x*sqrt(exp(x))}]
* iterable of one or more of the above
* involving relationals or bools
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* when the system is linear
* with a solution
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: -5*y + 2, z: 21*y - 6}
* without a solution
>>> solve([x + 3, x - 3])
[]
* when the system is not linear
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* if no symbols are given, all free symbols will be selected and a list
of mappings returned
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* if any equation doesn't depend on the symbol(s) given it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest
>>> solve([x - y, y - 3], x)
{x: y}
Notes
=====
solve() with check=True (default) will run through the symbol tags to
eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned.
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions aren't checked when `solve()` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since sin(x)/x has the well known
limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
Disabling high-order, explicit solutions
----------------------------------------
When solving polynomial expressions, one might not want explicit solutions
(which can be quite long). If the expression is univariate, CRootOf
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) - (-1/2 -
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3, -(-1/2 +
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 - 1/((-1/2 +
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)), -(3*sqrt(69)/2 +
27/2)**(1/3)/3 - 1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
Solving equations involving radicals
------------------------------------
Because of SymPy's use of the principal root (issue #8789), some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example there is only a single solution to the equation. Other
expressions will yield spurious roots which must be checked manually;
roots which give a negative argument to odd-powered radicals will also need
special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*_p**5 - 7*_p**3 + 1, 1)**15,
CRootOf(7*_p**5 - 7*_p**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so real_root must be used to see that
it satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be necessary
to find the roots, especially for higher order equations. Consider the
following expression:
>>> expr = root(x, 3) - root(x, 5)
We will construct a known value for this expression at x = -3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The solve function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function unrad, however, can be used to get a form of the equation for
which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although eq or eq1 could have been used to find xvals, the solution can
only be verified with expr1:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
See Also
========
- rsolve() for solving recurrence relationships
- dsolve() for solving differential equations
"""
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
ordered_symbols = (symbols and
symbols[0] and
(isinstance(symbols[0], Symbol) or
is_sequence(symbols[0],
include=GeneratorType)
)
)
f, symbols = (_sympified_list(w) for w in [f, symbols])
implicit = flags.get('implicit', False)
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, Equality):
if 'ImmutableMatrix' in [type(a).__name__ for a in fi.args]:
f[i] = fi.lhs - fi.rhs
else:
f[i] = Add(fi.lhs, -fi.rhs, evaluate=False)
elif isinstance(fi, Poly):
f[i] = fi.as_expr()
elif isinstance(fi, (bool, BooleanAtom)) or fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
# rewrite hyperbolics in terms of exp
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction),
lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# preprocess symbol(s)
###########################################################################
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not (p.is_number or p.is_Add or p.is_Mul) or \
isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
symbols = list(symbols)
# supply dummy symbols so solve(3) behaves like solve(3, x)
for i in range(len(f) - len(symbols)):
symbols.append(Dummy())
ordered_symbols = False
elif len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
# remove symbols the user is not interested in
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# real/imag handling -----------------------------
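# Abs is not directly invertible, so below it is rewritten as a sign-aware
# Piecewise (piece(w) == w for w >= 0 and -w otherwise) that the solvers
# can work with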
w = Dummy('w')
piece = Lambda(w, Piecewise((w, Ge(w, 0)), (-w, True)))
for i, fi in enumerate(f):
# Abs
reps = []
for a in fi.atoms(Abs):
if not a.has(*symbols):
continue
if a.args[0].is_real is None:
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % a)
reps.append((a, piece(a.args[0]) if a.args[0].is_real else \
piece(a.args[0]*S.ImaginaryUnit)))
fi = fi.subs(reps)
# arg
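# arg(z) is replaced by atan(im(z)/re(z)); this matches arg only up to
# the quadrant correction that atan2 would supply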
_arg = [a for a in fi.atoms(arg) if a.has(*symbols)]
fi = fi.xreplace(dict(list(zip(_arg,
[atan(im(a.args[0])/re(a.args[0])) for a in _arg]))))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in f):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
for i, fi in enumerate(f):
f[i] = fi.xreplace({s: rhs})
f.append(s - rhs)
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
symbols = list(uniq(symbols))
if not ordered_symbols:
# we do this to make the results returned canonical in case f
# contains a system of nonlinear equations; all other cases should
# be unambiguous
symbols = sorted(symbols, key=default_sort_key)
# we can solve for non-symbol entities by replacing them with Dummy symbols
symbols_new = []
symbol_swapped = False
for i, s in enumerate(symbols):
if s.is_Symbol:
s_new = s
else:
symbol_swapped = True
s_new = Dummy('X%d' % i)
symbols_new.append(s_new)
if symbol_swapped:
swap_sym = list(zip(symbols, symbols_new))
f = [fi.subs(swap_sym) for fi in f]
symbols = symbols_new
swap_sym = {v: k for k, v in swap_sym}
else:
swap_sym = {}
# this is needed in the next two events
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.has(*symset):
ok = True
else:
free = fi.free_symbols
if not free:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for d in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
non_inverts = [(v, k.subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any([_has_piecewise(a) for a in e.args])
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return dict([(k, v.subs(non_inverts)) for k, v in
solution.items()])
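# single-pass "loop" used as a breakable block so each solution shape
# (dict, list of dicts/tuples, list of expressions) can exit early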
for i in range(1):
if type(solution) is dict:
solution = _do_dict(solution)
break
elif solution and type(solution) is list:
if type(solution[0]) is dict:
solution = [_do_dict(s) for s in solution]
break
elif type(solution[0]) is tuple:
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
break
else:
solution = [v.subs(non_inverts) for v in solution]
break
elif not solution:
break
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if symbol_swapped:
symbols = [swap_sym[k] for k in symbols]
if type(solution) is dict:
solution = dict([(swap_sym[k], v.subs(swap_sym))
for k, v in solution.items()])
elif solution and type(solution) is list and type(solution[0]) is dict:
for i, sol in enumerate(solution):
solution[i] = dict([(swap_sym[k], v.subs(swap_sym))
for k, v in sol.items()])
# undo the dictionary solutions returned when the system was only partially
# solved with poly-system if all symbols are present
if (
not flags.get('dict', False) and
solution and
ordered_symbols and
type(solution) is not dict and
type(solution[0]) is dict and
all(s in solution[0] for s in symbols)
):
solution = [tuple([r[s].subs(r) for s in symbols]) for r in solution]
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if type(solution) is tuple:
# this has already been checked and is in as_set form
return solution
elif type(solution) is list:
if type(solution[0]) is tuple:
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif type(solution[0]) is dict:
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif type(solution) is dict:
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
can't be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if not as_set and isinstance(solution, list):
# Make sure that a list of solutions is ordered in a canonical way.
solution.sort(key=default_sort_key)
if not as_dict and not as_set:
return solution or []
# return a list of mappings or []
if not solution:
solution = []
else:
if isinstance(solution, dict):
solution = [solution]
elif iterable(solution[0]):
solution = [dict(list(zip(symbols, s))) for s in solution]
elif isinstance(solution[0], dict):
pass
else:
if len(symbols) != 1:
raise ValueError("Length should be 1")
solution = [{symbols[0]: s} for s in solution]
if as_dict:
return solution
assert as_set
if not solution:
return [], set()
k = list(ordered(solution[0].keys()))
return k, {tuple([s[ki] for ki in k]) for s in solution}
def _solve(f, *symbols, **flags):
"""Return a checked solution for f in terms of one or more of the
symbols. A list should be returned except for the case when a linear
undetermined-coefficients equation is encountered (in which case
a dictionary is returned).
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None a ValueError will be raised."""
not_impl_msg = "No algorithms are implemented to solve equation %s"
if len(symbols) != 1:
soln = None
free = f.free_symbols
ex = free - set(symbols)
if len(ex) != 1:
ind, dep = f.as_independent(*symbols)
ex = ind.free_symbols & dep.free_symbols
if len(ex) == 1:
ex = ex.pop()
try:
# soln may come back as dict, list of dicts or tuples, or
# tuple of symbol list and set of solution tuples
soln = solve_undetermined_coeffs(f, symbols, ex, **flags)
except NotImplementedError:
pass
if soln:
if flags.get('simplify', True):
if type(soln) is dict:
for k in soln:
soln[k] = simplify(soln[k])
elif type(soln) is list:
if type(soln[0]) is dict:
for d in soln:
for k in d:
d[k] = simplify(d[k])
elif type(soln[0]) is tuple:
soln = [tuple(simplify(i) for i in j) for j in soln]
else:
raise TypeError('unrecognized args in list')
elif type(soln) is tuple:
sym, sols = soln
soln = sym, {tuple(simplify(i) for i in j) for j in sols}
else:
raise TypeError('unrecognized solution type')
return soln
# find first successful solution
failed = []
got_s = set([])
result = []
for s in symbols:
xi, v = solve_linear(f, symbols=[s])
if xi == s:
# no need to check but we should simplify if desired
if flags.get('simplify', True):
v = simplify(v)
vfree = v.free_symbols
if got_s and any([ss in vfree for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
got_s.add(xi)
result.append({xi: v})
elif xi: # there might be a non-linear solution if xi is not 0
failed.append(s)
if not failed:
return result
for s in failed:
try:
soln = _solve(f, s, **flags)
for sol in soln:
if got_s and any([ss in sol.free_symbols for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
got_s.add(s)
result.append({s: sol})
except NotImplementedError:
continue
if got_s:
return result
else:
raise NotImplementedError(not_impl_msg % f)
symbol = symbols[0]
# /!\ capture this flag then set it to False so that no checking in
# recursive calls will be done; only the final answer is checked
checkdens = check = flags.pop('check', True)
flags['check'] = False
# build up solutions if f is a Mul
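# (a product is zero when any factor is zero, so the solution set is
# the union of the factor solutions)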
if f.is_Mul:
result = set()
for m in f.args:
soln = _solve(m, symbol, **flags)
result.update(set(soln))
result = list(result)
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = _simple_dens(f, symbols)
result = [s for s in result if
all(not checksol(den, {symbol: s}, **flags) for den in
dens)]
# set flags for quick exit at end
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for n, (expr, cond) in enumerate(f.args):
candidates = _solve(piecewise_fold(expr), symbol, **flags)
for candidate in candidates:
if candidate in result:
continue
try:
v = (cond == True) or cond.subs(symbol, candidate)
except Exception:
v = False
if v != False:
# Only include solutions that do not match the condition
# of any previous pieces.
matches_other_piece = False
for other_n, (other_expr, other_cond) in enumerate(f.args):
if other_n == n:
break
if other_cond == False:
continue
try:
if other_cond.subs(symbol, candidate) == True:
matches_other_piece = True
break
except Exception:
pass
if not matches_other_piece:
v = v == True or v.doit()
if isinstance(v, Relational):
v = v.canonical
result.add(Piecewise(
(candidate, v),
(S.NaN, True)
))
check = False
else:
# first see if it really depends on symbol and whether there
# is only a linear solution
f_num, sol = solve_linear(f, symbols=symbols)
if f_num is S.Zero:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [sol]
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
try:
poly = Poly(f_num)
if poly is None:
raise ValueError('could not convert %s to Poly' % f_num)
except GeneratorsNeeded:
simplified_f = simplify(f_num)
if simplified_f != f_num:
return _solve(simplified_f, symbol, **flags)
raise ValueError('expression appears to be a constant')
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.q
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c is not S.One: # c could be a Float
return b**ee, c.q
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = set(b for b in bases if b.is_Function)
trig = set([_ for _ in funcs if
isinstance(_, TrigonometricFunction)])
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
result = _solve(newf, symbol, **flags)
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = _solve(t - f1, symbol, **flags)[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
result = list(ordered(sols))
if result is False:
msg = 'multiple generators %s' % gens
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _solve(u - t, symbol, **flags)
if isinstance(u, (Pow, exp)):
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs(exp(x),y) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs(exp(x),y) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(
lambda w: w.is_Pow or isinstance(w, exp),
_expand).subs(u, t)
if not ftry.has(symbol):
soln = _solve(ftry, t, **flags)
sols = list()
for sol in soln:
for i in inv:
sols.append(i.subs(t, sol))
result = list(ordered(sols))
elif len(gens) == 1:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
# Also use composite=True with f_num since Poly won't update
# poly as documented in issue 8810.
poly = Poly(f_num, gens[0], composite=True)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
soln = None
deg = poly.degree()
flags['tsolve'] = True
solvers = dict([(k, flags.get(k, True)) for k in
('cubics', 'quartics', 'quintics')])
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the system is multivariate
# or high-order EX domain.
try:
soln = poly.all_roots()
except NotImplementedError:
if not flags.get('incomplete', True):
raise NotImplementedError(
filldedent('''
Neither high-order multivariate polynomials
nor sorting of EX-domain polynomials is supported.
If you want to see any results, pass keyword incomplete=True to
solve; to see numerical values of roots
for univariate expressions, use nroots.
'''))
else:
pass
else:
soln = list(soln.keys())
if soln is not None:
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
iv = _solve(u - t, symbol, **flags)
soln = list(ordered({i.subs(t, s) for i in iv for s in soln}))
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
result = soln
# fallback if above fails
# -----------------------
if result is False:
# try unrad
if flags.pop('_unrad', True):
try:
u = unrad(f_num, symbol)
except (ValueError, NotImplementedError):
u = False
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _solve(ieq, symbol, **flags)[0]
rv = {inv.subs(isym, xi) for xi in _solve(eq, isym, **flags)}
else:
try:
rv = set(_solve(eq, symbol, **flags))
except NotImplementedError:
rv = None
if rv is not None:
result = list(ordered(rv))
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
else:
pass # for coverage
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = soln
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = list(map(simplify, result))
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = _simple_dens(f, symbols)
result = [s for s in result if
all(not checksol(d, {symbol: s}, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, {symbol: r}, **flags) is not False]
return result
def _solve_system(exprs, symbols, **flags):
if not exprs:
return []
polys = []
dens = set()
failed = []
result = False
linear = False
manual = flags.get('manual', False)
checkdens = check = flags.get('check', True)
for j, g in enumerate(exprs):
dens.update(_simple_dens(g, symbols))
i, d = _invert(g, *symbols)
g = d - i
g = g.as_numer_denom()[0]
if manual:
failed.append(g)
continue
poly = g.as_poly(*symbols, extension=True)
if poly is not None:
polys.append(poly)
else:
failed.append(g)
if not polys:
solved_syms = []
else:
if all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
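# each linear term's monomial is all zeros except a single 1 at the
# symbol's index; index(1) raising ValueError identifies the constant
# term, which is moved (negated) to the augmented column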
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
# returns a dictionary ({symbols: values}) or None
if flags.pop('particular', False):
result = minsolve_linear_system(matrix, *symbols, **flags)
else:
result = solve_linear_system(matrix, *symbols, **flags)
if failed:
if result:
solved_syms = list(result.keys())
else:
solved_syms = []
else:
linear = True
else:
if len(symbols) > len(polys):
from sympy.utilities.iterables import subsets
free = set().union(*[p.free_symbols for p in polys])
free = list(ordered(free.intersection(symbols)))
got_s = set()
result = []
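# try each subset of free symbols whose size matches the number of
# polynomial equations, keeping solutions that don't depend on
# previously solved symbols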
for syms in subsets(free, len(polys)):
try:
# returns [] or list of tuples of solutions for syms
res = solve_poly_system(polys, *syms)
if res:
for r in res:
skip = False
for r1 in r:
if got_s and any([ss in r1.free_symbols
for ss in got_s]):
# sol depends on previously
# solved symbols: discard it
skip = True
if not skip:
got_s.update(syms)
result.extend([dict(list(zip(syms, r)))])
except NotImplementedError:
pass
if got_s:
solved_syms = list(got_s)
else:
raise NotImplementedError('no valid subset found')
else:
try:
result = solve_poly_system(polys, *symbols)
solved_syms = symbols
except NotImplementedError:
failed.extend([g.as_expr() for g in polys])
solved_syms = []
if result:
# we don't know here if the symbols provided were given
# or not, so let solve resolve that. A list of dictionaries
# is going to always be returned from here.
#
result = [dict(list(zip(solved_syms, r))) for r in result]
if result:
if type(result) is dict:
result = [result]
else:
result = [{}]
if failed:
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = (e.free_symbols - solved_syms) & legal
if sort:
rv = list(rv)
rv.sort(key=default_sort_key)
return rv
solved_syms = set(solved_syms) # set of symbols we have solved for
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
u = Dummy() # used in solution checking
newresult = []
bad_results = []
got_s = set()
hit = False
for r in result:
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
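# checksol with the dummy u tests whether eq2 reduces to zero
# under the substitutions already made in r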
b = checksol(u, u, eq2, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
if r:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
try:
soln = _solve(eq2, s, **flags)
except NotImplementedError:
continue
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
# solution for s is being added in-place
for sol in soln:
if got_s and any([ss in sol.free_symbols for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs(s, sol)
# and add this new solution
rnew[s] = sol
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit:
raise NotImplementedError('could not solve %s' % eq2)
else:
result = newresult
for b in bad_results:
if b in result:
result.remove(b)
default_simplify = bool(failed) # rely on system-solvers to simplify
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and not linear:
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
result = [r for r in result if r]
if linear and result:
result = result[0]
return result
def solve_linear(lhs, rhs=0, symbols=[], exclude=[]):
r""" Return a tuple derived from f = lhs - rhs that is one of
the following:
(0, 1) meaning that ``f`` is independent of the symbols in
``symbols`` that aren't in ``exclude``, e.g::
>>> from sympy.solvers.solvers import solve_linear
>>> from sympy.abc import x, y, z
>>> from sympy import cos, sin
>>> eq = y*cos(x)**2 + y*sin(x)**2 - y # = y*(1 - 1) = 0
>>> solve_linear(eq)
(0, 1)
>>> eq = cos(x)**2 + sin(x)**2 # = 1
>>> solve_linear(eq)
(0, 1)
>>> solve_linear(x, exclude=[x])
(0, 1)
(0, 0) meaning that there is no solution to the equation
amongst the symbols given.
(If the first element of the tuple is not zero then
the function is guaranteed to be dependent on a symbol
in ``symbols``.)
(symbol, solution) where symbol appears linearly in the
numerator of ``f``, is in ``symbols`` (if given) and is
not in ``exclude`` (if given). No simplification is done
to ``f`` other than a ``mul=True`` expansion, so the
solution will correspond strictly to a unique solution.
``(n, d)`` where ``n`` and ``d`` are the numerator and
denominator of ``f`` when the numerator was not linear
in any symbol of interest; ``n`` will never be a symbol
unless a solution for that symbol was found (in which case
the second element is the solution, not the denominator).
Examples
========
>>> from sympy.core.power import Pow
>>> from sympy.polys.polytools import cancel
The variable ``x`` appears as a linear variable in each of the
following:
>>> solve_linear(x + y**2)
(x, -y**2)
>>> solve_linear(1/x - y**2)
(x, y**(-2))
When the expression is not linear in x or y, the numerator and denominator are returned.
>>> solve_linear(x**2/y**2 - 3)
(x**2 - 3*y**2, y**2)
If the numerator of the expression is a symbol then (0, 0) is
returned if the solution for that symbol would have set any
denominator to 0:
>>> eq = 1/(1/x - 2)
>>> eq.as_numer_denom()
(x, -2*x + 1)
>>> solve_linear(eq)
(0, 0)
But automatic rewriting may cause a symbol in the denominator to
appear in the numerator so a solution will be returned:
>>> (1/x)**-1
x
>>> solve_linear((1/x)**-1)
(x, 0)
Use an unevaluated expression to avoid this:
>>> solve_linear(Pow(1/x, -1, evaluate=False))
(0, 0)
If ``x`` is allowed to cancel in the following expression, then it
appears to be linear in ``x``, but this sort of cancellation is not
done by ``solve_linear`` so the solution will always satisfy the
original expression without causing a division by zero error.
>>> eq = x**2*(1/x - z**2/x)
>>> solve_linear(cancel(eq))
(x, 0)
>>> solve_linear(eq)
(x**2*(-z**2 + 1), x)
A list of symbols for which a solution is desired may be given:
>>> solve_linear(x + y + z, symbols=[y])
(y, -x - z)
A list of symbols to ignore may also be given:
>>> solve_linear(x + y + z, exclude=[x])
(y, -x - z)
(A solution for ``y`` is obtained because it is the first variable
from the canonically sorted list of symbols that had a linear
solution.)
"""
if isinstance(lhs, Equality):
if rhs:
raise ValueError(filldedent('''
If lhs is an Equality, rhs must be 0 but was %s''' % rhs))
rhs = lhs.rhs
lhs = lhs.lhs
dens = None
eq = lhs - rhs
n, d = eq.as_numer_denom()
if not n:
return S.Zero, S.One
free = n.free_symbols
if not symbols:
symbols = free
else:
bad = [s for s in symbols if not s.is_Symbol]
if bad:
if len(bad) == 1:
bad = bad[0]
if len(symbols) == 1:
eg = 'solve(%s, %s)' % (eq, symbols[0])
else:
eg = 'solve(%s, *%s)' % (eq, list(symbols))
raise ValueError(filldedent('''
solve_linear only handles symbols, not %s. To isolate
non-symbols use solve, e.g. >>> %s <<<.
''' % (bad, eg)))
symbols = free.intersection(symbols)
symbols = symbols.difference(exclude)
if not symbols:
return S.Zero, S.One
dfree = d.free_symbols
# derivatives are easy to do but tricky to analyze to see if they
# are going to disallow a linear solution, so for simplicity we
# just evaluate the ones that have the symbols of interest
derivs = defaultdict(list)
for der in n.atoms(Derivative):
csym = der.free_symbols & symbols
for c in csym:
derivs[c].append(der)
all_zero = True
for xi in sorted(symbols, key=default_sort_key): # canonical order
# if there are derivatives in this var, calculate them now
if type(derivs[xi]) is list:
derivs[xi] = {der: der.doit() for der in derivs[xi]}
newn = n.subs(derivs[xi])
dnewn_dxi = newn.diff(xi)
# dnewn_dxi can be nonzero if it survives differentiation by any
# of its free symbols
free = dnewn_dxi.free_symbols
if dnewn_dxi and (not free or any(dnewn_dxi.diff(s) for s in free)):
all_zero = False
if dnewn_dxi is S.NaN:
break
if xi not in dnewn_dxi.free_symbols:
vi = -(newn.subs(xi, 0))/dnewn_dxi
if dens is None:
dens = _simple_dens(eq, symbols)
if not any(checksol(di, {xi: vi}, minimal=True) is True
for di in dens):
# simplify any trivial integral
irep = [(i, i.doit()) for i in vi.atoms(Integral) if
i.function.is_number]
# do a slight bit of simplification
vi = expand_mul(vi.subs(irep))
return xi, vi
if all_zero:
return S.Zero, S.One
if n.is_Symbol: # no solution for this symbol was found
return S.Zero, S.Zero
return n, d
def minsolve_linear_system(system, *symbols, **flags):
r"""
Find a particular solution to a linear system.
In particular, try to find a solution with the minimal possible number
of non-zero variables using a naive algorithm with exponential complexity.
If ``quick=True``, a heuristic is used.
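For example, for a system equivalent to ``x + y == 1``, a solution
such as ``{x: 1, y: 0}`` (one non-zero value) would be preferred over
``{x: 1/2, y: 1/2}`` (two non-zero values).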
"""
quick = flags.get('quick', False)
# Check if there are any non-zero solutions at all
s0 = solve_linear_system(system, *symbols, **flags)
if not s0 or all(v == 0 for v in s0.values()):
return s0
if quick:
# We just solve the system and try to heuristically find a nice
# solution.
s = solve_linear_system(system, *symbols)
def update(determined, solution):
delete = []
for k, v in solution.items():
solution[k] = v.subs(determined)
if not solution[k].free_symbols:
delete.append(k)
determined[k] = solution[k]
for k in delete:
del solution[k]
determined = {}
update(determined, s)
while s:
# NOTE sort by default_sort_key to get deterministic result
k = max((k for k in s.values()),
key=lambda x: (len(x.free_symbols), default_sort_key(x)))
x = max(k.free_symbols, key=default_sort_key)
if len(k.free_symbols) != 1:
determined[x] = S(0)
else:
val = solve(k)[0]
if val == 0 and all(v.subs(x, val) == 0 for v in s.values()):
determined[x] = S(1)
else:
determined[x] = val
update(determined, s)
return determined
else:
# We try to select n variables which we want to be non-zero.
# All others will be assumed zero. We try to solve the modified system.
# If there is a non-trivial solution, just set the free variables to
# one. If we do this for increasing n, trying all combinations of
# variables, we will find an optimal solution.
# We speed up slightly by starting at one less than the number of
# variables the quick method manages.
from itertools import combinations
from sympy.utilities.misc import debug
N = len(symbols)
bestsol = minsolve_linear_system(system, *symbols, quick=True)
n0 = len([x for x in bestsol.values() if x != 0])
for n in range(n0 - 1, 1, -1):
debug('minsolve: %s' % n)
thissol = None
for nonzeros in combinations(list(range(N)), n):
subm = Matrix([system.col(i).T for i in nonzeros] + [system.col(-1).T]).T
s = solve_linear_system(subm, *[symbols[i] for i in nonzeros])
if s and not all(v == 0 for v in s.values()):
subs = [(symbols[v], S(1)) for v in nonzeros]
for k, v in s.items():
s[k] = v.subs(subs)
for sym in symbols:
if sym not in s:
if symbols.index(sym) in nonzeros:
s[sym] = S(1)
else:
s[sym] = S(0)
thissol = s
break
if thissol is None:
break
bestsol = thissol
return bestsol
def solve_linear_system(system, *symbols, **flags):
r"""
Solve system of N linear equations with M variables, which means
both under- and overdetermined systems are supported. The possible
number of solutions is zero, one, or infinitely many; this procedure
will return None when there are no solutions and a dictionary otherwise. In the
case of underdetermined systems, all arbitrary parameters are skipped.
This may cause a situation in which an empty dictionary is returned.
In that case, all symbols can be assigned arbitrary values.
Input to this function is an N x (M+1) matrix, which means it has
to be in augmented form. If you prefer to enter N equations and M
unknowns then use `solve(Neqs, *Msymbols)` instead. Note: a local
copy of the matrix is made by this routine so the matrix that is
passed will not be modified.
The algorithm used here is Gaussian elimination,
which results, after elimination, in an upper-triangular matrix.
Then solutions are found using back-substitution. This approach
is more efficient and compact than the Gauss-Jordan method.
>>> from sympy import Matrix, solve_linear_system
>>> from sympy.abc import x, y
Solve the following system::
x + 4 y == 2
-2 x + y == 14
>>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
>>> solve_linear_system(system, x, y)
{x: -6, y: 2}
A degenerate system returns an empty dictionary.
>>> system = Matrix(( (0,0,0), (0,0,0) ))
>>> solve_linear_system(system, x, y)
{}
"""
do_simplify = flags.get('simplify', True)
if system.rows == system.cols - 1 == len(symbols):
try:
# well behaved n-equations and n-unknowns
inv = inv_quick(system[:, :-1])
rv = dict(zip(symbols, inv*system[:, -1]))
if do_simplify:
for k, v in rv.items():
rv[k] = simplify(v)
if not all(i.is_zero for i in rv.values()):
# non-trivial solution
return rv
except ValueError:
pass
matrix = system[:, :]
syms = list(symbols)
i, m = 0, matrix.cols - 1 # don't count augmentation
while i < matrix.rows:
if i == m:
# an overdetermined system
if any(matrix[i:, m]):
return None # no solutions
else:
# remove trailing rows
matrix = matrix[:i, :]
break
if not matrix[i, i]:
# there is no pivot in current column
# so try to find one in other columns
for k in range(i + 1, m):
if matrix[i, k]:
break
else:
if matrix[i, m]:
# We need to know if this is always zero or not. We
# assume that if there are free symbols that it is not
# identically zero (or that there is more than one way
# to make this zero). Otherwise, if there are none, this
# is a constant and we assume that it does not simplify
# to zero XXX are there better (fast) ways to test this?
# The .equals(0) method could be used but that can be
# slow; numerical testing is prone to errors of scaling.
if not matrix[i, m].free_symbols:
return None # no solution
# A row of zeros with a non-zero rhs can only be accepted
# if there is another equivalent row. Any such rows will
# be deleted.
nrows = matrix.rows
rowi = matrix.row(i)
ip = None
j = i + 1
while j < matrix.rows:
# do we need to see if the rhs of j
# is a constant multiple of i's rhs?
rowj = matrix.row(j)
if rowj == rowi:
matrix.row_del(j)
elif rowj[:-1] == rowi[:-1]:
if ip is None:
_, ip = rowi[-1].as_content_primitive()
_, jp = rowj[-1].as_content_primitive()
if not (simplify(jp - ip) or simplify(jp + ip)):
matrix.row_del(j)
j += 1
if nrows == matrix.rows:
# no solution
return None
# zero row or was a linear combination of
# other rows or was a row with a symbolic
# expression that matched other rows, e.g. [0, 0, x - y]
# so now we can safely skip it
matrix.row_del(i)
if not matrix:
# every choice of variable values is a solution
# so we return an empty dict instead of None
return dict()
continue
# we want to change the order of columns so
# the order of variables must also change
syms[i], syms[k] = syms[k], syms[i]
matrix.col_swap(i, k)
pivot_inv = S.One/matrix[i, i]
# divide all elements in the current row by the pivot
matrix.row_op(i, lambda x, _: x * pivot_inv)
for k in range(i + 1, matrix.rows):
if matrix[k, i]:
coeff = matrix[k, i]
# subtract from the current row the row containing
# pivot and multiplied by extracted coefficient
matrix.row_op(k, lambda x, j: simplify(x - matrix[i, j]*coeff))
i += 1
# if there weren't any problems, augmented matrix is now
# in row-echelon form so we can check how many solutions
# there are and extract them using back substitution
if len(syms) == matrix.rows:
# this system is Cramer equivalent so there is
# exactly one solution to this system of equations
k, solutions = i - 1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in range(k + 1, m):
content -= matrix[k, j]*solutions[syms[j]]
if do_simplify:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
elif len(syms) > matrix.rows:
# this system will have an infinite number of solutions
# dependent on exactly len(syms) - i parameters
k, solutions = i - 1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in range(k + 1, i):
content -= matrix[k, j]*solutions[syms[j]]
# run back-substitution for parameters
for j in range(i, m):
content -= matrix[k, j]*syms[j]
if do_simplify:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
else:
return [] # no solutions
def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
"""Solve equation of a type p(x; a_1, ..., a_k) == q(x) where both
p, q are univariate polynomials and f depends on k parameters.
The result of this functions is a dictionary with symbolic
values of those parameters with respect to coefficients in q.
This functions accepts both Equations class instances and ordinary
SymPy expressions. Specification of parameters and variable is
obligatory for efficiency and simplicity reason.
>>> from sympy import Eq
>>> from sympy.abc import a, b, c, x
>>> from sympy.solvers import solve_undetermined_coeffs
>>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
{a: 1/c, b: -1/c}
"""
if isinstance(equ, Equality):
# got equation, so move all the
# terms to the left hand side
equ = equ.lhs - equ.rhs
equ = cancel(equ).as_numer_denom()[0]
system = list(collect(equ.expand(), sym, evaluate=False).values())
if not any(equ.has(sym) for equ in system):
# consecutive powers in the input expressions have
# been successfully collected, so solve remaining
# system using Gaussian elimination algorithm
return solve(system, *coeffs, **flags)
else:
return None # no solutions
def solve_linear_system_LU(matrix, syms):
"""
Solves the augmented matrix system using LUsolve and returns a dictionary
in which solutions are keyed to the symbols of syms *as ordered*.
The matrix must be invertible.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.solvers import solve_linear_system_LU
>>> solve_linear_system_LU(Matrix([
... [1, 2, 0, 1],
... [3, 2, 2, 1],
... [2, 0, 0, 1]]), [x, y, z])
{x: 1/2, y: 1/4, z: -1/2}
See Also
========
sympy.matrices.LUsolve
"""
if matrix.rows != matrix.cols - 1:
raise ValueError("Rows should be equal to columns - 1")
A = matrix[:matrix.rows, :matrix.rows]
b = matrix[:, matrix.cols - 1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i, 0]
return solutions
def det_perm(M):
"""Return the det(``M``) by using permutations to select factors.
For sizes larger than 8, the number of permutations becomes prohibitively
large; in that case, or if there are no symbols in the matrix, it is better
to use the standard determinant routines, e.g. `M.det()`.
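For example, for the generic 2x2 matrix ``Matrix([[a, b], [c, d]])``
the two permutations yield ``a*d - b*c``.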
See Also
========
det_minor
det_quick
"""
args = []
s = True
n = M.rows
try:
mat = M._mat
except AttributeError:
mat = flatten(M.tolist())
for perm in generate_bell(n):
fac = []
idx = 0
for j in perm:
fac.append(mat[idx + j])
idx += n
term = Mul(*fac) # disaster with unevaluated Mul -- takes forever for n=7
args.append(term if s else -term)
s = not s
return Add(*args)
def det_minor(M):
"""Return the ``det(M)`` computed from minors without
introducing new nesting in products.
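For example, expansion along the first row of a 3x3 matrix gives
``M[0, 0]*det(M00) - M[0, 1]*det(M01) + M[0, 2]*det(M02)``, where
``M0j`` is the minor obtained by deleting row 0 and column j.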
See Also
========
det_perm
det_quick
"""
n = M.rows
if n == 2:
return M[0, 0]*M[1, 1] - M[1, 0]*M[0, 1]
else:
return sum([(1, -1)[i % 2]*Add(*[M[0, i]*d for d in
Add.make_args(det_minor(M.minorMatrix(0, i)))])
if M[0, i] else S.Zero for i in range(n)])
def det_quick(M, method=None):
"""Return ``det(M)`` assuming that either
there are lots of zeros or the size of the matrix
is small. If this assumption is not met, then the normal
Matrix.det function will be used with method = ``method``.
See Also
========
det_minor
det_perm
"""
if any(i.has(Symbol) for i in M):
if M.rows < 8 and all(i.has(Symbol) for i in M):
return det_perm(M)
return det_minor(M)
else:
return M.det(method=method) if method else M.det()
def inv_quick(M):
"""Return the inverse of ``M``, assuming that either
there are lots of zeros or the size of the matrix
is small.
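The inverse is built from the adjugate: entry ``(j, i)`` of the result
is ``(-1)**(i + j)*det(minorMatrix(i, j))/det(M)``.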
"""
from sympy.matrices import zeros
if any(i.has(Symbol) for i in M):
if all(i.has(Symbol) for i in M):
det = lambda _: det_perm(_)
else:
det = lambda _: det_minor(_)
else:
return M.inv()
n = M.rows
d = det(M)
if d is S.Zero:
raise ValueError("Matrix det == 0; not invertible.")
ret = zeros(n)
s1 = -1
for i in range(n):
s = s1 = -s1
for j in range(n):
di = det(M.minorMatrix(i, j))
ret[j, i] = s*di/d
s = -s
return ret
# these are functions that have multiple inverse values per period
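# e.g. if sin(x) == y then x can be asin(y) or pi - asin(y) (modulo the period)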
multi_inverses = {
sin: lambda x: (asin(x), S.Pi - asin(x)),
cos: lambda x: (acos(x), 2*S.Pi - acos(x)),
}
def _tsolve(eq, sym, **flags):
"""
Helper for _solve that solves a transcendental equation with respect
to the given symbol. Various equations containing powers and logarithms,
can be solved.
There is currently no guarantee that all solutions will be returned or
that a real solution will be favored over a complex one.
Either a list of potential solutions will be returned or None will be
returned (in the case that no method was known to get a solution
for the equation). All other errors (like the inability to cast an
expression as a Poly) are unhandled.
Examples
========
>>> from sympy import log
>>> from sympy.solvers.solvers import _tsolve as tsolve
>>> from sympy.abc import x
>>> tsolve(3**(2*x + 5) - 4, x)
[-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)]
>>> tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
if 'tsolve_saw' not in flags:
flags['tsolve_saw'] = []
if eq in flags['tsolve_saw']:
return None
else:
flags['tsolve_saw'].append(eq)
rhs, lhs = _invert(eq, sym)
if lhs == sym:
return [rhs]
try:
if lhs.is_Add:
# it's time to try factoring; powdenest is used
# to try to get powers in standard form for better factoring
f = factor(powdenest(lhs - rhs))
if f.is_Mul:
return _solve(f, sym, **flags)
if rhs:
f = logcombine(lhs, force=flags.get('force', True))
if f.count(log) != lhs.count(log):
if f.func is log:
return _solve(f.args[0] - exp(rhs), sym, **flags)
return _tsolve(f - rhs, sym)
elif lhs.is_Pow:
if lhs.exp.is_Integer:
if lhs - rhs != eq:
return _solve(lhs - rhs, sym, **flags)
elif sym not in lhs.exp.free_symbols:
return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags)
elif not rhs and sym in lhs.exp.free_symbols:
# f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at
# the same place
sol_base = _solve(lhs.base, sym, **flags)
if not sol_base:
return sol_base # no solutions to remove so return now
return list(ordered(set(sol_base) - set(
_solve(lhs.exp, sym, **flags))))
elif (rhs is not S.Zero and
lhs.base.is_positive and
lhs.exp.is_real):
return _solve(lhs.exp*log(lhs.base) - log(rhs), sym, **flags)
elif lhs.base == 0 and rhs == 1:
return _solve(lhs.exp, sym, **flags)
elif lhs.is_Mul and rhs.is_positive:
llhs = expand_log(log(lhs))
if llhs.is_Add:
return _solve(llhs - log(rhs), sym, **flags)
elif lhs.is_Function and len(lhs.args) == 1 and lhs.func in multi_inverses:
# sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3))
soln = []
for i in multi_inverses[lhs.func](rhs):
soln.extend(_solve(lhs.args[0] - i, sym, **flags))
return list(ordered(soln))
rewrite = lhs.rewrite(exp)
if rewrite != lhs:
return _solve(rewrite - rhs, sym, **flags)
except NotImplementedError:
pass
# maybe it is a lambert pattern
if flags.pop('bivariate', True):
# lambert forms may need some help being recognized, e.g. changing
# 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1
# to 2**(3*x) + (x*log(2) + 1)**3
g = _filtered_gens(eq.as_poly(), sym)
up_or_log = set()
for gi in g:
if gi.func is exp or gi.func is log:
up_or_log.add(gi)
elif gi.is_Pow:
gisimp = powdenest(expand_power_exp(gi))
if gisimp.is_Pow and sym in gisimp.exp.free_symbols:
up_or_log.add(gi)
down = g.difference(up_or_log)
eq_down = expand_log(expand_power_exp(eq)).subs(
dict(list(zip(up_or_log, [0]*len(up_or_log)))))
eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down))
rhs, lhs = _invert(eq, sym)
if lhs.has(sym):
try:
poly = lhs.as_poly()
g = _filtered_gens(poly, sym)
return _solve_lambert(lhs - rhs, sym, g)
except NotImplementedError:
# maybe it's a convoluted function
if len(g) == 2:
try:
gpu = bivariate_type(lhs - rhs, *g)
if gpu is None:
raise NotImplementedError
g, p, u = gpu
flags['bivariate'] = False
inversion = _tsolve(g - u, sym, **flags)
if inversion:
sol = _solve(p, u, **flags)
return list(ordered(set([i.subs(u, s)
for i in inversion for s in sol])))
except NotImplementedError:
pass
else:
pass
if flags.pop('force', True):
flags['force'] = False
pos, reps = posify(lhs - rhs)
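# posify replaces symbols with positive Dummy symbols, which can enable
# log/power manipulations; reps maps the dummies back to the originals
# so solutions can be restored with subs(reps)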
for u, s in reps.items():
if s == sym:
break
else:
u = sym
if pos.has(u):
try:
soln = _solve(pos, u, **flags)
return list(ordered([s.subs(reps) for s in soln]))
except NotImplementedError:
pass
else:
pass # here for coverage
return # here for coverage
# TODO: option for calculating J numerically
@conserve_mpmath_dps
def nsolve(*args, **kwargs):
r"""
Solve a nonlinear equation system numerically::
nsolve(f, [args,] x0, modules=['mpmath'], **kwargs)
f is a vector function of symbolic expressions representing the system.
args are the variables. If there is only one variable, this argument can
be omitted.
x0 is a starting vector close to a solution.
Use the modules keyword to specify which modules should be used to
evaluate the function and the Jacobian matrix. Make sure to use a module
that supports matrices. For more information on the syntax, please see the
docstring of lambdify.
Overdetermined systems are supported.
>>> from sympy import Symbol, nsolve
>>> import sympy
>>> import mpmath
>>> mpmath.mp.dps = 15
>>> x1 = Symbol('x1')
>>> x2 = Symbol('x2')
>>> f1 = 3 * x1**2 - 2 * x2**2 - 1
>>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
>>> print(nsolve((f1, f2), (x1, x2), (-1, 1)))
Matrix([[-1.19287309935246], [1.27844411169911]])
For one-dimensional functions the syntax is simplified:
>>> from sympy import sin, nsolve
>>> from sympy.abc import x
>>> nsolve(sin(x), x, 2)
3.14159265358979
>>> nsolve(sin(x), 2)
3.14159265358979
To solve with higher precision than the default, use the prec argument.
>>> from sympy import cos
>>> nsolve(cos(x) - x, 1)
0.739085133215161
>>> nsolve(cos(x) - x, 1, prec=50)
0.73908513321516064165531208767387340401341175890076
>>> cos(_)
0.73908513321516064165531208767387340401341175890076
    mpmath.findroot is used; see its documentation for more extensive
    coverage, especially concerning keyword parameters and available
    solvers. Note, however, that for functions which are very steep near
    the root, verification of the solution may fail. In this case you
    should use the flag `verify=False` and independently verify the
    solution.
>>> from sympy import cos, cosh
>>> from sympy.abc import i
>>> f = cos(x)*cosh(x) - 1
>>> nsolve(f, 3.14*100)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (1.39267e+230 > 2.1684e-19)
>>> ans = nsolve(f, 3.14*100, verify=False); ans
312.588469032184
>>> f.subs(x, ans).n(2)
2.1e+121
>>> (f/f.diff(x)).subs(x, ans).n(2)
7.4e-15
One might safely skip the verification if bounds of the root are known
and a bisection method is used:
>>> bounds = lambda i: (3.14*i, 3.14*(i + 1))
>>> nsolve(f, bounds(100), solver='bisect', verify=False)
315.730061685774
Alternatively, a function may be better behaved when the
denominator is ignored. Since this is not always the case, however,
the decision of what function to use is left to the discretion of
the user.
>>> eq = x**2/(1 - x)/(1 - 2*x)**2 - 100
>>> nsolve(eq, 0.46)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (10000 > 2.1684e-19)
Try another starting point or tweak arguments.
>>> nsolve(eq.as_numer_denom()[0], 0.46)
0.46792545969349058
"""
# there are several other SymPy functions that use method= so
# guard against that here
if 'method' in kwargs:
raise ValueError(filldedent('''
Keyword "method" should not be used in this context. When using
some mpmath solvers directly, the keyword "method" is
used, but when using nsolve (and findroot) the keyword to use is
"solver".'''))
if 'prec' in kwargs:
prec = kwargs.pop('prec')
import mpmath
mpmath.mp.dps = prec
else:
prec = None
# interpret arguments
if len(args) == 3:
f = args[0]
fargs = args[1]
x0 = args[2]
elif len(args) == 2:
f = args[0]
fargs = None
x0 = args[1]
elif len(args) < 2:
raise TypeError('nsolve expected at least 2 arguments, got %i'
% len(args))
else:
raise TypeError('nsolve expected at most 3 arguments, got %i'
% len(args))
modules = kwargs.get('modules', ['mpmath'])
if iterable(f):
f = list(f)
for i, fi in enumerate(f):
if isinstance(fi, Equality):
f[i] = fi.lhs - fi.rhs
f = Matrix(f).T
if not isinstance(f, Matrix):
# assume it's a sympy expression
if isinstance(f, Equality):
f = f.lhs - f.rhs
syms = f.free_symbols
if fargs is None:
fargs = syms.copy().pop()
if not (len(syms) == 1 and (fargs in syms or fargs[0] in syms)):
raise ValueError(filldedent('''
expected a one-dimensional and numerical function'''))
# the function is much better behaved if there is no denominator
# but sending the numerator is left to the user since sometimes
# the function is better behaved when the denominator is present
# e.g., issue 11768
f = lambdify(fargs, f, modules)
return Float(findroot(f, x0, **kwargs))
if len(fargs) > f.cols:
raise NotImplementedError(filldedent('''
need at least as many equations as variables'''))
verbose = kwargs.get('verbose', False)
if verbose:
print('f(x):')
print(f)
# derive Jacobian
J = f.jacobian(fargs)
if verbose:
print('J(x):')
print(J)
# create functions
f = lambdify(fargs, f.T, modules)
J = lambdify(fargs, J, modules)
# solve the system numerically
x = findroot(f, x0, J=J, **kwargs)
return Matrix(x)
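# Illustrative sketch, not part of the original module: a thin retry wrapper
# around nsolve implementing the docstring's "try another starting point"
# advice.  The helper name and retry strategy are inventions of this example.
def _nsolve_retry(f, sym, starts, **kwargs):
    for x0 in starts:
        try:
            return nsolve(f, sym, x0, **kwargs)
        except ValueError:
            # tolerance was not reached from this start; try the next one
            continue
    raise ValueError('no starting point converged')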
def _invert(eq, *symbols, **kwargs):
"""Return tuple (i, d) where ``i`` is independent of ``symbols`` and ``d``
contains symbols. ``i`` and ``d`` are obtained after recursively using
algebraic inversion until an uninvertible ``d`` remains. If there are no
free symbols then ``d`` will be zero. Some (but not necessarily all)
solutions to the expression ``i - d`` will be related to the solutions of
the original expression.
Examples
========
>>> from sympy.solvers.solvers import _invert as invert
>>> from sympy import sqrt, cos
>>> from sympy.abc import x, y
>>> invert(x - 3)
(3, x)
>>> invert(3)
(3, 0)
>>> invert(2*cos(x) - 1)
(1/2, cos(x))
>>> invert(sqrt(x) - 3)
(3, sqrt(x))
>>> invert(sqrt(x) + y, x)
(-y, sqrt(x))
>>> invert(sqrt(x) + y, y)
(-sqrt(x), y)
>>> invert(sqrt(x) + y, x, y)
(0, sqrt(x) + y)
If there is more than one symbol in a power's base and the exponent
is not an Integer, then the principal root will be used for the
inversion:
>>> invert(sqrt(x + y) - 2)
(4, x + y)
If the exponent is an integer, setting ``integer_power`` to True
will force the principal root to be selected:
>>> invert(x**2 - 4, integer_power=True)
(2, x)
"""
eq = sympify(eq)
free = eq.free_symbols
if not symbols:
symbols = free
if not free & set(symbols):
return eq, S.Zero
dointpow = bool(kwargs.get('integer_power', False))
lhs = eq
rhs = S.Zero
while True:
was = lhs
while True:
indep, dep = lhs.as_independent(*symbols)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep is S.Zero:
break
lhs = dep
rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep is S.One:
break
lhs = dep
rhs /= indep
# collect like-terms in symbols
if lhs.is_Add:
terms = {}
for a in lhs.args:
i, d = a.as_independent(*symbols)
terms.setdefault(d, []).append(i)
if any(len(v) > 1 for v in terms.values()):
args = []
for d, i in terms.items():
if len(i) > 1:
args.append(Add(*i)*d)
else:
args.append(i[0]*d)
lhs = Add(*args)
# if it's a two-term Add with rhs = 0 and two powers we can get the
# dependent terms together, e.g. 3*f(x) + 2*g(x) -> f(x)/g(x) = -2/3
if lhs.is_Add and not rhs and len(lhs.args) == 2 and \
not lhs.is_polynomial(*symbols):
a, b = ordered(lhs.args)
ai, ad = a.as_independent(*symbols)
bi, bd = b.as_independent(*symbols)
if any(_ispow(i) for i in (ad, bd)):
a_base, a_exp = ad.as_base_exp()
b_base, b_exp = bd.as_base_exp()
if a_base == b_base:
# a = -b
lhs = powsimp(powdenest(ad/bd))
rhs = -bi/ai
else:
rat = ad/bd
_lhs = powsimp(ad/bd)
if _lhs != rat:
lhs = _lhs
rhs = -bi/ai
if ai*bi is S.NegativeOne:
if all(
isinstance(i, Function) for i in (ad, bd)) and \
ad.func == bd.func and len(ad.args) == len(bd.args):
if len(ad.args) == 1:
lhs = ad.args[0] - bd.args[0]
else:
# should be able to solve
# f(x, y) == f(2, 3) -> x == 2
# f(x, x + y) == f(2, 3) -> x == 2 or x == 3 - y
raise NotImplementedError('equal function with more than 1 argument')
elif lhs.is_Mul and any(_ispow(a) for a in lhs.args):
lhs = powsimp(powdenest(lhs))
if lhs.is_Function:
if hasattr(lhs, 'inverse') and len(lhs.args) == 1:
# -1
# f(x) = g -> x = f (g)
#
# /!\ inverse should not be defined if there are multiple values
# for the function -- these are handled in _tsolve
#
rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
elif lhs.func is atan2:
y, x = lhs.args
lhs = 2*atan(y/(sqrt(x**2 + y**2) + x))
if rhs and lhs.is_Pow and lhs.exp.is_Integer and lhs.exp < 0:
lhs = 1/lhs
rhs = 1/rhs
# base**a = b -> base = b**(1/a) if
# a is an Integer and dointpow=True (this gives real branch of root)
# a is not an Integer and the equation is multivariate and the
# base has more than 1 symbol in it
        # The rationale for this is that right now the multi-system solvers
        # don't try to resolve generators to see, for example, whether the
        # whole system is written in terms of sqrt(x + y); they would just
        # fail, so we do that step here.
if lhs.is_Pow and (
lhs.exp.is_Integer and dointpow or not lhs.exp.is_Integer and
len(symbols) > 1 and len(lhs.base.free_symbols & set(symbols)) > 1):
rhs = rhs**(1/lhs.exp)
lhs = lhs.base
if lhs == was:
break
return rhs, lhs
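# Illustrative sketch, not part of the original module: _invert keeps peeling
# layers (additive constants, multiplicative constants, invertible functions)
# until the left-hand side stops changing, so the inversion below goes all
# the way down to the bare symbol rather than stopping at exp(2*x + 1).
def _invert_example():
    from sympy import exp
    from sympy.abc import x
    # 3*exp(2*x + 1) - 6 = 0 -> exp(2*x + 1) = 2 -> 2*x + 1 = log(2) -> x
    return _invert(3*exp(2*x + 1) - 6, x)  # -> (log(2)/2 - 1/2, x)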
def unrad(eq, *syms, **flags):
""" Remove radicals with symbolic arguments and return (eq, cov),
None or raise an error:
None is returned if there are no radicals to remove.
NotImplementedError is raised if there are radicals and they cannot be
removed or if the relationship between the original symbols and the
change of variable needed to rewrite the system as a polynomial cannot
be solved.
Otherwise the tuple, ``(eq, cov)``, is returned where::
``eq``, ``cov``
``eq`` is an equation without radicals (in the symbol(s) of
interest) whose solutions are a superset of the solutions to the
original expression. ``eq`` might be re-written in terms of a new
variable; the relationship to the original variables is given by
``cov`` which is a list containing ``v`` and ``v**p - b`` where
``p`` is the power needed to clear the radical and ``b`` is the
radical now expressed as a polynomial in the symbols of interest.
For example, for sqrt(2 - x) the tuple would be
``(c, c**2 - 2 + x)``. The solutions of ``eq`` will contain
solutions to the original equation (if there are any).
``syms``
an iterable of symbols which, if provided, will limit the focus of
radical removal: only radicals with one or more of the symbols of
interest will be cleared. All free symbols are used if ``syms`` is not
set.
``flags`` are used internally for communication during recursive calls.
    One option is also recognized::
``take``, when defined, is interpreted as a single-argument function
that returns True if a given Pow should be handled.
Radicals can be removed from an expression if::
* all bases of the radicals are the same; a change of variables is
done in this case.
    * all radicals appear in one term of the expression
    * there are four or fewer terms with sqrt() factors
* there are only two terms with radicals
Examples
========
>>> from sympy.solvers.solvers import unrad
>>> from sympy.abc import x
>>> from sympy import sqrt, Rational, root, real_roots, solve
>>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
(x**5 - 64, [])
>>> unrad(sqrt(x) + root(x + 1, 3))
(x**3 - x**2 - 2*x - 1, [])
>>> eq = sqrt(x) + root(x, 3) - 2
>>> unrad(eq)
(_p**3 + _p**2 - 2, [_p, _p**6 - x])
"""
_inv_error = 'cannot get an analytical solution for the inversion'
uflags = dict(check=False, simplify=False)
def _cov(p, e):
if cov:
# XXX - uncovered
oldp, olde = cov
if Poly(e, p).degree(p) in (1, 2):
cov[:] = [p, olde.subs(oldp, _solve(e, p, **uflags)[0])]
else:
raise NotImplementedError
else:
cov[:] = [p, e]
def _canonical(eq, cov):
if cov:
# change symbol to vanilla so no solutions are eliminated
p, e = cov
rep = {p: Dummy(p.name)}
eq = eq.xreplace(rep)
cov = [p.xreplace(rep), e.xreplace(rep)]
# remove constants and powers of factors since these don't change
# the location of the root; XXX should factor or factor_terms be used?
eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True), clear=True)
if eq.is_Mul:
args = []
for f in eq.args:
if f.is_number:
continue
if f.is_Pow and _take(f, True):
args.append(f.base)
else:
args.append(f)
eq = Mul(*args) # leave as Mul for more efficient solving
# make the sign canonical
free = eq.free_symbols
if len(free) == 1:
if eq.coeff(free.pop()**degree(eq)).could_extract_minus_sign():
eq = -eq
elif eq.could_extract_minus_sign():
eq = -eq
return eq, cov
def _Q(pow):
# return leading Rational of denominator of Pow's exponent
c = pow.as_base_exp()[1].as_coeff_Mul()[0]
if not c.is_Rational:
return S.One
return c.q
# define the _take method that will determine whether a term is of interest
def _take(d, take_int_pow):
# return True if coefficient of any factor's exponent's den is not 1
for pow in Mul.make_args(d):
if not (pow.is_Symbol or pow.is_Pow):
continue
b, e = pow.as_base_exp()
if not b.has(*syms):
continue
if not take_int_pow and _Q(pow) == 1:
continue
free = pow.free_symbols
if free.intersection(syms):
return True
return False
_take = flags.setdefault('_take', _take)
cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
sorted(dict(cov=[], n=None, rpt=0).items())]
# preconditioning
eq = powdenest(factor_terms(eq, radical=True, clear=True))
eq, d = eq.as_numer_denom()
eq = _mexpand(eq, recursive=True)
if eq.is_number:
return
syms = set(syms) or eq.free_symbols
poly = eq.as_poly()
gens = [g for g in poly.gens if _take(g, True)]
if not gens:
return
# check for trivial case
# - already a polynomial in integer powers
if all(_Q(g) == 1 for g in gens):
return
# - an exponent has a symbol of interest (don't handle)
if any(g.as_base_exp()[1].has(*syms) for g in gens):
return
def _rads_bases_lcm(poly):
# if all the bases are the same or all the radicals are in one
# term, `lcm` will be the lcm of the denominators of the
# exponents of the radicals
lcm = 1
rads = set()
bases = set()
for g in poly.gens:
if not _take(g, False):
continue
q = _Q(g)
if q != 1:
rads.add(g)
lcm = ilcm(lcm, q)
bases.add(g.base)
return rads, bases, lcm
rads, bases, lcm = _rads_bases_lcm(poly)
if not rads:
return
covsym = Dummy('p', nonnegative=True)
# only keep in syms symbols that actually appear in radicals;
# and update gens
newsyms = set()
for r in rads:
newsyms.update(syms & r.free_symbols)
if newsyms != syms:
syms = newsyms
gens = [g for g in gens if g.free_symbols & syms]
# get terms together that have common generators
drad = dict(list(zip(rads, list(range(len(rads))))))
rterms = {(): []}
args = Add.make_args(poly.as_expr())
for t in args:
if _take(t, False):
common = set(t.as_poly().gens).intersection(rads)
key = tuple(sorted([drad[i] for i in common]))
else:
key = ()
rterms.setdefault(key, []).append(t)
others = Add(*rterms.pop(()))
rterms = [Add(*rterms[k]) for k in rterms.keys()]
# the output will depend on the order terms are processed, so
# make it canonical quickly
rterms = list(reversed(list(ordered(rterms))))
ok = False # we don't have a solution yet
depth = sqrt_depth(eq)
if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
eq = rterms[0]**lcm - ((-others)**lcm)
ok = True
else:
if len(rterms) == 1 and rterms[0].is_Add:
rterms = list(rterms[0].args)
if len(bases) == 1:
b = bases.pop()
if len(syms) > 1:
free = b.free_symbols
x = {g for g in gens if g.is_Symbol} & free
if not x:
x = free
x = ordered(x)
else:
x = syms
x = list(x)[0]
try:
inv = _solve(covsym**lcm - b, x, **uflags)
if not inv:
raise NotImplementedError
eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
_cov(covsym, covsym**lcm - b)
return _canonical(eq, cov)
except NotImplementedError:
pass
else:
# no longer consider integer powers as generators
gens = [g for g in gens if _Q(g) != 1]
if len(rterms) == 2:
if not others:
eq = rterms[0]**lcm - (-rterms[1])**lcm
ok = True
elif not log(lcm, 2).is_Integer:
# the lcm-is-power-of-two case is handled below
r0, r1 = rterms
if flags.get('_reverse', False):
r1, r0 = r0, r1
i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
for reverse in range(2):
if reverse:
i0, i1 = i1, i0
r0, r1 = r1, r0
_rads1, _, lcm1 = i1
_rads1 = Mul(*_rads1)
t1 = _rads1**lcm1
c = covsym**lcm1 - t1
for x in syms:
try:
sol = _solve(c, x, **uflags)
if not sol:
raise NotImplementedError
neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
others
tmp = unrad(neweq, covsym)
if tmp:
eq, newcov = tmp
if newcov:
newp, newc = newcov
_cov(newp, c.subs(covsym,
_solve(newc, covsym, **uflags)[0]))
else:
_cov(covsym, c)
else:
eq = neweq
_cov(covsym, c)
ok = True
break
except NotImplementedError:
if reverse:
raise NotImplementedError(
'no successful change of variable found')
else:
pass
if ok:
break
elif len(rterms) == 3:
# two cube roots and another with order less than 5
# (so an analytical solution can be found) or a base
# that matches one of the cube root bases
info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
RAD = 0
BASES = 1
LCM = 2
if info[0][LCM] != 3:
info.append(info.pop(0))
rterms.append(rterms.pop(0))
elif info[1][LCM] != 3:
info.append(info.pop(1))
rterms.append(rterms.pop(1))
if info[0][LCM] == info[1][LCM] == 3:
if info[1][BASES] != info[2][BASES]:
info[0], info[1] = info[1], info[0]
rterms[0], rterms[1] = rterms[1], rterms[0]
if info[1][BASES] == info[2][BASES]:
eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
ok = True
elif info[2][LCM] < 5:
# a*root(A, 3) + b*root(B, 3) + others = c
a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
# zz represents the unraded expression into which the
# specifics for this case are substituted
zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 +
3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d +
45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 +
9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 -
60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 -
126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 -
9*c*d**8 + d**9)
def _t(i):
b = Mul(*info[i][RAD])
return cancel(rterms[i]/b), Mul(*info[i][BASES])
aa, AA = _t(0)
bb, BB = _t(1)
cc = -rterms[2]
dd = others
eq = zz.xreplace(dict(zip(
(a, A, b, B, c, d),
(aa, AA, bb, BB, cc, dd))))
ok = True
# handle power-of-2 cases
if not ok:
if log(lcm, 2).is_Integer and (not others and
len(rterms) == 4 or len(rterms) < 4):
def _norm2(a, b):
return a**2 + b**2 + 2*a*b
if len(rterms) == 4:
# (r0+r1)**2 - (r2+r3)**2
r0, r1, r2, r3 = rterms
eq = _norm2(r0, r1) - _norm2(r2, r3)
ok = True
elif len(rterms) == 3:
# (r1+r2)**2 - (r0+others)**2
r0, r1, r2 = rterms
eq = _norm2(r1, r2) - _norm2(r0, others)
ok = True
elif len(rterms) == 2:
# r0**2 - (r1+others)**2
r0, r1 = rterms
eq = r0**2 - _norm2(r1, others)
ok = True
new_depth = sqrt_depth(eq) if ok else depth
rpt += 1 # XXX how many repeats with others unchanging is enough?
if not ok or (
nwas is not None and len(rterms) == nwas and
new_depth is not None and new_depth == depth and
rpt > 3):
raise NotImplementedError('Cannot remove all radicals')
flags.update(dict(cov=cov, n=len(rterms), rpt=rpt))
neq = unrad(eq, *syms, **flags)
if neq:
eq, cov = neq
eq, cov = _canonical(eq, cov)
return eq, cov
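# Illustrative sketch, not part of the original module: solutions of the
# radical-free equation returned by unrad are only candidates (a superset),
# so they are filtered against the original expression here.
def _unrad_candidates_example():
    from sympy import sqrt, solve
    from sympy.abc import x
    orig = sqrt(x) - 2
    neweq, cov = unrad(orig)  # -> (x - 4, [])
    return [s for s in solve(neweq, x) if orig.subs(x, s) == 0]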
from sympy.solvers.bivariate import (
bivariate_type, _solve_lambert, _filtered_gens)
|
jaimahajan1997/sympy
|
sympy/solvers/solvers.py
|
Python
|
bsd-3-clause
| 123,374
|
[
"Gaussian"
] |
97e38f46cae7f0f43950e661cf113f3785b14f37172b7c4dc67b9367a4bf43cc
|
#! /usr/bin/python2
import subprocess
import sys
import os
import time
from subprocess import PIPE
import socket
default_params = ''
output_file_prefix = 'output'
if len(sys.argv) > 1:
output_file_prefix = sys.argv[1]
#
# SCENARIO
#
# 0: radial dam break
# 1: gaussian
# 2: balanced steady state u
# 3: balanced steady state v
# 4: diamond initial condition
# 5: waves
#default_params += ' -s 5'
default_params += ' --initial-freq-x-mul=2.0'
default_params += ' --initial-freq-y-mul=1.0'
scenario_name = "SinCos Waves"
# FD/Spectral time stepping
run_method_0 = True
# rexi
run_method_rexi = False
# rexi par
run_method_rexipar = True
timeout = "02:00"
curdir_name = os.getcwd()
print ("Current working directory: "+curdir_name)
#os.chdir('../../../')
#
# maximum simulation time in seconds
#
max_time = 50
#
# time step size for coarse time steps
#
dt = 5.0
#
# order of time step for RK
# Use order 4 to make time errors very small to make the spatial error dominate
#
timestep_order = 4
cfl=0.3
print "Max simulation time: "+str(max_time)
print "Time step size for REXI time step: "+str(dt)
print "Time step order: "+str(timestep_order)
#
# default params
#
default_params += ' -f 1 -g 1 -H 1 -X 1 -Y 1 --compute-error 1 -t '+str(max_time)
# Use higher-order time stepping?
default_params += ' -R '+str(timestep_order)
###########################
# threads
###########################
# 24 threads per node on Hartree center
#T_list = [24]
# Use only single-threaded test (Parallelization-in-time only)
#thread_list = [1, 2, 4, 8, 12, 24]
thread_list = [24]
###########################
# MPI RANKS
###########################
#mpi_ranks = [1, 2, 4, 8, 16, 32, 64, 128, 256]
mpi_ranks = [1, 2, 4, 8, 16, 32, 64, 128, 170, 256, 512, 768, 1024, 1536, 2048, 4096]
mpi_ranks = [1]
# Maximum of total threads (MPI ranks x threads)
max_total_threads = 4*1024
###########################
# resolutions
###########################
res_list = [32, 64, 128]
###########################
# M REXI sampling points
###########################
M_list = []
m = 8
while m < 2000:
M_list.append(m)
    m *= 2
#M_list = [64, 128, 256, 512, 1024, 2048, 2048*4, 2048*8, 2048*16, 2048*32, 2048*64]
M_list = [32, 64, 128, 256, 512]
M_list = [8, 16, 32, 64, 128, 256, 512, 1024, 2048, 2048*2, 2048*4, 2048*8, 2048*16]
#M_list = [32, 64, 128, 256, 512, 1024, 1024*4, 1024*16, 1024*32]
###########################
# MEM ALLOC
###########################
A_list = [0, 1, 2]
A_list = [1]
###########################
# HYPER VISCOSITY
###########################
hyperviscosity = {}
# http://math.boisestate.edu/~wright/research/FlyerEtAl2012.pdf
# not necessary for these short-range runs
for n in res_list:
hyperviscosity[n] = 4.*pow(float(n), float(-4))
hyperviscosity[n] = 0
comp_spec='scons --compiler=intel --sweet-mpi=enable --program=swe_rexi --plane-spectral-space=enable --libfft=enable --plane-spectral-dealiasing=disable --mode=release'
comp_cart='scons --compiler=intel --sweet-mpi=enable --program=swe_rexi --plane-spectral-space=disable --libfft=enable --plane-spectral-dealiasing=disable --mode=release'
comp_rexi='scons --compiler=intel --sweet-mpi=enable --program=swe_rexi --plane-spectral-space=disable --libfft=enable --rexi-parallel-sum=disable --plane-spectral-dealiasing=disable --mode=release'
comp_rexi_par='scons --compiler=intel --sweet-mpi=enable --program=swe_rexi --plane-spectral-space=disable --libfft=enable --rexi-parallel-sum=enable --plane-spectral-dealiasing=disable --mode=release'
if False:
tests = []
else:
# short description, binary, parameters, title
tests = [
# [ 'nr_fd_spec_agrid', ' -S 0 --timestepping-mode 0 --staggering 0 -C '+str(cfl), 'Finite differences in Fourier space, A-grid', comp_spec, 'nr_fd_spec_agrid' ],
# [ 'nr_fd_cart_agrid', ' -S 0 --timestepping-mode 0 --staggering 0 -C '+str(cfl), 'Finite differences in Cartesian space, A-grid', comp_cart, 'nr_fd_cart_agrid' ],
# [ 'nr_fd_spec_cgrid', ' -S 0 --timestepping-mode 0 --staggering 1 -C '+str(cfl), 'Finite differences in Fourier space, C-grid', comp_spec, 'nr_fd_spec_cgrid' ],
[ 'nr_fd_cart_cgrid', ' -S 0 --timestepping-mode 0 --staggering 1 -C '+str(cfl), 'Finite differences in Cartesian space, C-grid', comp_cart, 'nr_fd_cart_cgrid' ],
# [ 'nr_spec_spec_agrid', ' -S 1 --timestepping-mode 0 --staggering 0 -C '+str(cfl), 'Spectral derivatives in Fourier space, A-grid', comp_spec, 'nr_spec_spec_agrid' ],
]
# add rexi tests
for m in M_list:
# tests.append(['rexi_m'+str(m).zfill(6), ' -S 1 --use-specdiff-for-complex-array 1 --rexi-h 0.2 --timestepping-mode 1 --staggering 0 --rexi-m='+str(m)+' -C '+str(-dt), 'REXI M='+str(m), comp_rexi, 'rexi_m'])
# tests.append(['rexi_par_m'+str(m).zfill(6), ' -S 1 --use-specdiff-for-complex-array 1 --rexi-h 0.2 --timestepping-mode 1 --staggering 0 --rexi-m='+str(m)+' -C '+str(-dt), 'REXI PAR M='+str(m), comp_rexi_par, 'rexi_par_m'])
# tests.append(['rexi_fd_m'+str(m).zfill(4), ' -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m='+str(m)+' -C '+str(-dt), 'REXI FD M='+str(m), comp_rexi, 'rexi_fd_m'])
tests.append(['rexi_fd_par_m'+str(m).zfill(4), ' -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m='+str(m)+' -C '+str(-dt), 'REXI PAR FD M='+str(m), comp_rexi_par, 'rexi_fd_par_m'])
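# Illustrative sketch appended for exposition; it is not part of the original
# script, whose job-submission section is not included in this excerpt.  It
# only shows how each test entry combines with the shared default_params
# string; the binary invocation and batch submission are assumptions left out
# of this example.
for test_id, extra_params, title, compile_cmd, group in tests:
    print(title+': '+default_params+extra_params)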
|
schreiberx/sweet
|
archive/benchmarks_plane/rexi_tests_stfc/test_nxq/params.py
|
Python
|
mit
| 5,377
|
[
"Gaussian"
] |
65519e408684877a4b6656f6a1013f184964f77a5477fd99741b0f4959504d26
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
[extends] gast.NodeTransformer
[uses] transformer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
[uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
"""This enumeration represents optional conversion options.
These conversion options are experimental. They are subject to change without
notice and offer no guarantees.
_Example Usage_
```python
optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS
@tf.function(experimental_autograph_options=optionals)
def f(i):
if i == 0: # EQUALITY_OPERATORS allows the use of == here.
tf.print('i is zero')
```
Attributes:
ALL: Enable all features.
    AUTO_CONTROL_DEPS: Insertion of control dependencies in the generated code.
ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
their TF counterparts.
EQUALITY_OPERATORS: Whether to convert the comparison operators, like
equality. This is soon to be deprecated as support is being added to the
Tensor class.
LISTS: Convert list idioms, like initializers, slices, append, etc.
NAME_SCOPES: Insert name scopes that name ops according to context, like the
function they were defined in.
"""
ALL = 'ALL'
AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
LISTS = 'LISTS'
NAME_SCOPES = 'NAME_SCOPES'
@classmethod
def all(cls):
"""Returns a tuple that enables all options."""
return tuple(cls.__members__.values())
@classmethod
def all_but(cls, exclude):
"""Returns a tuple that enables all but the excluded options."""
if not isinstance(exclude, (list, tuple, set)):
exclude = (exclude,)
return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
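# Illustrative sketch, not part of the original module: all_but() drops the
# requested members and the ALL sentinel, so its result can be passed
# directly as optional_features.
def _feature_selection_example():
  without_lists = Feature.all_but(Feature.LISTS)
  assert Feature.LISTS not in without_lists
  assert Feature.ALL not in without_lists
  return without_lists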
STANDARD_OPTIONS = None # Forward definition.
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
user_requested: bool, whether the conversion was explicitly requested by
the user, as opposed to being performed as a result of other logic. This
      value always auto-resets to False in child conversions.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
user_requested=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.user_requested = user_requested
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
def as_tuple(self):
return (self.recursive, self.user_requested,
self.internal_convert_user_code, self.optional_features)
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
assert isinstance(other, ConversionOptions)
return self.as_tuple() == other.as_tuple()
def __str__(self):
    return 'ConversionOptions[{}]'.format(self.as_tuple())
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def call_options(self):
"""Returns the corresponding options to be used for recursive conversion."""
return ConversionOptions(
recursive=self.recursive,
user_requested=False,
internal_convert_user_code=self.recursive,
optional_features=self.optional_features)
def to_ast(self):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Returns:
ast.Node
"""
if self == STANDARD_OPTIONS:
return parser.parse_expression('ag__.STD')
template = """
ag__.ConversionOptions(
recursive=recursive_val,
user_requested=user_requested_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.{}'.format(str(v)) for v in values)))
expr_ast = templates.replace(
template,
recursive_val=parser.parse_expression(str(self.recursive)),
user_requested_val=parser.parse_expression(str(self.user_requested)),
internal_convert_user_code_val=parser.parse_expression(
str(self.internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
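# Illustrative sketch, not part of the original module: Feature.ALL acts as a
# wildcard in uses(), and call_options() resets user_requested for child
# (recursive) conversions, as documented in the class docstring above.
def _conversion_options_example():
  opts = ConversionOptions(recursive=True)  # optional_features=Feature.ALL
  assert opts.uses(Feature.LISTS)
  assert not opts.call_options().user_requested
  return opts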
STANDARD_OPTIONS = ConversionOptions(
recursive=True,
user_requested=False,
internal_convert_user_code=True,
optional_features=None)
class ProgramContext(
collections.namedtuple('ProgramContext', ('options', 'autograph_module'))):
"""ProgramContext keeps track of converting function hierarchies.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
options: ConversionOptions
autograph_module: Module, a reference to the autograph module. This needs to
be specified by the caller to avoid circular dependencies.
"""
pass
class EntityContext(transformer.Context):
"""Tracks the conversion of a single entity.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
namer: Namer
info: transformer.EntityInfo
program: ProgramContext,
    target_name: Text
"""
def __init__(self, namer, entity_info, program_ctx, target_name=None):
super(EntityContext, self).__init__(entity_info)
self.namer = namer
self.program = program_ctx
self.target_name = target_name
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
get_definition_directive(node, ag.foo_directive, 'baz')
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError(
'%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg, parser.unparse(other_value).strip(),
parser.unparse(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
def standard_analysis(node, context, is_initial=False):
"""Performs a complete static analysis of the given code.
Args:
node: ast.AST
context: converter.EntityContext
is_initial: bool, whether this is the initial analysis done on the input
source code
Returns:
ast.AST, same as node, with the static analysis annotations added
"""
# TODO(mdan): Clear static analysis here.
# TODO(mdan): Consider not running all analyses every time.
# TODO(mdan): Don't return a node because it's modified by reference.
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, context, None)
node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)
node = liveness.resolve(node, context, graphs)
if is_initial:
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def apply_(node, context, converter_module):
"""Applies a converter to an AST.
Args:
node: ast.AST
context: converter.EntityContext
converter_module: converter.Base
Returns:
ast.AST, the result of applying converter to node
"""
node = standard_analysis(node, context)
node = converter_module.transform(node, context)
return node
|
jhseu/tensorflow
|
tensorflow/python/autograph/core/converter.py
|
Python
|
apache-2.0
| 13,320
|
[
"VisIt"
] |
e22dc79a355f2461bbb8a889476065d1ff01ad708446fa2b76746d5e67551017
|
# -*- coding: utf-8 -*-
# YAFF is yet another force-field code.
# Copyright (C) 2011 Toon Verstraelen <Toon.Verstraelen@UGent.be>,
# Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of YAFF.
#
# YAFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# YAFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""Short-range scaling of pairwise interactions
The ``Scalings`` class describe scaling or exclusion of short-range
non-bonding pairwise interactions for atom pairs that are involved in
covalent energy terms.
A ``Scaling`` object can be attached to any ``ForcePartPair`` class and,
as a special case, also to the ``ForcePartEwaldCorrection``. A ``Scaling``
object describes which 1-2 (scale1), 1-3 (scale2) and 1-4 (scale3) pairs
should have their interactions scaled down or excluded (scaling=0.0).
In order to avoid ambiguities, each scaled pair should only correspond to
one unique bond path to the periodic image. If this is not the case an
``AssertionError`` is raised to inform the user that he/she should switch to
a larger supercell. Yaff can simply not handle such cases correctly. (The
same problem may be present in other codes, but we do not know to what extent
they handle such cases gracefully.)
"""
from __future__ import division
import numpy as np
from yaff.log import log
from yaff.pes.ext import scaling_dtype
__all__ = ['Scalings', 'iter_paths']
class Scalings(object):
'''Describes the scaling of short-range pairwise interactions for atom pairs
involved in covalent energy terms.
'''
def __init__(self, system, scale1=0.0, scale2=0.0, scale3=1.0, scale4=1.0):
'''
**Arguments:**
system
The system to which the scaling rules apply.
        scale1, scale2, scale3, scale4
            The scaling of the 1-2, 1-3, 1-4 and 1-5 pairs, respectively.
'''
self.items = []
if scale1 < -1 or scale1 > 1:
raise ValueError('scale1 must be in the range [-1,1].')
if scale2 < -1 or scale2 > 1:
raise ValueError('scale2 must be in the range [-1,1].')
if scale3 < -1 or scale3 > 1:
raise ValueError('scale3 must be in the range [-1,1].')
if scale4 < -1 or scale4 > 1:
raise ValueError('scale4 must be in the range [-1,1].')
self.scale1 = scale1
self.scale2 = scale2
self.scale3 = scale3
self.scale4 = scale4
stab = []
for i0 in range(system.natom):
if scale1 < 1.0:
for i1 in system.neighs1[i0]:
if i0 > i1:
stab.append((i0, i1, scale1, 1))
if scale2 < 1.0:
for i2 in system.neighs2[i0]:
if i0 > i2:
stab.append((i0, i2, scale2, 2))
if scale3 < 1.0:
for i3 in system.neighs3[i0]:
if i0 > i3:
stab.append((i0, i3, scale3, 3))
if scale4 < 1.0:
for i4 in system.neighs4[i0]:
if i0 > i4:
stab.append((i0, i4, scale4, 4))
stab.sort()
self.stab = np.array(stab, dtype=scaling_dtype)
self.check_mic(system)
def check_mic(self, system):
        '''Check whether each scale2 and scale3 pair is uniquely defined.
**Arguments:**
system
An instance of the system class, i.e. the one that is used to
create this scaling object.
This check is done by constructing for each scaled pair, all possible
bond paths between the two atoms. For each path, the bond vectors
(after applying the minimum image convention) are added. If for a
given pair, these sums of bond vectors differ between all possible
paths, the differences are expanded in cell vectors which can be used
to construct a proper supercell in which scale2 and scale3 pairs are
all uniquely defined.
'''
if system.cell.nvec == 0:
return
troubles = False
with log.section('SCALING'):
for i0, i1, scale, nbond in self.stab:
if nbond == 1:
continue
all_deltas = []
paths = []
for path in iter_paths(system, i0, i1, nbond):
delta_total = 0
for j0 in range(nbond):
j1 = j0 + 1
delta = system.pos[path[j0]] - system.pos[path[j1]]
system.cell.mic(delta)
delta_total += delta
all_deltas.append(delta_total)
paths.append(path)
all_deltas = np.array(all_deltas)
if abs(all_deltas.mean(axis=0) - all_deltas).max() > 1e-10:
troubles = True
if log.do_warning:
log.warn('Troublesome pair scaling detected.')
log('The following bond paths connect the same pair of '
'atoms, yet the relative vectors are different.')
for ipath in range(len(paths)):
log('%2i %27s %10s %10s %10s' % (
ipath,
','.join(str(index) for index in paths[ipath]),
log.length(all_deltas[ipath,0]),
log.length(all_deltas[ipath,1]),
log.length(all_deltas[ipath,2]),
))
log('Differences between relative vectors in fractional '
'coordinates:')
for ipath0 in range(1, len(paths)):
for ipath1 in range(ipath0):
diff = all_deltas[ipath0] - all_deltas[ipath1]
diff_frac = np.dot(system.cell.gvecs, diff)
log('%2i %2i %10.4f %10.4f %10.4f' % (
ipath0, ipath1,
diff_frac[0], diff_frac[1], diff_frac[2]
))
log.blank()
if troubles:
raise AssertionError('Due to the small spacing between some crystal planes, the scaling of non-bonding interactions will not work properly. Use a supercell to avoid this problem.')
def iter_paths(system, ib, ie, nbond):
"""Iterates over all paths between atoms ``ib`` and ``ie`` with the given
number of bonds
**Arguments:**
system
The system that contains the bond graph
ib, ie
The indexes of the beginning and end atoms.
nbond
The length of the path, in number of bonds.
"""
if nbond == 1:
if ie in system.neighs1[ib]:
yield (ib, ie)
else:
for i1 in system.neighs1[ib]:
for path in iter_paths(system, i1, ie, nbond-1):
if ib not in path:
yield (ib,) + path
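# Illustrative sketch, not part of the original module: iter_paths only needs
# the ``neighs1`` bond graph, so a minimal stand-in system suffices.  In the
# triangle 0-1-2 there is exactly one 2-bond path from atom 0 to atom 1,
# passing through atom 2.
def _iter_paths_example():
    class _FakeSystem(object):
        neighs1 = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
    return list(iter_paths(_FakeSystem(), 0, 1, 2))  # -> [(0, 2, 1)]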
|
molmod/yaff
|
yaff/pes/scaling.py
|
Python
|
gpl-3.0
| 7,782
|
[
"CRYSTAL"
] |
c58e6a8a9dd61af53570bf45483a4ea27baaed525391d006af3b3bf67b614bd5
|
import functools
import inspect
import sys
import warnings
from collections import defaultdict
from collections import deque
from collections import OrderedDict
from typing import Dict
from typing import List
from typing import Tuple
import attr
import py
import _pytest
from _pytest._code.code import FormattedExcinfo
from _pytest._code.code import TerminalRepr
from _pytest._code.source import getfslineno
from _pytest._io import TerminalWriter
from _pytest.compat import _format_args
from _pytest.compat import _PytestWrapper
from _pytest.compat import get_real_func
from _pytest.compat import get_real_method
from _pytest.compat import getfuncargnames
from _pytest.compat import getimfunc
from _pytest.compat import getlocation
from _pytest.compat import is_generator
from _pytest.compat import NOTSET
from _pytest.compat import safe_getattr
from _pytest.compat import TYPE_CHECKING
from _pytest.deprecated import FIXTURE_POSITIONAL_ARGUMENTS
from _pytest.deprecated import FUNCARGNAMES
from _pytest.mark import ParameterSet
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
if TYPE_CHECKING:
from typing import Type
from _pytest import nodes
from _pytest.main import Session
@attr.s(frozen=True)
class PseudoFixtureDef:
cached_result = attr.ib()
scope = attr.ib()
def pytest_sessionstart(session: "Session"):
import _pytest.python
import _pytest.nodes
scopename2class.update(
{
"package": _pytest.python.Package,
"class": _pytest.python.Class,
"module": _pytest.python.Module,
"function": _pytest.nodes.Item,
"session": _pytest.main.Session,
}
)
session._fixturemanager = FixtureManager(session)
scopename2class = {} # type: Dict[str, Type[nodes.Node]]
scope2props = dict(session=()) # type: Dict[str, Tuple[str, ...]]
scope2props["package"] = ("fspath",)
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance",)
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError(
"{} not available in {}-scoped context".format(scopename, self.scope)
)
return property(provide, None, None, func.__doc__)
return decoratescope
def get_scope_package(node, fixturedef):
import pytest
cls = pytest.Package
current = node
fixture_package_name = "{}/{}".format(fixturedef.baseid, "__init__.py")
while current and (
type(current) is not cls or fixture_package_name != current.nodeid
):
current = current.parent
if current is None:
return node.session
return current
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
raise ValueError("unknown scope")
return node.getparent(cls)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    # this function will transform all collected calls to a function
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname, scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(
fixturemanager,
"",
argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist,
False,
False,
)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except TEST_OUTCOME:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
# cs.indices.items() is random order of argnames. Need to
# sort this so that different calls to
# get_parametrized_fixture_keys will be deterministic.
for argname, param_index in sorted(cs.indices.items()):
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # package
key = (argname, param_index, item.fspath.dirpath())
elif scopenum == 2: # module
key = (argname, param_index, item.fspath)
elif scopenum == 3: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
items_by_argkey = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
items_by_argkey[scopenum] = item_d = defaultdict(deque)
for item in items:
keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
for key in keys:
item_d[key].append(item)
items = OrderedDict.fromkeys(items)
return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0))
def fix_cache_order(item, argkeys_cache, items_by_argkey):
for scopenum in range(0, scopenum_function):
for key in argkeys_cache[scopenum].get(item, []):
items_by_argkey[scopenum][key].appendleft(item)
def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
ignore = set()
items_deque = deque(items)
items_done = OrderedDict()
scoped_items_by_argkey = items_by_argkey[scopenum]
scoped_argkeys_cache = argkeys_cache[scopenum]
while items_deque:
no_argkey_group = OrderedDict()
slicing_argkey = None
while items_deque:
item = items_deque.popleft()
if item in items_done or item in no_argkey_group:
continue
argkeys = OrderedDict.fromkeys(
k for k in scoped_argkeys_cache.get(item, []) if k not in ignore
)
if not argkeys:
no_argkey_group[item] = None
else:
slicing_argkey, _ = argkeys.popitem()
# we don't have to remove relevant items from later in the deque because they'll just be ignored
matching_items = [
i for i in scoped_items_by_argkey[slicing_argkey] if i in items
]
for i in reversed(matching_items):
fix_cache_order(i, argkeys_cache, items_by_argkey)
items_deque.appendleft(i)
break
if no_argkey_group:
no_argkey_group = reorder_items_atscope(
no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1
)
for item in no_argkey_group:
items_done[item] = None
ignore.add(slicing_argkey)
return items_done
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
def get_direct_param_fixture_func(request):
return request.param
@attr.s(slots=True)
class FuncFixtureInfo:
# original function argument names
argnames = attr.ib(type=tuple)
# argnames that function immediately requires. These include argnames +
# fixture names specified via usefixtures and via autouse=True in fixture
# definitions.
initialnames = attr.ib(type=tuple)
names_closure = attr.ib() # List[str]
    name2fixturedefs = attr.ib()  # Dict[str, List[FixtureDef]]
def prune_dependency_tree(self):
"""Recompute names_closure from initialnames and name2fixturedefs
Can only reduce names_closure, which means that the new closure will
always be a subset of the old one. The order is preserved.
This method is needed because direct parametrization may shadow some
of the fixtures that were included in the originally built dependency
tree. In this way the dependency tree can get pruned, and the closure
of argnames may get reduced.
"""
closure = set()
working_set = set(self.initialnames)
while working_set:
argname = working_set.pop()
            # argname may be something not included in the original names_closure,
# in which case we ignore it. This currently happens with pseudo
# FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
# So they introduce the new dependency 'request' which might have
# been missing in the original tree (closure).
if argname not in closure and argname in self.names_closure:
closure.add(argname)
if argname in self.name2fixturedefs:
working_set.update(self.name2fixturedefs[argname][-1].argnames)
self.names_closure[:] = sorted(closure, key=self.names_closure.index)
class FixtureRequest:
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._fixture_defs = {} # type: Dict[str, FixtureDef]
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def fixturenames(self):
"""names of all active fixtures in this request"""
result = list(self._pyfuncitem._fixtureinfo.names_closure)
result.extend(set(self._fixture_defs).difference(result))
return result
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
# we arrive here because of a dynamic call to
# getfixturevalue(argname) usage which was naturally
# not known at parsing/collection time
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
return getattr(function, "__self__", None)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@scopeproperty()
def fspath(self) -> py.path.local:
""" the file system path of the test module which collected this test. """
# TODO: Remove ignore once _pyfuncitem is properly typed.
return self._pyfuncitem.fspath # type: ignore
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem
)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
self.node.add_marker(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def getfixturevalue(self, argname):
""" Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def _get_active_fixturedef(self, argname):
try:
return self._fixture_defs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef(cached_result, scope)
raise
# remove indent to prevent the python3 exception
# from leaking into the call
self._compute_fixture_value(fixturedef)
self._fixture_defs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
values = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
values.reverse()
return values
values.append(fixturedef)
current = current._parent_request
def _compute_fixture_value(self, fixturedef: "FixtureDef") -> None:
"""
Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will
force the FixtureDef object to throw away any previous results and compute a new fixture value, which
will be stored into the FixtureDef object itself.
"""
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
has_params = fixturedef.params is not None
fixtures_not_supported = getattr(funcitem, "nofuncargs", False)
if has_params and fixtures_not_supported:
msg = (
"{name} does not support fixtures, maybe unittest.TestCase subclass?\n"
"Node id: {nodeid}\n"
"Function type: {typename}"
).format(
name=funcitem.name,
nodeid=funcitem.nodeid,
typename=type(funcitem).__name__,
)
fail(msg, pytrace=False)
if has_params:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = py.path.local(frameinfo.filename)
source_lineno = frameinfo.lineno
rel_source_path = source_path.relto(funcitem.config.rootdir)
if rel_source_path:
source_path_str = rel_source_path
else:
source_path_str = str(source_path)
msg = (
"The requested fixture has no parameter defined for test:\n"
" {}\n\n"
"Requested fixture '{}' defined in:\n{}"
"\n\nRequested here:\n{}:{}".format(
funcitem.nodeid,
fixturedef.argname,
getlocation(fixturedef.func, funcitem.config.rootdir),
source_path_str,
source_lineno,
)
)
fail(msg, pytrace=False)
else:
param_index = funcitem.callspec.indices[argname]
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
try:
# call the fixture function
fixturedef.execute(request=subrequest)
finally:
self._schedule_finalizers(fixturedef, subrequest)
def _schedule_finalizers(self, fixturedef, subrequest):
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(
functools.partial(fixturedef.finish, request=subrequest), subrequest.node
)
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
fail(
"ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s"
% ((requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False,
)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
if scope == "package":
node = get_scope_package(self._pyfuncitem, self._fixturedef)
else:
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
scope, self._pyfuncitem
)
return node
def __repr__(self):
return "<FixtureRequest for %r>" % (self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self._pyfuncitem = request._pyfuncitem
self._fixture_defs = request._fixture_defs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest {!r} for {!r}>".format(self.fixturename, self._pyfuncitem)
def addfinalizer(self, finalizer):
self._fixturedef.addfinalizer(finalizer)
def _schedule_finalizers(self, fixturedef, subrequest):
# if the executing fixturedef was not explicitly requested in the argument list (via
# getfixturevalue inside the fixture call) then ensure this fixture def will be finished
# first
if fixturedef.argname not in self.fixturenames:
fixturedef.addfinalizer(
functools.partial(self._fixturedef.finish, request=self)
)
super()._schedule_finalizers(fixturedef, subrequest)
scopes = "session package module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
def scope2index(scope, descr, where=None):
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined.
"""
try:
return scopes.index(scope)
except ValueError:
fail(
"{} {}got an unexpected scope value '{}'".format(
descr, "from {} ".format(where) if where else "", scope
),
pytrace=False,
)
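# Illustrative values, derived directly from the definitions above:
#   scopemismatch("function", "session") -> False  (a narrower requester may use broader fixtures)
#   scopemismatch("session", "function") -> True   (a session fixture must not use a function-scoped one)
#   scope2index("module", descr="Fixture 'x'") -> 2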
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self) -> "FixtureLookupErrorRepr":
tblines = [] # type: List[str]
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            # the last fixture raised an error, so present
            # it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (OSError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno + 1))
else:
addline("file {}, line {}".format(fspath, lineno + 1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith("def"):
break
if msg is None:
fm = self.request._fixturemanager
available = set()
parentid = self.request._pyfuncitem.parent.nodeid
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parentid))
if faclist:
available.add(name)
if self.argname in available:
msg = " recursive dependency involving fixture '{}' detected".format(
self.argname
)
else:
msg = "fixture '{}' not found".format(self.argname)
msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw: TerminalWriter) -> None:
# tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
lines = self.errorstring.split("\n")
if lines:
tw.line(
"{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()),
red=True,
)
for line in lines[1:]:
tw.line(
"{} {}".format(FormattedExcinfo.flow_marker, line.strip()),
red=True,
)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "{}:{}".format(fs, lineno + 1)
source = _pytest._code.Source(fixturefunc)
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
it = fixturefunc(**kwargs)
res = next(it)
finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it)
request.addfinalizer(finalizer)
else:
res = fixturefunc(**kwargs)
return res
def _teardown_yield_fixture(fixturefunc, it):
"""Executes the teardown of a fixture function by advancing the iterator after the
yield and ensure the iteration ends (if not it means there is more than one yield in the function)"""
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(
fixturefunc, "yield_fixture function has more than one 'yield'"
)
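# Hedged sketch of the yield-fixture contract enforced by call_fixture_func()
# and _teardown_yield_fixture() above (the resource names are illustrative):
#
#     @pytest.fixture
#     def resource():
#         handle = acquire()   # setup: runs up to the yield
#         yield handle         # exactly one yield is allowed
#         handle.release()     # teardown: runs when next(it) is called above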
def _eval_scope_callable(scope_callable, fixture_name, config):
try:
result = scope_callable(fixture_name=fixture_name, config=config)
except Exception:
raise TypeError(
"Error evaluating {} while defining fixture '{}'.\n"
"Expected a function with the signature (*, fixture_name, config)".format(
scope_callable, fixture_name
)
)
if not isinstance(result, str):
fail(
"Expected {} to return a 'str' while defining fixture '{}', but it returned:\n"
"{!r}".format(scope_callable, fixture_name, result),
pytrace=False,
)
return result
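# Hedged sketch of a dynamic-scope callable accepted by the validation above
# (the "--shared" option name is an illustrative assumption):
#
#     def dynamic_scope(*, fixture_name, config):
#         return "session" if config.getoption("--shared", default=False) else "function"
#
#     @pytest.fixture(scope=dynamic_scope)
#     def resource():
#         ...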
class FixtureDef:
""" A container for a factory definition. """
def __init__(
self,
fixturemanager,
baseid,
argname,
func,
scope,
params,
unittest=False,
ids=None,
):
self._fixturemanager = fixturemanager
self.baseid = baseid or ""
self.has_location = baseid is not None
self.func = func
self.argname = argname
if callable(scope):
scope = _eval_scope_callable(scope, argname, fixturemanager.config)
self.scope = scope
self.scopenum = scope2index(
scope or "function",
descr="Fixture '{}'".format(func.__name__),
where=baseid,
)
self.params = params
self.argnames = getfuncargnames(func, name=argname, is_method=unittest)
self.unittest = unittest
self.ids = ids
self.cached_result = None
self._finalizers = []
def addfinalizer(self, finalizer):
self._finalizers.append(finalizer)
def finish(self, request):
exc = None
try:
while self._finalizers:
try:
func = self._finalizers.pop()
func()
except BaseException as e:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = e
if exc:
raise exc
finally:
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
# even if finalization fails, we invalidate
# the cached fixture value and remove
# all finalizers because they may be bound methods which will
# keep instances alive
self.cached_result = None
self._finalizers = []
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
fixturedef.addfinalizer(functools.partial(self.finish, request=request))
my_cache_key = self.cache_key(request)
if self.cached_result is not None:
result, cache_key, err = self.cached_result
# note: comparison with `==` can fail (or be expensive) for e.g.
# numpy arrays (#6497)
if my_cache_key is cache_key:
if err is not None:
_, val, tb = err
raise val.with_traceback(tb)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish(request)
assert self.cached_result is None
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
return hook.pytest_fixture_setup(fixturedef=self, request=request)
def cache_key(self, request):
return request.param_index if not hasattr(request, "param") else request.param
def __repr__(self):
return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format(
self.argname, self.scope, self.baseid
)
def resolve_fixture_function(fixturedef, request):
"""Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific
instances and bound methods.
"""
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = fixturedef.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
# handle the case where fixture is defined not in a test class, but some other class
# (for example a plugin class with a fixture), see #2270
if hasattr(fixturefunc, "__self__") and not isinstance(
request.instance, fixturefunc.__self__.__class__
):
return fixturefunc
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance)
return fixturefunc
def pytest_fixture_setup(fixturedef, request):
""" Execution of fixture setup. """
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
assert fixdef.cached_result is not None
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = resolve_fixture_function(fixturedef, request)
my_cache_key = fixturedef.cache_key(request)
try:
result = call_fixture_func(fixturefunc, request, kwargs)
except TEST_OUTCOME:
fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
raise
fixturedef.cached_result = (result, my_cache_key, None)
return result
def _ensure_immutable_ids(ids):
if ids is None:
return
if callable(ids):
return ids
return tuple(ids)
def wrap_function_to_error_out_if_called_directly(function, fixture_marker):
"""Wrap the given fixture function so we can raise an error about it being called directly,
instead of used as an argument in a test function.
"""
message = (
'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
"but are created automatically when test functions request them as parameters.\n"
"See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n"
"https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code."
).format(name=fixture_marker.name or function.__name__)
@functools.wraps(function)
def result(*args, **kwargs):
fail(message, pytrace=False)
# keep reference to the original function in our own custom attribute so we don't unwrap
# further than this point and lose useful wrappings like @mock.patch (#3774)
result.__pytest_wrapped__ = _PytestWrapper(function)
return result
@attr.s(frozen=True)
class FixtureFunctionMarker:
scope = attr.ib()
params = attr.ib(converter=attr.converters.optional(tuple))
autouse = attr.ib(default=False)
# Ignore type because of https://github.com/python/mypy/issues/6172.
ids = attr.ib(default=None, converter=_ensure_immutable_ids) # type: ignore
name = attr.ib(default=None)
def __call__(self, function):
if inspect.isclass(function):
raise ValueError("class fixtures not supported (maybe in the future)")
if getattr(function, "_pytestfixturefunction", False):
raise ValueError(
"fixture is being applied more than once to the same function"
)
function = wrap_function_to_error_out_if_called_directly(function, self)
name = self.name or function.__name__
if name == "request":
location = getlocation(function)
fail(
"'request' is a reserved word for fixtures, use another name:\n {}".format(
location
),
pytrace=False,
)
function._pytestfixturefunction = self
return function
FIXTURE_ARGS_ORDER = ("scope", "params", "autouse", "ids", "name")
def _parse_fixture_args(callable_or_scope, *args, **kwargs):
arguments = {
"scope": "function",
"params": None,
"autouse": False,
"ids": None,
"name": None,
}
kwargs = {
key: value for key, value in kwargs.items() if arguments.get(key) != value
}
fixture_function = None
if isinstance(callable_or_scope, str):
args = list(args)
args.insert(0, callable_or_scope)
else:
fixture_function = callable_or_scope
positionals = set()
for positional, argument_name in zip(args, FIXTURE_ARGS_ORDER):
arguments[argument_name] = positional
positionals.add(argument_name)
duplicated_kwargs = {kwarg for kwarg in kwargs.keys() if kwarg in positionals}
if duplicated_kwargs:
raise TypeError(
"The fixture arguments are defined as positional and keyword: {}. "
"Use only keyword arguments.".format(", ".join(duplicated_kwargs))
)
if positionals:
warnings.warn(FIXTURE_POSITIONAL_ARGUMENTS, stacklevel=2)
arguments.update(kwargs)
return fixture_function, arguments
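# Illustrative behaviour of the parser above:
#   _parse_fixture_args("module")       -> (None, {..., "scope": "module", ...})
#   _parse_fixture_args(some_function)  -> (some_function, the default arguments)
#   _parse_fixture_args("module", scope="class") raises TypeError, because
#   "scope" would then be given both positionally and as a keyword.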
def fixture(
callable_or_scope=None,
*args,
scope="function",
params=None,
autouse=False,
ids=None,
name=None
):
"""Decorator to mark a fixture factory function.
This decorator can be used, with or without parameters, to define a
fixture function.
The name of the fixture function can later be referenced to cause its
invocation ahead of running tests: test
modules or classes can use the ``pytest.mark.usefixtures(fixturename)``
marker.
Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
Fixtures can provide their values to test functions using ``return`` or ``yield``
statements. When using ``yield`` the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome, and must yield exactly once.
:arg scope: the scope for which this fixture is shared, one of
``"function"`` (default), ``"class"``, ``"module"``,
``"package"`` or ``"session"`` (``"package"`` is considered **experimental**
at this time).
This parameter may also be a callable which receives ``(fixture_name, config)``
as parameters, and must return a ``str`` with one of the values mentioned above.
See :ref:`dynamic scope` in the docs for more information.
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
The current parameter is available in ``request.param``.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
"""
if params is not None:
params = list(params)
fixture_function, arguments = _parse_fixture_args(
callable_or_scope,
*args,
scope=scope,
params=params,
autouse=autouse,
ids=ids,
name=name,
)
scope = arguments.get("scope")
params = arguments.get("params")
autouse = arguments.get("autouse")
ids = arguments.get("ids")
name = arguments.get("name")
if fixture_function and params is None and autouse is False:
# direct decoration
return FixtureFunctionMarker(scope, params, autouse, name=name)(
fixture_function
)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
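# Hedged usage sketch of the decorator above, covering both the direct and the
# parametrized form (the fixture names are illustrative):
#
#     @pytest.fixture
#     def config_path(tmpdir):
#         return tmpdir.join("app.cfg")
#
#     @pytest.fixture(scope="module", params=["sqlite", "postgres"])
#     def backend(request):
#         return request.param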
def yield_fixture(
callable_or_scope=None,
*args,
scope="function",
params=None,
autouse=False,
ids=None,
name=None
):
""" (return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
return fixture(
callable_or_scope,
*args,
scope=scope,
params=params,
autouse=autouse,
ids=ids,
name=name,
)
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
"""Session-scoped fixture that returns the :class:`_pytest.config.Config` object.
Example::
def test_foo(pytestconfig):
if pytestconfig.getoption("verbose") > 0:
...
"""
return request.config
def pytest_addoption(parser):
parser.addini(
"usefixtures",
type="args",
default=[],
help="list of default fixtures to be used with this project",
)
class FixtureManager:
"""
    pytest fixture definitions and information are stored and managed
    by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
    Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to set up the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    During the test-setup phase all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
"""
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def _get_direct_parametrize_args(self, node):
"""This function returns all the direct parametrization
arguments of a node, so we don't mistake them for fixtures
Check https://github.com/pytest-dev/pytest/issues/5036
This things are done later as well when dealing with parametrization
so this could be improved
"""
parametrize_argnames = []
for marker in node.iter_markers(name="parametrize"):
if not marker.kwargs.get("indirect", False):
p_argnames, _ = ParameterSet._parse_parametrize_args(
*marker.args, **marker.kwargs
)
parametrize_argnames.extend(p_argnames)
return parametrize_argnames
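    # Illustrative: for @pytest.mark.parametrize("x", [1, 2]) the name "x" is
    # returned here (direct parametrization), whereas a mark with indirect=True
    # is skipped, so "x" can still be resolved as a fixture.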
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not getattr(node, "nofuncargs", False):
argnames = getfuncargnames(func, name=node.name, cls=cls)
else:
argnames = ()
usefixtures = get_use_fixtures_for_node(node)
initialnames = usefixtures + argnames
fm = node.session._fixturemanager
initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(
initialnames, node, ignore_args=self._get_direct_parametrize_args(node)
)
return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__).realpath()
except AttributeError:
pass
else:
from _pytest import nodes
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != nodes.SEP:
nodeid = nodeid.replace(p.sep, nodes.SEP)
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i : i + 1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode, ignore_args=()):
        # collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
# at this point, fixturenames_closure contains what we call "initialnames",
# which is a set of fixturenames the function immediately requests. We
# need to return it as well, so save this.
initialnames = tuple(fixturenames_closure)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in ignore_args:
continue
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
def sort_by_scope(arg_name):
try:
fixturedefs = arg2fixturedefs[arg_name]
except KeyError:
return scopes.index("function")
else:
return fixturedefs[-1].scopenum
fixturenames_closure.sort(key=sort_by_scope)
return initialnames, fixturenames_closure, arg2fixturedefs
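    # Illustrative walk-through of the closure computation above: if a test
    # requests "db_session" and that fixture's FixtureDef lists "engine" in
    # its argnames, the while-loop merges "engine" into the closure; the final
    # sort then orders names broadest scope first (session before function).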
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
markers = list(metafunc.definition.iter_markers("parametrize"))
for parametrize_mark in markers:
if "argnames" in parametrize_mark.kwargs:
argnames = parametrize_mark.kwargs["argnames"]
else:
argnames = parametrize_mark.args[0]
if not isinstance(argnames, (tuple, list)):
argnames = [
x.strip() for x in argnames.split(",") if x.strip()
]
if argname in argnames:
break
else:
metafunc.parametrize(
argname,
fixturedef.params,
indirect=True,
scope=fixturedef.scope,
ids=fixturedef.ids,
)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
            # The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise. safe_getattr() ignores such exceptions.
obj = safe_getattr(holderobj, name, None)
marker = getfixturemarker(obj)
if not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
if marker.name:
name = marker.name
# during fixture definition we wrap the original fixture function
# to issue a warning if called directly, so here we unwrap it in order to not emit the warning
# when pytest itself calls the fixture function
obj = get_real_method(obj, holderobj)
fixture_def = FixtureDef(
self,
nodeid,
name,
obj,
marker.scope,
marker.params,
unittest=unittest,
ids=marker.ids,
)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or "", autousenames))
def getfixturedefs(self, argname, nodeid):
"""
Gets a list of fixtures which are applicable to the given node id.
:param str argname: name of the fixture to search for
:param str nodeid: full node id of the requesting test.
:return: list[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
from _pytest import nodes
for fixturedef in fixturedefs:
if nodes.ischildnode(fixturedef.baseid, nodeid):
yield fixturedef
def get_use_fixtures_for_node(node) -> Tuple[str, ...]:
"""Returns the names of all the usefixtures() marks on the given node"""
return tuple(
str(name)
for mark in node.iter_markers(name="usefixtures")
for name in mark.args
)
|
alfredodeza/pytest
|
src/_pytest/fixtures.py
|
Python
|
mit
| 56,037
|
[
"VisIt"
] |
19949c2d61701327e9dac4b8b3acb9f59686a2d2d67d1ad2f74836d46c668ecb
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for filmenoi
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "filmenoi"
__adult__ = "false"
__category__ = "F,S"
__type__ = "generic"
__title__ = "Filme-noi.com"
__language__ = "ES"
__creationdate__ = "20131223"
__thumbnail__ = "http://s6.postimg.org/qxbdwx5xt/filmenoi.jpg"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("[filmenoi.py] mainlist")
item.url="http://www.filme-net.com/";
return novedades(item)
def novedades(item):
logger.info("[filmenoi.py] novedades")
itemlist = []
    # Download the page
data = scrapertools.cachePage(item.url)
    # this one is for the Cinemaxx.rs site
#patron = '<ul class="pm-ul-browse-videos thumbnails" id="pm-grid">[^<]+'
#patron = '<li>[^<]+'
#patron += '<div class="pm-li-video">[^<]+'
#patron += '.*?<a href="([^"]+)".*?[^<]+<img src="([^"]+)" alt="([^"]+)".*?</li>'
    # this one is for the Filme-noi.com site
#patron = '<div class="home_posts_thumbnail">[^<]+'
#patron += '<a href="([^"]+)".*?[^<]+<img src="([^"]+)" alt="([^"]+)".*?</div>'
patron = '<div class="home_posts_thumbnail">[^<]+'
patron += '<a href="([^"]+)"[^<]+<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
thumbnail = scrapertools.find_single_match(scrapedthumbnail,"(http\://www.filme-net.com/wp-content/uploads/.*?.jpg)")
scrapedplot = ""
#if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
if (DEBUG): logger.info("url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], title=["+scrapedtitle+"]") # Falla en sacar las imagenes por que tienen espacios
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
patron = "<a href='([^']+)'>\&rsaquo\;</a>" #Falla no pone pagina siguente
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
#if DEBUG: scrapertools.printMatches(item.url)
for match in matches:
scrapedtitle = "> Inainte"
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,match)
scrapedthumbnail = ""
if (DEBUG): logger.info("url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], title=["+scrapedtitle+"]")
itemlist.append( Item(channel=__channel__, action="novedades", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
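# A hedged sketch for checking the patterns above offline against saved HTML
# (the file name is an illustrative assumption, not part of the channel):
#
#     data = open("filme-noi-home.html").read()
#     print re.compile(patron, re.DOTALL).findall(data)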
def findvideos(item):
logger.info("[filmenoi.py] findvideos")
data = scrapertools.cache_page(item.url)
itemlist=[]
#<a href="http://67cfb0db.linkbucks.com"><img title="billionuploads" src="http://unsoloclic.info/wp-content/uploads/2012/11/billonuploads2.png" alt="" width="380" height="50" /></a></p>
#<a href="http://1bd02d49.linkbucks.com"><img class="colorbox-57103" title="Freakeshare" alt="" src="http://unsoloclic.info/wp-content/uploads/2013/01/freakshare.png" width="390" height="55" /></a></p>
    patron = '<p+<iframe href="(http.//[a-z0-9]+.video.mail.r[^"]+)[^>]+><img.*?title="([^"]+)".*?src="([^"]+)"'  # looking for this, but it fails when it is the second link and not in the same <p> as the first server
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for url,servertag,serverthumb in matches:
itemlist.append( Item(channel=__channel__, action="play", server="linkbucks", title=servertag+" [linkbucks]" , url=url , thumbnail=serverthumb , plot=item.plot , folder=False) )
from servers import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
if videoitem.server!="linkbucks":
videoitem.channel=__channel__
videoitem.action="play"
videoitem.folder=False
videoitem.title = "["+videoitem.server+"]"
return itemlist
def play(item):
logger.info("[filmenoi.py] play")
itemlist=[]
if item.server=="linkbucks":
logger.info("Es linkbucks")
        # Work out the final link
from servers import linkbucks
location = linkbucks.get_long_url(item.url)
logger.info("location="+location)
        # Extract the skip-ad URL from adf.ly
if location.startswith("http://adf"):
            # Work out the final link
from servers import adfly
location = adfly.get_long_url(location)
logger.info("location="+location)
from servers import servertools
itemlist=servertools.find_video_items(data=location)
for videoitem in itemlist:
videoitem.channel=__channel__
videoitem.folder=False
else:
itemlist.append(item)
return itemlist
# Automatic channel verification: this function must return "True" if the channel is OK.
def test():
# mainlist
novedades_items = mainlist(Item())
    # Consider the channel good if any of the "Novedades" videos returns mirrors
for singleitem in novedades_items:
mirrors_items = findvideos( item=singleitem )
for mirror_item in mirrors_items:
video_items = play(mirror_item)
if len(video_items)>0:
return True
return False
|
superberny70/plugin.video.pelisalacarta-3-9X
|
pelisalacarta/channels/filmenoi.py
|
Python
|
gpl-3.0
| 5,966
|
[
"ADF"
] |
6749a3b26122cfb4132e8d741ba1391056fc19fa7314fafa0f828b6894204241
|
import time
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import mbuild as mb
import metamds as mds
import mdtraj as md
def build_monolayer(chain_length, n_molecules, **kwargs):
from mbuild.examples import AlkaneMonolayer
pattern = mb.Random2DPattern(n_molecules)
monolayer = AlkaneMonolayer(pattern, tile_x=1, tile_y=1, chain_length=chain_length)
monolayer.name = 'alkane_n-{}_l-{}'.format(n_molecules, chain_length)
mb.translate(monolayer, [0, 0, 2])
return monolayer
def create_run_script(build_func, forcefield, input_dir, **kwargs):
compound = build_func(**kwargs)
name = compound.name
em = os.path.join(input_dir, 'em.mdp')
nvt = os.path.join(input_dir, 'nvt.mdp')
gro = '{name}.gro'.format(name=name)
top = '{name}.top'.format(name=name)
box = compound.boundingbox
compound.periodicity += np.array([0, 0, 5 * box.lengths[2]])
compound.save(top, forcefield=forcefield, overwrite=True)
em_grompp = 'gmx grompp -f {mdp} -c {gro} -p {top} -o em.tpr'.format(mdp=em, gro=gro, top=top)
em_mdrun = 'gmx mdrun -v -deffnm em -ntmpi 1'
nvt_grompp = 'gmx grompp -f {mdp} -c em.gro -p {top} -o nvt.tpr'.format(mdp=nvt, top=top)
nvt_mdrun = 'gmx mdrun -v -deffnm nvt -ntmpi 1'
script = (em_grompp, em_mdrun, nvt_grompp, nvt_mdrun)
return script
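# A typical script tuple produced above would look like this (the paths shown
# are illustrative, derived from the defaults used further down):
#   ('gmx grompp -f static_input_files/em.mdp -c alkane_n-100_l-10.gro '
#    '-p alkane_n-100_l-10.top -o em.tpr',
#    'gmx mdrun -v -deffnm em -ntmpi 1',
#    'gmx grompp -f static_input_files/nvt.mdp -c em.gro -p alkane_n-100_l-10.top -o nvt.tpr',
#    'gmx mdrun -v -deffnm nvt -ntmpi 1')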
if __name__ == '__main__':
# Input parameters
parameters = {'chain_length': 10,
'n_molecules': 100,
'forcefield': 'OPLS-aa'}
# Build the initial configuration
compound = build_monolayer(**parameters)
#compound.visualize()
parameters['build_func'] = build_monolayer
# Initialize a simulation instance with a template and some metadata
sim = mds.Simulation(name='monolayer', template=create_run_script, input_dir='static_input_files', output_dir='output')
# Parameterize our simulation template
task = sim.parametrize(**parameters)
# Run
task.execute()
    exit()  # NOTE: the demo stops here; the remote execution and analysis below never run
task.execute(hostname='rahman.vuse.vanderbilt.edu', username='ctk3b')
print(task.status())
time.sleep(10)
task.sync()
# Analyze
trajectories = task.get_output_files('trajectories')
topologies = task.get_output_files('topologies')
# Pick which one to select?
import pdb; pdb.set_trace()
trj_path = os.path.join(task.output_dir, 'nvt.xtc')
top_path = os.path.join(task.output_dir, 'em.gro')
traj = md.load(trj_path, top=top_path)
print(traj)
# RDF
# pairs = traj.top.select_pairs('name C', 'name C')
# r, g_r = md.compute_rdf(traj, pairs)
# plt.plot(r, g_r)
# plt.xlabel('r (nm)')
# plt.ylabel('g(r)')
# plt.show()
#
# s2 = md.compute_nematic_order(traj, 'residues')
# plt.plot(traj.time, s2)
# plt.xlabel('time (ps)')
# plt.ylabel('S2')
|
iModels/demos
|
demos/monolayer/monolayer.py
|
Python
|
mit
| 2,860
|
[
"MDTraj"
] |
b93a0c02eee88f079eae49e3b1cb042f0ee83ed02704b2f0afc7d0899f02a879
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from zope.interface import implements
from twisted.python import log, components
from twisted.internet import defer
import urllib
import time, locale
import operator
from buildbot import interfaces, util
from buildbot.status import builder, buildstep, build
from buildbot.changes import changes
from buildbot.status.web.base import Box, HtmlResource, IBox, ICurrentBox, \
ITopBox, build_get_class, path_to_build, path_to_step, path_to_root, \
map_branches
def earlier(old, new):
# minimum of two things, but "None" counts as +infinity
if old:
if new < old:
return new
return old
return new
def later(old, new):
# maximum of two things, but "None" counts as -infinity
if old:
if new > old:
return new
return old
return new
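# Illustrative, per the definitions above:
#   earlier(None, 5) == 5    later(None, 5) == 5
#   earlier(3, 5)    == 3    later(3, 5)    == 5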
class CurrentBox(components.Adapter):
# this provides the "current activity" box, just above the builder name
implements(ICurrentBox)
def formatETA(self, prefix, eta):
if eta is None:
return []
if eta < 60:
return ["< 1 min"]
eta_parts = ["~"]
eta_secs = eta
if eta_secs > 3600:
eta_parts.append("%d hrs" % (eta_secs / 3600))
eta_secs %= 3600
if eta_secs > 60:
eta_parts.append("%d mins" % (eta_secs / 60))
eta_secs %= 60
abstime = time.strftime("%H:%M", time.localtime(util.now()+eta))
return [prefix, " ".join(eta_parts), "at %s" % abstime]
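    # Illustrative: formatETA("ETA in", 3725) yields something like
    # ["ETA in", "~ 1 hrs 2 mins", "at 14:32"], while an eta under 60
    # seconds collapses to ["< 1 min"].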
def getBox(self, status, brcounts):
# getState() returns offline, idle, or building
state, builds = self.original.getState()
# look for upcoming builds. We say the state is "waiting" if the
# builder is otherwise idle and there is a scheduler which tells us a
# build will be performed some time in the near future. TODO: this
# functionality used to be in BuilderStatus.. maybe this code should
# be merged back into it.
upcoming = []
builderName = self.original.getName()
for s in status.getSchedulers():
if builderName in s.listBuilderNames():
upcoming.extend(s.getPendingBuildTimes())
if state == "idle" and upcoming:
state = "waiting"
if state == "building":
text = ["building"]
if builds:
for b in builds:
eta = b.getETA()
text.extend(self.formatETA("ETA in", eta))
elif state == "offline":
text = ["offline"]
elif state == "idle":
text = ["idle"]
elif state == "waiting":
text = ["waiting"]
else:
# just in case I add a state and forget to update this
text = [state]
# TODO: for now, this pending/upcoming stuff is in the "current
# activity" box, but really it should go into a "next activity" row
# instead. The only times it should show up in "current activity" is
# when the builder is otherwise idle.
# are any builds pending? (waiting for a slave to be free)
brcount = brcounts[builderName]
if brcount:
text.append("%d pending" % brcount)
for t in sorted(upcoming):
if t is not None:
eta = t - util.now()
text.extend(self.formatETA("next in", eta))
return Box(text, class_="Activity " + state)
components.registerAdapter(CurrentBox, builder.BuilderStatus, ICurrentBox)
class BuildTopBox(components.Adapter):
# this provides a per-builder box at the very top of the display,
# showing the results of the most recent build
implements(IBox)
def getBox(self, req):
assert interfaces.IBuilderStatus(self.original)
branches = [b for b in req.args.get("branch", []) if b]
builder = self.original
builds = list(builder.generateFinishedBuilds(map_branches(branches),
num_builds=1))
if not builds:
return Box(["none"], class_="LastBuild")
b = builds[0]
url = path_to_build(req, b)
text = b.getText()
tests_failed = b.getSummaryStatistic('tests-failed', operator.add, 0)
if tests_failed: text.extend(["Failed tests: %d" % tests_failed])
# TODO: maybe add logs?
class_ = build_get_class(b)
return Box(text, urlbase=url, class_="LastBuild %s" % class_)
components.registerAdapter(BuildTopBox, builder.BuilderStatus, ITopBox)
class BuildBox(components.Adapter):
# this provides the yellow "starting line" box for each build
implements(IBox)
def getBox(self, req):
b = self.original
number = b.getNumber()
url = path_to_build(req, b)
reason = b.getReason()
template = req.site.buildbot_service.templates.get_template("box_macros.html")
text = template.module.build_box(reason=reason,url=url,number=number)
class_ = "start"
if b.isFinished() and not b.getSteps():
# the steps have been pruned, so there won't be any indication
# of whether it succeeded or failed.
class_ = build_get_class(b)
return Box([text], class_="BuildStep " + class_)
components.registerAdapter(BuildBox, build.BuildStatus, IBox)
class StepBox(components.Adapter):
implements(IBox)
def getBox(self, req):
urlbase = path_to_step(req, self.original)
text = self.original.getText()
if text is None:
log.msg("getText() gave None", urlbase)
text = []
text = text[:]
logs = self.original.getLogs()
cxt = dict(text=text, logs=[], urls=[], stepinfo=self)
for num in range(len(logs)):
name = logs[num].getName()
if logs[num].hasContents():
url = urlbase + "/logs/%s" % urllib.quote(name)
else:
url = None
cxt['logs'].append(dict(name=name, url=url))
for name, target in self.original.getURLs().items():
cxt['urls'].append(dict(link=target,name=name))
template = req.site.buildbot_service.templates.get_template("box_macros.html")
text = template.module.step_box(**cxt)
class_ = "BuildStep " + build_get_class(self.original)
return Box(text, class_=class_)
components.registerAdapter(StepBox, buildstep.BuildStepStatus, IBox)
class EventBox(components.Adapter):
implements(IBox)
def getBox(self, req):
text = self.original.getText()
class_ = "Event"
return Box(text, class_=class_)
components.registerAdapter(EventBox, builder.Event, IBox)
class Spacer:
implements(interfaces.IStatusEvent)
def __init__(self, start, finish):
self.started = start
self.finished = finish
def getTimes(self):
return (self.started, self.finished)
def getText(self):
return []
class SpacerBox(components.Adapter):
implements(IBox)
def getBox(self, req):
#b = Box(["spacer"], "white")
b = Box([])
b.spacer = True
return b
components.registerAdapter(SpacerBox, Spacer, IBox)
def insertGaps(g, showEvents, lastEventTime, idleGap=2):
debug = False
e = g.next()
starts, finishes = e.getTimes()
if debug: log.msg("E0", starts, finishes)
if finishes == 0:
finishes = starts
if debug: log.msg("E1 finishes=%s, gap=%s, lET=%s" % \
(finishes, idleGap, lastEventTime))
if finishes is not None and finishes + idleGap < lastEventTime:
if debug: log.msg(" spacer0")
yield Spacer(finishes, lastEventTime)
followingEventStarts = starts
if debug: log.msg(" fES0", starts)
yield e
while 1:
e = g.next()
if not showEvents and isinstance(e, builder.Event):
continue
starts, finishes = e.getTimes()
if debug: log.msg("E2", starts, finishes)
if finishes == 0:
finishes = starts
if finishes is not None and finishes + idleGap < followingEventStarts:
# there is a gap between the end of this event and the beginning
# of the next one. Insert an idle event so the waterfall display
# shows a gap here.
if debug:
log.msg(" finishes=%s, gap=%s, fES=%s" % \
(finishes, idleGap, followingEventStarts))
yield Spacer(finishes, followingEventStarts)
yield e
followingEventStarts = starts
if debug: log.msg(" fES1", starts)
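# Illustrative: with idleGap=2, if one event starts at t=20 and the next
# (older) event in the newest-to-oldest stream finished at t=10, a
# Spacer(10, 20) is yielded so the waterfall renders the idle period.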
class WaterfallHelp(HtmlResource):
pageTitle = "Waterfall Help"
def __init__(self, categories=None):
HtmlResource.__init__(self)
self.categories = categories
def content(self, request, cxt):
status = self.getStatus(request)
cxt['show_events_checked'] = request.args.get("show_events", ["false"])[0].lower() == "true"
cxt['branches'] = [b for b in request.args.get("branch", []) if b]
cxt['failures_only'] = request.args.get("failures_only", ["false"])[0].lower() == "true"
cxt['committers'] = [c for c in request.args.get("committer", []) if c]
# this has a set of toggle-buttons to let the user choose the
# builders
show_builders = request.args.get("show", [])
show_builders.extend(request.args.get("builder", []))
cxt['show_builders'] = show_builders
cxt['all_builders'] = status.getBuilderNames(categories=self.categories)
# a couple of radio-button selectors for refresh time will appear
# just after that text
times = [("none", "None"),
("60", "60 seconds"),
("300", "5 minutes"),
("600", "10 minutes"),
]
current_reload_time = request.args.get("reload", ["none"])
if current_reload_time:
current_reload_time = current_reload_time[0]
if current_reload_time not in [t[0] for t in times]:
times.insert(0, (current_reload_time, current_reload_time) )
cxt['times'] = times
cxt['current_reload_time'] = current_reload_time
template = request.site.buildbot_service.templates.get_template("waterfallhelp.html")
return template.render(**cxt)
class ChangeEventSource(object):
"A wrapper around a list of changes to supply the IEventSource interface"
def __init__(self, changes):
self.changes = changes
# we want them in newest-to-oldest order
self.changes.reverse()
def eventGenerator(self, branches, categories, committers, minTime):
for change in self.changes:
if branches and change.branch not in branches:
continue
if categories and change.category not in categories:
continue
if committers and change.author not in committers:
continue
if minTime and change.when < minTime:
continue
yield change
class WaterfallStatusResource(HtmlResource):
"""This builds the main status page, with the waterfall display, and
all child pages."""
def __init__(self, categories=None, num_events=200, num_events_max=None):
HtmlResource.__init__(self)
self.categories = categories
self.num_events=num_events
self.num_events_max=num_events_max
self.putChild("help", WaterfallHelp(categories))
def getPageTitle(self, request):
status = self.getStatus(request)
p = status.getTitle()
if p:
return "BuildBot: %s" % p
else:
return "BuildBot"
def getChangeManager(self, request):
# TODO: this wants to go away, access it through IStatus
return request.site.buildbot_service.getChangeSvc()
def get_reload_time(self, request):
if "reload" in request.args:
try:
reload_time = int(request.args["reload"][0])
return max(reload_time, 15)
except ValueError:
pass
return None
def isSuccess(self, builderStatus):
# Helper function to return True if the builder is not failing.
# The function will return false if the current state is "offline",
# the last build was not successful, or if a step from the current
# build(s) failed.
# Make sure the builder is online.
if builderStatus.getState()[0] == 'offline':
return False
# Look at the last finished build to see if it was success or not.
lastBuild = builderStatus.getLastFinishedBuild()
if lastBuild and lastBuild.getResults() != builder.SUCCESS:
return False
# Check all the current builds to see if one step is already
# failing.
currentBuilds = builderStatus.getCurrentBuilds()
if currentBuilds:
for build in currentBuilds:
for step in build.getSteps():
if step.getResults()[0] == builder.FAILURE:
return False
# The last finished build was successful, and all the current builds
# don't have any failed steps.
return True
def content(self, request, ctx):
status = self.getStatus(request)
master = request.site.buildbot_service.master
# before calling content_with_db_data, make a bunch of database
# queries. This is a sick hack, but beats rewriting the entire
# waterfall around asynchronous calls
results = {}
# recent changes
changes_d = master.db.changes.getRecentChanges(40)
def to_changes(chdicts):
return defer.gatherResults([
changes.Change.fromChdict(master, chdict)
for chdict in chdicts ])
changes_d.addCallback(to_changes)
def keep_changes(changes):
results['changes'] = changes
changes_d.addCallback(keep_changes)
# build request counts for each builder
allBuilderNames = status.getBuilderNames(categories=self.categories)
brstatus_ds = []
brcounts = {}
def keep_count(statuses, builderName):
brcounts[builderName] = len(statuses)
for builderName in allBuilderNames:
builder_status = status.getBuilder(builderName)
d = builder_status.getPendingBuildRequestStatuses()
d.addCallback(keep_count, builderName)
brstatus_ds.append(d)
# wait for it all to finish
d = defer.gatherResults([ changes_d ] + brstatus_ds)
def call_content(_):
return self.content_with_db_data(results['changes'],
brcounts, request, ctx)
d.addCallback(call_content)
return d
def content_with_db_data(self, changes, brcounts, request, ctx):
status = self.getStatus(request)
ctx['refresh'] = self.get_reload_time(request)
# we start with all Builders available to this Waterfall: this is
# limited by the config-file -time categories= argument, and defaults
# to all defined Builders.
allBuilderNames = status.getBuilderNames(categories=self.categories)
builders = [status.getBuilder(name) for name in allBuilderNames]
# but if the URL has one or more builder= arguments (or the old show=
# argument, which is still accepted for backwards compatibility), we
# use that set of builders instead. We still don't show anything
# outside the config-file time set limited by categories=.
showBuilders = request.args.get("show", [])
showBuilders.extend(request.args.get("builder", []))
if showBuilders:
builders = [b for b in builders if b.name in showBuilders]
        # now, if the URL has one or more category= arguments, use them as a
# filter: only show those builders which belong to one of the given
# categories.
showCategories = request.args.get("category", [])
if showCategories:
builders = [b for b in builders if b.category in showCategories]
# If the URL has the failures_only=true argument, we remove all the
# builders that are not currently red or won't be turning red at the end
# of their current run.
failuresOnly = request.args.get("failures_only", ["false"])[0]
if failuresOnly.lower() == "true":
builders = [b for b in builders if not self.isSuccess(b)]
(changeNames, builderNames, timestamps, eventGrid, sourceEvents) = \
self.buildGrid(request, builders, changes)
# start the table: top-header material
locale_enc = locale.getdefaultlocale()[1]
if locale_enc is not None:
locale_tz = unicode(time.tzname[time.localtime()[-1]], locale_enc)
else:
locale_tz = unicode(time.tzname[time.localtime()[-1]])
ctx['tz'] = locale_tz
ctx['changes_url'] = request.childLink("../changes")
bn = ctx['builders'] = []
for name in builderNames:
builder = status.getBuilder(name)
top_box = ITopBox(builder).getBox(request)
current_box = ICurrentBox(builder).getBox(status, brcounts)
bn.append({'name': name,
'url': request.childLink("../builders/%s" % urllib.quote(name, safe='')),
'top': top_box.text,
'top_class': top_box.class_,
'status': current_box.text,
'status_class': current_box.class_,
})
ctx.update(self.phase2(request, changeNames + builderNames, timestamps, eventGrid,
sourceEvents))
def with_args(req, remove_args=[], new_args=[], new_path=None):
# sigh, nevow makes this sort of manipulation easier
newargs = req.args.copy()
for argname in remove_args:
newargs[argname] = []
if "branch" in newargs:
newargs["branch"] = [b for b in newargs["branch"] if b]
for k,v in new_args:
if k in newargs:
newargs[k].append(v)
else:
newargs[k] = [v]
newquery = "&".join(["%s=%s" % (urllib.quote(k), urllib.quote(v))
for k in newargs
for v in newargs[k]
])
if new_path:
new_url = new_path
elif req.prepath:
new_url = req.prepath[-1]
else:
new_url = ''
if newquery:
new_url += "?" + newquery
return new_url
if timestamps:
bottom = timestamps[-1]
ctx['nextpage'] = with_args(request, ["last_time"],
[("last_time", str(int(bottom)))])
helpurl = path_to_root(request) + "waterfall/help"
ctx['help_url'] = with_args(request, new_path=helpurl)
if self.get_reload_time(request) is not None:
ctx['no_reload_page'] = with_args(request, remove_args=["reload"])
template = request.site.buildbot_service.templates.get_template("waterfall.html")
data = template.render(**ctx)
return data
def buildGrid(self, request, builders, changes):
debug = False
# TODO: see if we can use a cached copy
showEvents = False
if request.args.get("show_events", ["false"])[0].lower() == "true":
showEvents = True
filterCategories = request.args.get('category', [])
filterBranches = [b for b in request.args.get("branch", []) if b]
filterBranches = map_branches(filterBranches)
filterCommitters = [c for c in request.args.get("committer", []) if c]
maxTime = int(request.args.get("last_time", [util.now()])[0])
if "show_time" in request.args:
minTime = maxTime - int(request.args["show_time"][0])
elif "first_time" in request.args:
minTime = int(request.args["first_time"][0])
elif filterBranches or filterCommitters:
minTime = util.now() - 24 * 60 * 60
else:
minTime = 0
spanLength = 10 # ten-second chunks
req_events=int(request.args.get("num_events", [self.num_events])[0])
if self.num_events_max and req_events > self.num_events_max:
maxPageLen = self.num_events_max
else:
maxPageLen = req_events
# first step is to walk backwards in time, asking each column
# (commit, all builders) if they have any events there. Build up the
# array of events, and stop when we have a reasonable number.
commit_source = ChangeEventSource(changes)
lastEventTime = util.now()
sources = [commit_source] + builders
changeNames = ["changes"]
builderNames = map(lambda builder: builder.getName(), builders)
sourceNames = changeNames + builderNames
sourceEvents = []
sourceGenerators = []
def get_event_from(g):
try:
while True:
e = g.next()
# e might be buildstep.BuildStepStatus,
# builder.BuildStatus, builder.Event,
# waterfall.Spacer(builder.Event), or changes.Change .
# The showEvents=False flag means we should hide
# builder.Event .
if not showEvents and isinstance(e, builder.Event):
continue
if isinstance(e, buildstep.BuildStepStatus):
# unfinished steps are always shown
if e.isFinished() and e.isHidden():
continue
break
event = interfaces.IStatusEvent(e)
if debug:
log.msg("gen %s gave1 %s" % (g, event.getText()))
except StopIteration:
event = None
return event
for s in sources:
gen = insertGaps(s.eventGenerator(filterBranches,
filterCategories,
filterCommitters,
minTime),
showEvents,
lastEventTime)
sourceGenerators.append(gen)
# get the first event
sourceEvents.append(get_event_from(gen))
eventGrid = []
timestamps = []
lastEventTime = 0
for e in sourceEvents:
if e and e.getTimes()[0] > lastEventTime:
lastEventTime = e.getTimes()[0]
if lastEventTime == 0:
lastEventTime = util.now()
spanStart = lastEventTime - spanLength
debugGather = 0
while 1:
if debugGather: log.msg("checking (%s,]" % spanStart)
# the tableau of potential events is in sourceEvents[]. The
# window crawls backwards, and we examine one source at a time.
            # If the source's top-most event is in the window, it is pushed
# onto the events[] array and the tableau is refilled. This
# continues until the tableau event is not in the window (or is
# missing).
spanEvents = [] # for all sources, in this span. row of eventGrid
firstTimestamp = None # timestamp of first event in the span
lastTimestamp = None # last pre-span event, for next span
for c in range(len(sourceGenerators)):
events = [] # for this source, in this span. cell of eventGrid
event = sourceEvents[c]
while event and spanStart < event.getTimes()[0]:
# to look at windows that don't end with the present,
# condition the .append on event.time <= spanFinish
if not IBox(event, None):
log.msg("BAD EVENT", event, event.getText())
assert 0
if debug:
log.msg("pushing", event.getText(), event)
events.append(event)
starts, finishes = event.getTimes()
firstTimestamp = earlier(firstTimestamp, starts)
event = get_event_from(sourceGenerators[c])
if debug:
log.msg("finished span")
if event:
# this is the last pre-span event for this source
lastTimestamp = later(lastTimestamp,
event.getTimes()[0])
if debugGather:
log.msg(" got %s from %s" % (events, sourceNames[c]))
sourceEvents[c] = event # refill the tableau
spanEvents.append(events)
# only show events older than maxTime. This makes it possible to
# visit a page that shows what it would be like to scroll off the
# bottom of this one.
if firstTimestamp is not None and firstTimestamp <= maxTime:
eventGrid.append(spanEvents)
timestamps.append(firstTimestamp)
if lastTimestamp:
spanStart = lastTimestamp - spanLength
else:
# no more events
break
if minTime is not None and lastTimestamp < minTime:
break
if len(timestamps) > maxPageLen:
break
# now loop
# loop is finished. now we have eventGrid[] and timestamps[]
if debugGather: log.msg("finished loop")
assert(len(timestamps) == len(eventGrid))
return (changeNames, builderNames, timestamps, eventGrid, sourceEvents)
def phase2(self, request, sourceNames, timestamps, eventGrid,
sourceEvents):
if not timestamps:
return dict(grid=[], gridlen=0)
# first pass: figure out the height of the chunks, populate grid
grid = []
for i in range(1+len(sourceNames)):
grid.append([])
# grid is a list of columns, one for the timestamps, and one per
# event source. Each column is exactly the same height. Each element
# of the list is a single <td> box.
lastDate = time.strftime("%d %b %Y",
time.localtime(util.now()))
for r in range(0, len(timestamps)):
chunkstrip = eventGrid[r]
# chunkstrip is a horizontal strip of event blocks. Each block
# is a vertical list of events, all for the same source.
assert(len(chunkstrip) == len(sourceNames))
maxRows = reduce(lambda x,y: max(x,y),
map(lambda x: len(x), chunkstrip))
for i in range(maxRows):
if i != maxRows-1:
grid[0].append(None)
else:
# timestamp goes at the bottom of the chunk
stuff = []
# add the date at the beginning (if it is not the same as
# today's date), and each time it changes
todayday = time.strftime("%a",
time.localtime(timestamps[r]))
today = time.strftime("%d %b %Y",
time.localtime(timestamps[r]))
if today != lastDate:
stuff.append(todayday)
stuff.append(today)
lastDate = today
stuff.append(
time.strftime("%H:%M:%S",
time.localtime(timestamps[r])))
grid[0].append(Box(text=stuff, class_="Time",
valign="bottom", align="center"))
# at this point the timestamp column has been populated with
# maxRows boxes, most None but the last one has the time string
for c in range(0, len(chunkstrip)):
block = chunkstrip[c]
assert(block != None) # should be [] instead
for i in range(maxRows - len(block)):
# fill top of chunk with blank space
grid[c+1].append(None)
for i in range(len(block)):
# so the events are bottom-justified
b = IBox(block[i]).getBox(request)
b.parms['valign'] = "top"
b.parms['align'] = "center"
grid[c+1].append(b)
# now all the other columns have maxRows new boxes too
# populate the last row, if empty
gridlen = len(grid[0])
for i in range(len(grid)):
strip = grid[i]
assert(len(strip) == gridlen)
if strip[-1] == None:
if sourceEvents[i-1]:
filler = IBox(sourceEvents[i-1]).getBox(request)
else:
# this can happen if you delete part of the build history
filler = Box(text=["?"], align="center")
strip[-1] = filler
strip[-1].parms['rowspan'] = 1
# second pass: bubble the events upwards to un-occupied locations
# Every square of the grid that has a None in it needs to have
# something else take its place.
noBubble = request.args.get("nobubble",['0'])
noBubble = int(noBubble[0])
if not noBubble:
for col in range(len(grid)):
strip = grid[col]
if col == 1: # changes are handled differently
for i in range(2, len(strip)+1):
# only merge empty boxes. Don't bubble commit boxes.
if strip[-i] == None:
next = strip[-i+1]
assert(next)
if next:
#if not next.event:
if next.spacer:
# bubble the empty box up
strip[-i] = next
strip[-i].parms['rowspan'] += 1
strip[-i+1] = None
else:
# we are above a commit box. Leave it
# be, and turn the current box into an
# empty one
strip[-i] = Box([], rowspan=1,
comment="commit bubble")
strip[-i].spacer = True
else:
# we are above another empty box, which
# somehow wasn't already converted.
# Shouldn't happen
pass
else:
for i in range(2, len(strip)+1):
# strip[-i] will go from next-to-last back to first
if strip[-i] == None:
# bubble previous item up
assert(strip[-i+1] != None)
strip[-i] = strip[-i+1]
strip[-i].parms['rowspan'] += 1
strip[-i+1] = None
else:
strip[-i].parms['rowspan'] = 1
# convert to dicts
for i in range(gridlen):
for strip in grid:
if strip[i]:
strip[i] = strip[i].td()
return dict(grid=grid, gridlen=gridlen, no_bubble=noBubble, time=lastDate)
|
denny820909/builder
|
lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/status/web/waterfall.py
|
Python
|
mit
| 33,047
|
[
"VisIt"
] |
559d2e307402c1e5a11d69d73ad0789d0592dda288ef3669eb4136bb9c6f1538
|
################################################################################
# #
# Copyright (C) 2010-2018 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Enhanced Diffusion Tutorial #
# #
##########################################################################
from __future__ import print_function
import numpy as np
import os
import sys
import time
import espressomd
from espressomd import assert_features
from espressomd.observables import ParticlePositions, ParticleVelocities, ParticleAngularVelocities
from espressomd.accumulators import Correlator
required_features = ["ENGINE", "ROTATION"]
assert_features(required_features)
# create an output folder
outdir = "./RESULTS_ENHANCED_DIFFUSION/"
try:
os.makedirs(outdir)
except OSError:
print("INFO: Directory \"{}\" exists".format(outdir))
##########################################################################
# Read in the active velocity from the command prompt
if len(sys.argv) != 2:
print("Usage:", sys.argv[0], "<vel> (0 <= vel < 10.0)")
exit()
vel = float(sys.argv[1])
# Set the basic simulation parameters
sampsteps = 5000
samplength = 1000
tstep = 0.01
## Exercise 2 ##
# Why can we get away with such a small box?
# Could it be even smaller?
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.cell_system.skin = 0.3
system.time_step = tstep
################################################################################
#
# To obtain accurate statistics, you will need to run the simulation
# several times, which is accomplished by this loop. Do not increase
# this number too much, as it will slow down the simulation.
#
##########################################################################
## Exercise 4 ##
# Once you have tested the routine for a single run, then
# make it such that you can loop over the run parameter
# and repeat the simulation 5 times.
for run in range(5):  # Exercise 4: one possible answer, repeating 5 runs
# Set up a random seed (a new one for each run)
## Exercise 1 ##
# Explain the choice of the random seed
system.seed = np.random.randint(0, 2**31 - 1)
# Use the Langevin thermostat (no hydrodynamics)
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# Place a single active particle (that can rotate freely! rotation=[1,1,1])
system.part.add(pos=[5.0, 5.0, 5.0], swimming={
'v_swim': vel}, rotation=[1, 1, 1])
# Initialize the mean squared displacement (MSD) correlator
tmax = tstep * sampsteps
pos_id = ParticlePositions(ids=[0])
msd = Correlator(obs1=pos_id,
corr_operation="square_distance_componentwise",
delta_N=1,
tau_max=tmax,
tau_lin=16)
system.auto_update_accumulators.add(msd)
## Exercise 3 ##
# Construct the auto-accumulators for the VACF and AVACF,
# using the example of the MSD
# Initialize the velocity auto-correlation function (VACF) correlator
...
# Initialize the angular velocity auto-correlation function (AVACF)
# correlator
...
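    # One possible solution sketch for Exercise 3 (hedged: it follows the MSD
    # pattern above; the observable and correlator names are our choice):
    #
    # vel_id = ParticleVelocities(ids=[0])
    # vacf = Correlator(obs1=vel_id,
    #                   corr_operation="scalar_product",
    #                   delta_N=1, tau_max=tmax, tau_lin=16)
    # system.auto_update_accumulators.add(vacf)
    #
    # omega_id = ParticleAngularVelocities(ids=[0])
    # avacf = Correlator(obs1=omega_id,
    #                    corr_operation="scalar_product",
    #                    delta_N=1, tau_max=tmax, tau_lin=16)
    # system.auto_update_accumulators.add(avacf)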
# Integrate 5,000,000 steps. This can be done in one go as well.
for i in range(sampsteps):
system.integrator.run(samplength)
# Finalize the accumulators and write to disk
system.auto_update_accumulators.remove(msd)
msd.finalize()
np.savetxt("{}/msd_{}_{}.dat".format(outdir, vel, run), msd.result())
...
...
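    # Matching finalization sketch for Exercise 3 (assumes the hypothetical
    # `vacf` and `avacf` accumulators from the sketch above):
    #
    # system.auto_update_accumulators.remove(vacf)
    # vacf.finalize()
    # np.savetxt("{}/vacf_{}_{}.dat".format(outdir, vel, run), vacf.result())
    #
    # system.auto_update_accumulators.remove(avacf)
    # avacf.finalize()
    # np.savetxt("{}/avacf_{}_{}.dat".format(outdir, vel, run), avacf.result())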
|
hmenke/espresso
|
doc/tutorials/06-active_matter/EXERCISES/enhanced_diffusion.py
|
Python
|
gpl-3.0
| 5,115
|
[
"ESPResSo"
] |
6a01fa560a37c082e4a40dc76d02c3dd0edf9e54b5830a840cac796be5b8e247
|
"""
Overview
--------
This module implements the Multiple Imputation through Chained
Equations (MICE) approach to handling missing data in statistical data
analyses. The approach has the following steps:
0. Impute each missing value with the mean of the observed values of
the same variable.
1. For each variable in the data set with missing values (termed the
'focus variable'), do the following:
1a. Fit an 'imputation model', which is a regression model for the
focus variable, regressed on the observed and (current) imputed values
of some or all of the other variables.
1b. Impute the missing values for the focus variable. Currently this
imputation must use the 'predictive mean matching' (pmm) procedure.
2. Once all variables have been imputed, fit the 'analysis model' to
the data set.
3. Repeat steps 1-2 multiple times and combine the results using a
'combining rule' to produce point estimates of all parameters in the
analysis model and standard errors for them.
The imputations for each variable are based on an imputation model
that is specified via a model class and a formula for the regression
relationship. The default model is OLS, with a formula specifying
main effects for all other variables.
The MICE procedure can be used in one of two ways:
* If the goal is only to produce imputed data sets, the MICEData class
can be used to wrap a data frame, providing facilities for doing the
imputation. Summary plots are available for assessing the performance
of the imputation.
* If the imputed data sets are to be used to fit an additional
'analysis model', a MICE instance can be used. After specifying the
MICE instance and running it, the results are combined using the
`combine` method. Results and various summary plots are then
available.
Terminology
-----------
The primary goal of the analysis is usually to fit and perform
inference using an 'analysis model'. If an analysis model is not
specified, then imputed datasets are produced for later use.
The MICE procedure involves a family of imputation models. There is
one imputation model for each variable with missing values. An
imputation model may be conditioned on all or a subset of the
remaining variables, using main effects, transformations,
interactions, etc. as desired.
A 'perturbation method' is a method for setting the parameter estimate
in an imputation model. The 'gaussian' perturbation method first fits
the model (usually using maximum likelihood, but it could use any
statsmodels fit procedure), then sets the parameter vector equal to a
draw from the Gaussian approximation to the sampling distribution for
the fit. The 'bootstrap' perturbation method sets the parameter
vector equal to a fitted parameter vector obtained when fitting the
conditional model to a bootstrapped version of the data set.
Class structure
---------------
There are two main classes in the module:
* 'MICEData' wraps a Pandas dataframe, incorporating information about
the imputation model for each variable with missing values. It can
be used to produce multiply imputed data sets that are to be further
processed or distributed to other researchers. A number of plotting
procedures are provided to visualize the imputation results and
missing data patterns. The `history_func` hook allows any features
of interest of the imputed data sets to be saved for further
analysis.
* 'MICE' takes both a 'MICEData' object and an analysis model
specification. It runs the multiple imputation, fits the analysis
models, and combines the results to produce a `MICEResults` object.
The summary method of this results object can be used to see the key
estimands and inferential quantities.
Notes
-----
By default, to conserve memory 'MICEData' saves very little
information from one iteration to the next. The data set passed by
the user is copied on entry, but then is over-written each time new
imputations are produced. If using 'MICE', the fitted
analysis models and results are saved. MICEData includes a
`history_callback` hook that allows arbitrary information from the
intermediate datasets to be saved for future use.
References
----------
JL Schafer: 'Multiple Imputation: A Primer', Stat Methods Med Res,
1999.
TE Raghunathan et al.: 'A Multivariate Technique for Multiply
Imputing Missing Values Using a Sequence of Regression Models', Survey
Methodology, 2001.
SAS Institute: 'Predictive Mean Matching Method for Monotone Missing
Data', SAS 9.2 User's Guide, 2014.
A Gelman et al.: 'Multiple Imputation with Diagnostics (mi) in R:
Opening Windows into the Black Box', Journal of Statistical Software,
2009.
"""
import pandas as pd
import numpy as np
import patsy
import statsmodels
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.regression.linear_model import OLS
from collections import defaultdict
_mice_data_example_1 = """
>>> imp = mice.MICEData(data)
>>> imp.set_imputer('x1', formula='x2 + np.square(x2) + x3')
>>> for j in range(20):
... imp.update_all()
... imp.data.to_csv('data%02d.csv' % j)"""
_mice_data_example_2 = """
>>> imp = mice.MICEData(data)
>>> j = 0
>>> for data in imp:
... imp.data.to_csv('data%02d.csv' % j)
... j += 1"""
class PatsyFormula(object):
"""
A simple wrapper for a string to be interpreted as a Patsy formula.
"""
def __init__(self, formula):
self.formula = "0 + " + formula
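# A hypothetical usage sketch: wrapping a string in PatsyFormula lets an
# `init_kwds`/`fit_kwds` value be rebuilt from the current imputed data via
# Patsy on every fit, e.g. for a model whose init takes a `weights` array
# (such as sm.WLS):
#
# >>> imp.set_imputer('x1', model_class=sm.WLS,
# ...                 init_kwds={'weights': PatsyFormula('w')})
#
# (`_process_kwds` below converts such values to design-matrix columns
# aligned with the rows being used.)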
class MICEData(object):
__doc__ = """\
Wrap a data set to allow missing data handling with MICE.
Parameters
----------
data : Pandas data frame
        The data set, which is copied internally.
perturbation_method : string
The default perturbation method
k_pmm : int
The number of nearest neighbors to use during predictive mean
matching. Can also be specified in `fit`.
history_callback : function
A function that is called after each complete imputation
cycle. The return value is appended to `history`. The
MICEData object is passed as the sole argument to
`history_callback`.
Examples
--------
Draw 20 imputations from a data set called `data` and save them in
separate files with filename pattern `dataXX.csv`. The variables
other than `x1` are imputed using linear models fit with OLS, with
mean structures containing main effects of all other variables in
    `data`. The variable named `x1` has a conditional mean structure
that includes an additional term for x2^2.
%(_mice_data_example_1)s
Impute using default models, using the MICEData object as an
iterator.
%(_mice_data_example_2)s
Notes
-----
Allowed perturbation methods are 'gaussian' (the model parameters
are set to a draw from the Gaussian approximation to the posterior
distribution), and 'boot' (the model parameters are set to the
estimated values obtained when fitting a bootstrapped version of
the data set).
`history_callback` can be implemented to have side effects such as
saving the current imputed data set to disk.
""" % {'_mice_data_example_1': _mice_data_example_1,
'_mice_data_example_2': _mice_data_example_2}
def __init__(self, data, perturbation_method='gaussian',
k_pmm=20, history_callback=None):
if data.columns.dtype != np.dtype('O'):
raise ValueError("MICEData data column names should be string type")
# Drop observations where all variables are missing. This
# also has the effect of copying the data frame.
self.data = data.dropna(how='all').reset_index(drop=True)
self.history_callback = history_callback
self.history = []
self.predict_kwds = {}
# Assign the same perturbation method for all variables.
        # Can be overridden when calling 'set_imputer'.
self.perturbation_method = defaultdict(lambda :
perturbation_method)
# Map from variable name to indices of observed/missing
# values.
self.ix_obs = {}
self.ix_miss = {}
for col in self.data.columns:
ix_obs, ix_miss = self._split_indices(self.data[col])
self.ix_obs[col] = ix_obs
self.ix_miss[col] = ix_miss
# Most recent model instance and results instance for each variable.
self.models = {}
self.results = {}
# Map from variable names to the conditional formula.
self.conditional_formula = {}
# Map from variable names to init/fit args of the conditional
# models.
self.init_kwds = defaultdict(lambda : dict())
self.fit_kwds = defaultdict(lambda : dict())
# Map from variable names to the model class.
self.model_class = {}
# Map from variable names to most recent params update.
self.params = {}
# Set default imputers.
for vname in data.columns:
self.set_imputer(vname)
# The order in which variables are imputed in each cycle.
# Impute variables with the fewest missing values first.
        vnames = list(data.columns)
nmiss = [len(self.ix_miss[v]) for v in vnames]
nmiss = np.asarray(nmiss)
ii = np.argsort(nmiss)
ii = ii[sum(nmiss == 0):]
self._cycle_order = [vnames[i] for i in ii]
self._initial_imputation()
self.k_pmm = k_pmm
def next_sample(self):
"""
Returns the next imputed dataset in the imputation process.
Returns
-------
data : array-like
An imputed dataset from the MICE chain.
Notes
-----
`MICEData` does not have a `skip` parameter. Consecutive
values returned by `next_sample` are immediately consecutive
in the imputation chain.
The returned value is a reference to the data attribute of
the class and should be copied before making any changes.
"""
self.update_all(1)
return self.data
def _initial_imputation(self):
"""
Use a PMM-like procedure for initial imputed values.
For each variable, missing values are imputed as the observed
value that is closest to the mean over all observed values.
"""
for col in self.data.columns:
di = self.data[col] - self.data[col].mean()
di = np.abs(di)
ix = di.idxmin()
imp = di.loc[ix]
self.data[col].fillna(imp, inplace=True)
def _split_indices(self, vec):
null = pd.isnull(vec)
ix_obs = np.flatnonzero(~null)
ix_miss = np.flatnonzero(null)
if len(ix_obs) == 0:
raise ValueError("variable to be imputed has no observed values")
return ix_obs, ix_miss
def set_imputer(self, endog_name, formula=None, model_class=None,
init_kwds=None, fit_kwds=None, predict_kwds=None,
k_pmm=20, perturbation_method=None):
"""
Specify the imputation process for a single variable.
Parameters
----------
endog_name : string
Name of the variable to be imputed.
formula : string
Conditional formula for imputation. Defaults to a formula
with main effects for all other variables in dataset. The
formula should only include an expression for the mean
structure, e.g. use 'x1 + x2' not 'x4 ~ x1 + x2'.
model_class : statsmodels model
Conditional model for imputation. Defaults to OLS. See below
for more information.
        init_kwds : dict-like
Keyword arguments passed to the model init method.
fit_kwds : dict-like
Keyword arguments passed to the model fit method.
predict_kwds : dict-like
Keyword arguments passed to the model predict method.
k_pmm : int
Determines number of neighboring observations from which
to randomly sample when using predictive mean matching.
perturbation_method : string
Either 'gaussian' or 'bootstrap'. Determines the method
for perturbing parameters in the imputation model. If
None, uses the default specified at class initialization.
Notes
-----
The model class must meet the following conditions:
* A model must have a 'fit' method that returns an object.
* The object returned from `fit` must have a `params` attribute
that is an array-like object.
* The object returned from `fit` must have a cov_params method
that returns a square array-like object.
* The model must have a `predict` method.
"""
if formula is None:
main_effects = [x for x in self.data.columns
if x != endog_name]
fml = endog_name + " ~ " + " + ".join(main_effects)
self.conditional_formula[endog_name] = fml
else:
fml = endog_name + " ~ " + formula
self.conditional_formula[endog_name] = fml
if model_class is None:
self.model_class[endog_name] = OLS
else:
self.model_class[endog_name] = model_class
if init_kwds is not None:
self.init_kwds[endog_name] = init_kwds
if fit_kwds is not None:
self.fit_kwds[endog_name] = fit_kwds
if predict_kwds is not None:
self.predict_kwds[endog_name] = predict_kwds
if perturbation_method is not None:
self.perturbation_method[endog_name] = perturbation_method
self.k_pmm = k_pmm
def _store_changes(self, col, vals):
"""
Fill in dataset with imputed values.
Parameters
----------
col : string
Name of variable to be filled in.
vals : array
Array of imputed values to use for filling-in missing values.
"""
ix = self.ix_miss[col]
if len(ix) > 0:
self.data[col].iloc[ix] = vals
def update_all(self, n_iter=1):
"""
Perform a specified number of MICE iterations.
Parameters
----------
n_iter : int
The number of updates to perform. Only the result of the
final update will be available.
Notes
-----
The imputed values are stored in the class attribute `self.data`.
"""
for k in range(n_iter):
for vname in self._cycle_order:
self.update(vname)
if self.history_callback is not None:
hv = self.history_callback(self)
self.history.append(hv)
def get_split_data(self, vname):
"""
Return endog and exog for imputation of a given variable.
Parameters
----------
vname : string
The variable for which the split data is returned.
Returns
-------
endog_obs : DataFrame
Observed values of the variable to be imputed.
exog_obs : DataFrame
Current values of the predictors where the variable to be
imputed is observed.
exog_miss : DataFrame
Current values of the predictors where the variable to be
            imputed is missing.
init_kwds : dict-like
The init keyword arguments for `vname`, processed through Patsy
as required.
fit_kwds : dict-like
The fit keyword arguments for `vname`, processed through Patsy
as required.
"""
formula = self.conditional_formula[vname]
endog, exog = patsy.dmatrices(formula, self.data,
return_type="dataframe")
# Rows with observed endog
ixo = self.ix_obs[vname]
endog_obs = np.asarray(endog.iloc[ixo])
exog_obs = np.asarray(exog.iloc[ixo, :])
# Rows with missing endog
ixm = self.ix_miss[vname]
exog_miss = np.asarray(exog.iloc[ixm, :])
predict_obs_kwds = {}
if vname in self.predict_kwds:
kwds = self.predict_kwds[vname]
predict_obs_kwds = self._process_kwds(kwds, ixo)
predict_miss_kwds = {}
if vname in self.predict_kwds:
kwds = self.predict_kwds[vname]
            predict_miss_kwds = self._process_kwds(kwds, ixm)  # rows with missing endog
return endog_obs, exog_obs, exog_miss, predict_obs_kwds, predict_miss_kwds
def _process_kwds(self, kwds, ix):
kwds = kwds.copy()
for k in kwds:
v = kwds[k]
if isinstance(v, PatsyFormula):
mat = patsy.dmatrix(v.formula, self.data,
return_type="dataframe")
mat = np.asarray(mat)[ix, :]
if mat.shape[1] == 1:
mat = mat[:, 0]
kwds[k] = mat
return kwds
def get_fitting_data(self, vname):
"""
Return the data needed to fit a model for imputation.
The data is used to impute variable `vname`, and therefore
only includes cases for which `vname` is observed.
Values of type `PatsyFormula` in `init_kwds` or `fit_kwds` are
processed through Patsy and subset to align with the model's
endog and exog.
Parameters
----------
vname : string
The variable for which the fitting data is returned.
Returns
-------
endog : DataFrame
Observed values of `vname`.
exog : DataFrame
Regression design matrix for imputing `vname`.
init_kwds : dict-like
The init keyword arguments for `vname`, processed through Patsy
as required.
fit_kwds : dict-like
The fit keyword arguments for `vname`, processed through Patsy
as required.
"""
# Rows with observed endog
ix = self.ix_obs[vname]
formula = self.conditional_formula[vname]
endog, exog = patsy.dmatrices(formula, self.data,
return_type="dataframe")
endog = np.asarray(endog.iloc[ix, 0])
exog = np.asarray(exog.iloc[ix, :])
init_kwds = self._process_kwds(self.init_kwds[vname], ix)
fit_kwds = self._process_kwds(self.fit_kwds[vname], ix)
return endog, exog, init_kwds, fit_kwds
def plot_missing_pattern(self, ax=None, row_order="pattern",
column_order="pattern",
hide_complete_rows=False,
hide_complete_columns=False,
color_row_patterns=True):
"""
Generate an image showing the missing data pattern.
Parameters
----------
ax : matplotlib axes
Axes on which to draw the plot.
row_order : string
The method for ordering the rows. Must be one of 'pattern',
'proportion', or 'raw'.
column_order : string
The method for ordering the columns. Must be one of 'pattern',
'proportion', or 'raw'.
hide_complete_rows : boolean
If True, rows with no missing values are not drawn.
hide_complete_columns : boolean
If True, columns with no missing values are not drawn.
color_row_patterns : boolean
If True, color the unique row patterns, otherwise use grey
and white as colors.
Returns
-------
A figure containing a plot of the missing data pattern.
"""
# Create an indicator matrix for missing values.
miss = np.zeros(self.data.shape)
cols = self.data.columns
for j, col in enumerate(cols):
ix = self.ix_miss[col]
miss[ix, j] = 1
# Order the columns as requested
if column_order == "proportion":
ix = np.argsort(miss.mean(0))
elif column_order == "pattern":
cv = np.cov(miss.T)
u, s, vt = np.linalg.svd(cv, 0)
            ix = np.argsort(u[:, 0])  # order by the leading singular vector
elif column_order == "raw":
ix = np.arange(len(cols))
else:
raise ValueError(column_order + " is not an allowed value for `column_order`.")
miss = miss[:, ix]
cols = [cols[i] for i in ix]
# Order the rows as requested
if row_order == "proportion":
ix = np.argsort(miss.mean(1))
elif row_order == "pattern":
x = 2**np.arange(miss.shape[1])
rky = np.dot(miss, x)
ix = np.argsort(rky)
elif row_order == "raw":
ix = np.arange(miss.shape[0])
else:
raise ValueError(row_order + " is not an allowed value for `row_order`.")
miss = miss[ix, :]
if hide_complete_rows:
ix = np.flatnonzero((miss == 1).any(1))
miss = miss[ix, :]
if hide_complete_columns:
ix = np.flatnonzero((miss == 1).any(0))
miss = miss[:, ix]
cols = [cols[i] for i in ix]
from statsmodels.graphics import utils as gutils
from matplotlib.colors import LinearSegmentedColormap
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
if color_row_patterns:
x = 2**np.arange(miss.shape[1])
rky = np.dot(miss, x)
_, rcol = np.unique(rky, return_inverse=True)
miss *= 1 + rcol[:, None]
ax.imshow(miss, aspect="auto", interpolation="nearest",
cmap='gist_ncar_r')
else:
cmap = LinearSegmentedColormap.from_list("_",
["white", "darkgrey"])
ax.imshow(miss, aspect="auto", interpolation="nearest",
cmap=cmap)
ax.set_ylabel("Cases")
ax.set_xticks(range(len(cols)))
ax.set_xticklabels(cols, rotation=90)
return fig
def plot_bivariate(self, col1_name, col2_name,
lowess_args=None, lowess_min_n=40,
jitter=None, plot_points=True, ax=None):
"""
Plot observed and imputed values for two variables.
Displays a scatterplot of one variable against another. The
points are colored according to whether the values are
observed or imputed.
Parameters
----------
col1_name : string
The variable to be plotted on the horizontal axis.
col2_name : string
The variable to be plotted on the vertical axis.
lowess_args : dictionary
A dictionary of dictionaries, keys are 'ii', 'io', 'oi'
and 'oo', where 'o' denotes 'observed' and 'i' denotes
imputed. See Notes for details.
lowess_min_n : integer
Minimum sample size to plot a lowess fit
jitter : float or tuple
Standard deviation for jittering points in the plot.
Either a single scalar applied to both axes, or a tuple
containing x-axis jitter and y-axis jitter, respectively.
plot_points : bool
If True, the data points are plotted.
ax : matplotlib axes object
Axes on which to plot, created if not provided.
Returns
-------
        The matplotlib figure on which the plot is drawn.
"""
from statsmodels.graphics import utils as gutils
from statsmodels.nonparametric.smoothers_lowess import lowess
if lowess_args is None:
lowess_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ix1i = self.ix_miss[col1_name]
ix1o = self.ix_obs[col1_name]
ix2i = self.ix_miss[col2_name]
ix2o = self.ix_obs[col2_name]
ix_ii = np.intersect1d(ix1i, ix2i)
ix_io = np.intersect1d(ix1i, ix2o)
ix_oi = np.intersect1d(ix1o, ix2i)
ix_oo = np.intersect1d(ix1o, ix2o)
vec1 = np.asarray(self.data[col1_name])
vec2 = np.asarray(self.data[col2_name])
if jitter is not None:
if np.isscalar(jitter):
jitter = (jitter, jitter)
vec1 += jitter[0] * np.random.normal(size=len(vec1))
vec2 += jitter[1] * np.random.normal(size=len(vec2))
# Plot the points
keys = ['oo', 'io', 'oi', 'ii']
lak = {'i': 'imp', 'o': 'obs'}
ixs = {'ii': ix_ii, 'io': ix_io, 'oi': ix_oi, 'oo': ix_oo}
color = {'oo': 'grey', 'ii': 'red', 'io': 'orange',
'oi': 'lime'}
if plot_points:
for ky in keys:
ix = ixs[ky]
lab = lak[ky[0]] + "/" + lak[ky[1]]
ax.plot(vec1[ix], vec2[ix], 'o', color=color[ky],
label=lab, alpha=0.6)
# Plot the lowess fits
for ky in keys:
ix = ixs[ky]
if len(ix) < lowess_min_n:
continue
if ky in lowess_args:
la = lowess_args[ky]
else:
la = {}
ix = ixs[ky]
lfit = lowess(vec2[ix], vec1[ix], **la)
if plot_points:
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4)
else:
lab = lak[ky[0]] + "/" + lak[ky[1]]
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4, label=lab)
ha, la = ax.get_legend_handles_labels()
pad = 0.0001 if plot_points else 0.5
leg = fig.legend(ha, la, 'center right', numpoints=1,
handletextpad=pad)
leg.draw_frame(False)
ax.set_xlabel(col1_name)
ax.set_ylabel(col2_name)
return fig
def plot_fit_obs(self, col_name, lowess_args=None,
lowess_min_n=40, jitter=None,
plot_points=True, ax=None):
"""
Plot fitted versus imputed or observed values as a scatterplot.
Parameters
----------
col_name : string
The variable to be plotted on the horizontal axis.
lowess_args : dict-like
Keyword arguments passed to lowess fit. A dictionary of
dictionaries, keys are 'o' and 'i' denoting 'observed' and
'imputed', respectively.
lowess_min_n : integer
Minimum sample size to plot a lowess fit
jitter : float or tuple
Standard deviation for jittering points in the plot.
Either a single scalar applied to both axes, or a tuple
containing x-axis jitter and y-axis jitter, respectively.
plot_points : bool
If True, the data points are plotted.
ax : matplotlib axes object
Axes on which to plot, created if not provided.
Returns
-------
The matplotlib figure on which the plot is drawn.
"""
from statsmodels.graphics import utils as gutils
from statsmodels.nonparametric.smoothers_lowess import lowess
import pandas as pd
if lowess_args is None:
lowess_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ixi = self.ix_miss[col_name]
ixo = self.ix_obs[col_name]
vec1 = np.asarray(self.data[col_name])
# Fitted values
formula = self.conditional_formula[col_name]
endog, exog = patsy.dmatrices(formula, self.data,
return_type="dataframe")
results = self.results[col_name]
vec2 = results.predict(exog=exog)
vec2 = self._get_predicted(vec2)
if jitter is not None:
if np.isscalar(jitter):
jitter = (jitter, jitter)
vec1 += jitter[0] * np.random.normal(size=len(vec1))
vec2 += jitter[1] * np.random.normal(size=len(vec2))
# Plot the points
keys = ['o', 'i']
ixs = {'o': ixo, 'i': ixi}
lak = {'o': 'obs', 'i': 'imp'}
color = {'o': 'orange', 'i': 'lime'}
if plot_points:
for ky in keys:
ix = ixs[ky]
ax.plot(vec1[ix], vec2[ix], 'o', color=color[ky],
label=lak[ky], alpha=0.6)
# Plot the lowess fits
for ky in keys:
ix = ixs[ky]
if len(ix) < lowess_min_n:
continue
if ky in lowess_args:
la = lowess_args[ky]
else:
la = {}
ix = ixs[ky]
lfit = lowess(vec2[ix], vec1[ix], **la)
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4, label=lak[ky])
ha, la = ax.get_legend_handles_labels()
leg = fig.legend(ha, la, 'center right', numpoints=1)
leg.draw_frame(False)
ax.set_xlabel(col_name + " observed or imputed")
ax.set_ylabel(col_name + " fitted")
return fig
def plot_imputed_hist(self, col_name, ax=None, imp_hist_args=None,
obs_hist_args=None, all_hist_args=None):
"""
Display imputed values for one variable as a histogram.
Parameters
----------
col_name : string
The name of the variable to be plotted.
ax : matplotlib axes
An axes on which to draw the histograms. If not provided,
one is created.
imp_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for imputed values.
obs_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for observed values.
all_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for all values.
Returns
-------
The matplotlib figure on which the histograms were drawn
"""
from statsmodels.graphics import utils as gutils
from matplotlib.colors import LinearSegmentedColormap
if imp_hist_args is None:
imp_hist_args = {}
if obs_hist_args is None:
obs_hist_args = {}
if all_hist_args is None:
all_hist_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ixm = self.ix_miss[col_name]
ixo = self.ix_obs[col_name]
imp = self.data[col_name].iloc[ixm]
obs = self.data[col_name].iloc[ixo]
for di in imp_hist_args, obs_hist_args, all_hist_args:
if 'histtype' not in di:
di['histtype'] = 'step'
ha, la = [], []
if len(imp) > 0:
h = ax.hist(np.asarray(imp), **imp_hist_args)
ha.append(h[-1][0])
la.append("Imp")
h1 = ax.hist(np.asarray(obs), **obs_hist_args)
h2 = ax.hist(np.asarray(self.data[col_name]), **all_hist_args)
ha.extend([h1[-1][0], h2[-1][0]])
la.extend(["Obs", "All"])
leg = fig.legend(ha, la, 'center right', numpoints=1)
leg.draw_frame(False)
ax.set_xlabel(col_name)
ax.set_ylabel("Frequency")
return fig
def _boot_kwds(self, kwds, rix):
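        """
        Subset any array-valued keyword arguments to the bootstrap rows `rix`.
        Scalar and non-array values are passed through unchanged.
        """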
for k in kwds:
v = kwds[k]
if not isinstance(v, np.ndarray):
continue
if (v.ndim == 1) and (v.shape[0] == len(rix)):
kwds[k] = v[rix]
if (v.ndim == 2) and (v.shape[0] == len(rix)):
kwds[k] = v[rix, :]
return kwds
def _perturb_bootstrap(self, vname):
"""
Perturbs the model's parameters using a bootstrap.
"""
endog, exog, init_kwds, fit_kwds = self.get_fitting_data(vname)
m = len(endog)
rix = np.random.randint(0, m, m)
endog = endog[rix]
exog = exog[rix, :]
init_kwds = self._boot_kwds(init_kwds, rix)
fit_kwds = self._boot_kwds(fit_kwds, rix)
klass = self.model_class[vname]
self.models[vname] = klass(endog, exog, **init_kwds)
self.results[vname] = self.models[vname].fit(**fit_kwds)
self.params[vname] = self.results[vname].params
def _perturb_gaussian(self, vname):
"""
Gaussian perturbation of model parameters.
The normal approximation to the sampling distribution of the
parameter estimates is used to define the mean and covariance
structure of the perturbation distribution.
"""
endog, exog, init_kwds, fit_kwds = self.get_fitting_data(vname)
klass = self.model_class[vname]
self.models[vname] = klass(endog, exog, **init_kwds)
self.results[vname] = self.models[vname].fit(**fit_kwds)
cov = self.results[vname].cov_params()
mu = self.results[vname].params
self.params[vname] = np.random.multivariate_normal(mean=mu, cov=cov)
def perturb_params(self, vname):
if self.perturbation_method[vname] == "gaussian":
self._perturb_gaussian(vname)
elif self.perturbation_method[vname] == "boot":
self._perturb_bootstrap(vname)
else:
raise ValueError("unknown perturbation method")
def impute(self, vname):
# Wrap this in case we later add additional imputation
# methods.
self.impute_pmm(vname)
def update(self, vname):
"""
Impute missing values for a single variable.
This is a two-step process in which first the parameters are
perturbed, then the missing values are re-imputed.
Parameters
----------
vname : string
The name of the variable to be updated.
"""
self.perturb_params(vname)
self.impute(vname)
# work-around for inconsistent predict return values
def _get_predicted(self, obj):
if isinstance(obj, np.ndarray):
return obj
elif isinstance(obj, pd.Series):
return obj.values
elif hasattr(obj, 'predicted_values'):
return obj.predicted_values
else:
raise ValueError("cannot obtain predicted values from %s" % obj.__class__)
def impute_pmm(self, vname):
"""
Use predictive mean matching to impute missing values.
Notes
-----
The `perturb_params` method must be called first to define the
model.
"""
k_pmm = self.k_pmm
endog_obs, exog_obs, exog_miss, predict_obs_kwds, predict_miss_kwds =\
self.get_split_data(vname)
# Predict imputed variable for both missing and non-missing
# observations
model = self.models[vname]
pendog_obs = model.predict(self.params[vname], exog_obs, **predict_obs_kwds)
pendog_miss = model.predict(self.params[vname], exog_miss, **predict_miss_kwds)
pendog_obs = self._get_predicted(pendog_obs)
pendog_miss = self._get_predicted(pendog_miss)
# Jointly sort the observed and predicted endog values for the
# cases with observed values.
ii = np.argsort(pendog_obs)
endog_obs = endog_obs[ii]
pendog_obs = pendog_obs[ii]
# Find the closest match to the predicted endog values for
# cases with missing endog values.
ix = np.searchsorted(pendog_obs, pendog_miss)
# Get the indices for the closest k_pmm values on
# either side of the closest index.
ixm = ix[:, None] + np.arange(-k_pmm, k_pmm)[None, :]
# Account for boundary effects
msk = np.nonzero((ixm < 0) | (ixm > len(endog_obs) - 1))
ixm = np.clip(ixm, 0, len(endog_obs) - 1)
# Get the distances
dx = pendog_miss[:, None] - pendog_obs[ixm]
dx = np.abs(dx)
dx[msk] = np.inf
# Closest positions in ix, row-wise.
dxi = np.argsort(dx, 1)[:, 0:k_pmm]
# Choose a column for each row.
ir = np.random.randint(0, k_pmm, len(pendog_miss))
# Unwind the indices
jj = np.arange(dxi.shape[0])
ix = dxi[[jj, ir]]
iz = ixm[[jj, ix]]
imputed_miss = np.array(endog_obs[iz])
self._store_changes(vname, imputed_miss)
_mice_example_1 = """
>>> imp = mice.MICEData(data)
>>> fml = 'y ~ x1 + x2 + x3 + x4'
>>> mice = mice.MICE(fml, sm.OLS, imp)
>>> results = mice.fit(10, 10)
>>> print(results.summary())
::
Results: MICE
=================================================================
Method: MICE Sample size: 1000
Model: OLS Scale 1.00
Dependent variable: y Num. imputations 10
-----------------------------------------------------------------
Coef. Std.Err. t P>|t| [0.025 0.975] FMI
-----------------------------------------------------------------
Intercept -0.0234 0.0318 -0.7345 0.4626 -0.0858 0.0390 0.0128
x1 1.0305 0.0578 17.8342 0.0000 0.9172 1.1437 0.0309
x2 -0.0134 0.0162 -0.8282 0.4076 -0.0451 0.0183 0.0236
x3 -1.0260 0.0328 -31.2706 0.0000 -1.0903 -0.9617 0.0169
x4 -0.0253 0.0336 -0.7520 0.4521 -0.0911 0.0406 0.0269
=================================================================
"""
_mice_example_2 = """
>>> imp = mice.MICEData(data)
>>> fml = 'y ~ x1 + x2 + x3 + x4'
>>> mice = mice.MICE(fml, sm.OLS, imp)
>>> results = []
>>> for k in range(10):
>>> x = mice.next_sample()
>>> results.append(x)
"""
class MICE(object):
__doc__ = """\
Multiple Imputation with Chained Equations.
This class can be used to fit most Statsmodels models to data sets
with missing values using the 'multiple imputation with chained
    equations' (MICE) approach.
Parameters
----------
model_formula : string
The model formula to be fit to the imputed data sets. This
formula is for the 'analysis model'.
model_class : statsmodels model
The model to be fit to the imputed data sets. This model
        class is for the 'analysis model'.
data : MICEData instance
MICEData object containing the data set for which
missing values will be imputed
n_skip : int
The number of imputed datasets to skip between consecutive
imputed datasets that are used for analysis.
init_kwds : dict-like
Dictionary of keyword arguments passed to the init method
of the analysis model.
fit_kwds : dict-like
Dictionary of keyword arguments passed to the fit method
of the analysis model.
Examples
--------
Run all MICE steps and obtain results:
%(mice_example_1)s
Obtain a sequence of fitted analysis models without combining
to obtain summary:
%(mice_example_2)s
""" % {'mice_example_1' : _mice_example_1,
'mice_example_2' : _mice_example_2}
def __init__(self, model_formula, model_class, data, n_skip=3,
init_kwds=None, fit_kwds=None):
self.model_formula = model_formula
self.model_class = model_class
self.n_skip = n_skip
self.data = data
self.results_list = []
self.init_kwds = init_kwds if init_kwds is not None else {}
self.fit_kwds = fit_kwds if fit_kwds is not None else {}
def next_sample(self):
"""
Perform one complete MICE iteration.
A single MICE iteration updates all missing values using their
respective imputation models, then fits the analysis model to
the imputed data.
Returns
-------
params : array-like
The model parameters for the analysis model.
Notes
-----
This function fits the analysis model and returns its
parameter estimate. The parameter vector is not stored by the
class and is not used in any subsequent calls to `combine`.
Use `fit` to run all MICE steps together and obtain summary
results.
The complete cycle of missing value imputation followed by
fitting the analysis model is repeated `n_skip + 1` times and
the analysis model parameters from the final fit are returned.
"""
# Impute missing values
self.data.update_all(self.n_skip + 1)
start_params = None
if len(self.results_list) > 0:
start_params = self.results_list[-1].params
# Fit the analysis model.
model = self.model_class.from_formula(self.model_formula,
self.data.data,
**self.init_kwds)
self.fit_kwds.update({"start_params": start_params})
result = model.fit(**self.fit_kwds)
return result
def fit(self, n_burnin=10, n_imputations=10):
"""
Fit a model using MICE.
Parameters
----------
n_burnin : int
The number of burn-in cycles to skip.
n_imputations : int
The number of data sets to impute
"""
# Run without fitting the analysis model
self.data.update_all(n_burnin)
for j in range(n_imputations):
result = self.next_sample()
self.results_list.append(result)
self.endog_names = result.model.endog_names
self.exog_names = result.model.exog_names
return self.combine()
def combine(self):
"""
Pools MICE imputation results.
This method can only be used after the `run` method has been
called. Returns estimates and standard errors of the analysis
model parameters.
Returns a MICEResults instance.
"""
# Extract a few things from the models that were fit to
# imputed data sets.
params_list = []
cov_within = 0.
scale_list = []
for results in self.results_list:
results_uw = results._results
params_list.append(results_uw.params)
cov_within += results_uw.cov_params()
scale_list.append(results.scale)
params_list = np.asarray(params_list)
scale_list = np.asarray(scale_list)
# The estimated parameters for the MICE analysis
params = params_list.mean(0)
# The average of the within-imputation covariances
cov_within /= len(self.results_list)
# The between-imputation covariance
cov_between = np.cov(params_list.T)
# The estimated covariance matrix for the MICE analysis
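        # (Rubin's rules: T = W + (1 + 1/m) * B, with W the mean
        # within-imputation covariance, B the between-imputation
        # covariance, and m the number of imputations)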
f = 1 + 1 / float(len(self.results_list))
cov_params = cov_within + f * cov_between
# Fraction of missing information
fmi = f * np.diag(cov_between) / np.diag(cov_params)
# Set up a results instance
scale = np.mean(scale_list)
results = MICEResults(self, params, cov_params / scale)
results.scale = scale
results.frac_miss_info = fmi
results.exog_names = self.exog_names
results.endog_names = self.endog_names
results.model_class = self.model_class
return results
class MICEResults(LikelihoodModelResults):
def __init__(self, model, params, normalized_cov_params):
super(MICEResults, self).__init__(model, params,
normalized_cov_params)
def summary(self, title=None, alpha=.05):
"""
Summarize the results of running MICE.
Parameters
        ----------
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
Significance level for the confidence intervals
Returns
-------
smry : Summary instance
This holds the summary tables and text, which can be
printed or converted to various output formats.
"""
from statsmodels.iolib import summary2
from statsmodels.compat.collections import OrderedDict
smry = summary2.Summary()
float_format = "%8.3f"
info = OrderedDict()
info["Method:"] = "MICE"
info["Model:"] = self.model_class.__name__
info["Dependent variable:"] = self.endog_names
info["Sample size:"] = "%d" % self.model.data.data.shape[0]
info["Scale"] = "%.2f" % self.scale
info["Num. imputations"] = "%d" % len(self.model.results_list)
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param["FMI"] = self.frac_miss_info
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
return smry
|
bert9bert/statsmodels
|
statsmodels/imputation/mice.py
|
Python
|
bsd-3-clause
| 45,391
|
[
"Gaussian"
] |
362ec9c64bb6dd25928aed5df7cf257c2becf0a48137c069e6e87fcff89baabd
|
"""
.. _tut_forward:
Head model and forward computation
==================================
The aim of this tutorial is to get you started with forward
computation.
For more extensive details and a presentation of the general
concepts of forward modeling, see :ref:`ch_forward`.
"""
import mne
from mne.datasets import sample
data_path = sample.data_path()
# the raw file containing the channel location + types
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# The paths to Freesurfer reconstructions
subjects_dir = data_path + '/subjects'
subject = 'sample'
###############################################################################
# Computing the forward operator
# ------------------------------
#
# To compute a forward operator we need:
#
# - a ``-trans.fif`` file that contains the coregistration info.
# - a source space
# - the BEM surfaces
###############################################################################
# Compute and visualize BEM surfaces
# ----------------------------------
#
# The BEM surfaces are the triangulations of the interfaces between different
# tissues needed for forward computation. These surfaces are for example
# the inner skull surface, the outer skull surface and the outer skin
# surface.
#
# Computing the BEM surfaces requires FreeSurfer and makes use of either of
# the two following command line tools:
#
# - :ref:`gen_mne_watershed_bem`
# - :ref:`gen_mne_flash_bem`
#
# Here we'll assume it's already computed. It takes a few minutes per subject.
#
# For EEG we use 3 layers (inner skull, outer skull, and skin) while for
# MEG 1 layer (inner skull) is enough.
#
# Let's look at these surfaces. The function :func:`mne.viz.plot_bem`
# assumes that the *bem* folder of your subject's FreeSurfer reconstruction
# contains the necessary files.
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', orientation='coronal')
###############################################################################
# Visualizing the coregistration
# --------------------------------
#
# Coregistration is the operation that positions the head and the sensors
# in a common coordinate system. In the MNE software, the transformation
# to align the head and the sensors is stored in a so-called **trans file**.
# It is a FIF file that ends with -trans.fif. It can be obtained with
# mne_analyze (Unix tools), mne.gui.coregistration (in Python) or mrilab
# if you're using a Neuromag system.
#
# For the Python version see :func:`mne.gui.coregistration`
#
# Here we assume the coregistration is done, so we just visually check the
# alignment with the following code.
# The transformation file obtained by coregistration
trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
info = mne.io.read_info(raw_fname)
# Here we look at the dense head, which isn't used for BEM computations but
# is useful for coregistration.
mne.viz.plot_alignment(info, trans, subject=subject, dig=True,
meg=['helmet', 'sensors'], subjects_dir=subjects_dir,
surfaces='head-dense')
###############################################################################
# Compute Source Space
# --------------------
#
# The source space defines the position of the candidate source locations.
# The following code computes such a cortical source space with
# an OCT-6 resolution.
#
# See :ref:`setting_up_source_space` for details on source space definition
# and spacing parameter.
src = mne.setup_source_space(subject, spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
print(src)
###############################################################################
# ``src`` contains two parts, one for the left hemisphere (4098 locations) and
# one for the right hemisphere (4098 locations). Sources can be visualized on
# top of the BEM surfaces.
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', src=src, orientation='coronal')
###############################################################################
# However, only sources that lie in the plotted MRI slices are shown.
# Let's write a few lines of mayavi to see all sources.
import numpy as np # noqa
from mayavi import mlab # noqa
from surfer import Brain # noqa
brain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir)
surf = brain.geo['lh']
vertidx = np.where(src[0]['inuse'])[0]
mlab.points3d(surf.x[vertidx], surf.y[vertidx],
surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)
###############################################################################
# Compute forward solution
# ------------------------
#
# We can now compute the forward solution.
# To reduce computation we'll just compute a single layer BEM (just inner
# skull) that can then be used for MEG (not EEG).
#
# We specify if we want a one-layer or a three-layer BEM using the
# conductivity parameter.
#
# The BEM solution requires a BEM model which describes the geometry
# of the head and the conductivities of the different tissues.
conductivity = (0.3,) # for single layer
# conductivity = (0.3, 0.006, 0.3) # for three layers
model = mne.make_bem_model(subject='sample', ico=4,
conductivity=conductivity,
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
###############################################################################
# Note that the BEM does not involve any use of the trans file. The BEM
# only depends on the head geometry and conductivities.
# It is therefore independent from the MEG data and the head position.
#
# Let's now compute the forward operator, commonly referred to as the
# gain or leadfield matrix.
#
# See :func:`mne.make_forward_solution` for details on parameters meaning.
fwd = mne.make_forward_solution(raw_fname, trans=trans, src=src, bem=bem,
meg=True, eeg=False, mindist=5.0, n_jobs=2)
print(fwd)
###############################################################################
# We can explore the content of fwd to access the numpy array that contains
# the gain matrix.
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
###############################################################################
# To extract the numpy array containing the forward operator corresponding to
# the source space `fwd['src']` with cortical orientation constraint
# we can use the following:
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
leadfield = fwd_fixed['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
###############################################################################
# This is equivalent to the following code that explicitly applies the
# forward operator to a source estimate composed of the identity operator:
n_dipoles = leadfield.shape[1]
vertices = [src_hemi['vertno'] for src_hemi in fwd_fixed['src']]
stc = mne.SourceEstimate(1e-9 * np.eye(n_dipoles), vertices, tmin=0., tstep=1)
leadfield = mne.apply_forward(fwd_fixed, stc, info).data / 1e-9
###############################################################################
# To save to disk a forward solution you can use
# :func:`mne.write_forward_solution` and to read it back from disk
# :func:`mne.read_forward_solution`. Don't forget that FIF files containing
# forward solution should end with *-fwd.fif*.
#
# To get a fixed-orientation forward solution, use
# :func:`mne.convert_forward_solution` to convert the free-orientation
# solution to (surface-oriented) fixed orientation.
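# For instance (a minimal sketch; the output filename here is our choice):
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-oct6-fwd.fif'
mne.write_forward_solution(fwd_fname, fwd, overwrite=True)
fwd_read = mne.read_forward_solution(fwd_fname)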
###############################################################################
# Exercise
# --------
#
# By looking at
# :ref:`sphx_glr_auto_examples_forward_plot_forward_sensitivity_maps.py`
# plot the sensitivity maps for EEG and compare it with the MEG, can you
# justify the claims that:
#
# - MEG is not sensitive to radial sources
# - EEG is more sensitive to deep sources
#
# How will the MEG sensitivity maps and histograms change if you use a free
# orientation instead of a fixed/surface-oriented one?
#
# Try this by changing the ``mode`` parameter in :func:`mne.sensitivity_map`
# accordingly. Why don't we see any dipoles on the gyri?
|
teonlamont/mne-python
|
tutorials/plot_forward.py
|
Python
|
bsd-3-clause
| 8,449
|
[
"Mayavi"
] |
52c9b4122dae3c49b7f37111bea44271fa76f5d15926066c6ead3a5553469e0e
|
from sympy import Basic, Symbol, Integer, C, S, Dummy, Rational, Add, Pow
from sympy.core.numbers import Zero
from sympy.core.sympify import sympify, converter, SympifyError
from sympy.core.compatibility import is_sequence
from sympy.polys import Poly, roots, cancel
from sympy.simplify import simplify as sympy_simplify
from sympy.utilities.iterables import flatten
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions.elementary.complexes import re, Abs
from sympy.printing import sstr
from sympy.core.compatibility import callable, reduce
import random
class MatrixError(Exception):
pass
class ShapeError(ValueError, MatrixError):
"""Wrong matrix shape"""
pass
class NonSquareMatrixError(ShapeError):
pass
def _dims_to_nm(dims):
"""Converts dimensions tuple (or any object with length 1 or 2) or scalar
in dims to matrix dimensions n and m."""
try:
l = len(dims)
except TypeError:
dims = (dims,)
l = 1
# This will work for nd-array too when they are added to sympy.
for dim in dims:
if dim < 0:
raise ValueError("Matrix dimensions should be non-negative integers.")
if l == 2:
n, m = map(int, dims)
elif l == 1:
n = m = int(dims[0])
else:
raise ValueError("Matrix dimensions should be a two-element tuple of ints or a single int!")
return n, m
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class DeferredVector(object):
def __init__(self,name):
self.name=name
def __getitem__(self,i):
component_name = '%s[%d]'%(self.name,i)
return Symbol(component_name)
def __str__(self):
return sstr(self)
def __repr__(self):
return sstr(self)
class Matrix(object):
# Added just for numpy compatibility
# TODO: investigate about __array_priority__
__array_priority__ = 10.0
def __init__(self, *args):
"""
Matrix can be constructed with values or a rule.
>>> from sympy import Matrix, I
>>> Matrix( ((1,2+I), (3,4)) ) #doctest:+NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> Matrix(2, 2, lambda i,j: (i+1)*j ) #doctest:+NORMALIZE_WHITESPACE
[0, 1]
[0, 2]
"""
if len(args) == 3 and callable(args[2]):
operation = args[2]
self.rows = int(args[0])
self.cols = int(args[1])
self.mat = []
for i in range(self.rows):
for j in range(self.cols):
self.mat.append(sympify(operation(i, j)))
elif len(args)==3 and is_sequence(args[2]):
self.rows=args[0]
self.cols=args[1]
mat = args[2]
if len(mat) != len(self):
raise ValueError('List length should be equal to rows*columns')
self.mat = map(lambda i: sympify(i), mat)
elif len(args) == 1:
mat = args[0]
if isinstance(mat, Matrix):
self.rows = mat.rows
self.cols = mat.cols
self.mat = mat[:]
return
elif hasattr(mat, "__array__"):
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a python list out of it.
arr = mat.__array__()
if len(arr.shape) == 2:
self.rows, self.cols = arr.shape[0], arr.shape[1]
self.mat = map(lambda i: sympify(i), arr.ravel())
return
elif len(arr.shape) == 1:
self.rows, self.cols = 1, arr.shape[0]
self.mat = [0]*self.cols
for i in xrange(len(arr)):
self.mat[i] = sympify(arr[i])
return
else:
raise NotImplementedError("Sympy supports just 1D and 2D matrices")
elif not is_sequence(mat, include=Matrix):
raise TypeError("Matrix constructor doesn't accept %s as input" % str(type(mat)))
mat = []
for row in args[0]:
if isinstance(row, Matrix):
mat.extend(row.tolist())
else:
mat.append(row)
self.rows = len(mat)
if len(mat) != 0:
if not is_sequence(mat[0]):
self.cols = 1
self.mat = map(lambda i: sympify(i), mat)
return
self.cols = len(mat[0])
else:
self.cols = 0
self.mat = []
for j in xrange(self.rows):
if len(mat[j]) != self.cols:
raise ValueError("Input %s inconsistant to form a Matrix." %
args)
for i in xrange(self.cols):
self.mat.append(sympify(mat[j][i]))
elif len(args) == 0:
# Empty Matrix
self.rows = self.cols = 0
self.mat = []
else:
raise TypeError("Data type not understood")
def key2ij(self,key):
"""Converts key=(4,6) to 4,6 and ensures the key is correct."""
if not (is_sequence(key) and len(key) == 2):
raise TypeError("wrong syntax: a[%s]. Use a[i,j] or a[(i,j)]"
%repr(key))
i,j=key
        if not (i >= 0 and i < self.rows and j >= 0 and j < self.cols):
            raise IndexError("Index out of range: a[%s]" % repr(key))
return i,j
def transpose(self):
"""
Matrix transposition.
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m.transpose() #doctest: +NORMALIZE_WHITESPACE
[ 1, 3]
[2 + I, 4]
>>> m.T == m.transpose()
True
"""
a = [0]*len(self)
for i in xrange(self.cols):
a[i*self.rows:(i+1)*self.rows] = self.mat[i::self.cols]
return Matrix(self.cols,self.rows,a)
T = property(transpose,None,None,"Matrix transposition.")
def conjugate(self):
"""By-element conjugation."""
out = Matrix(self.rows,self.cols,
lambda i,j: self[i,j].conjugate())
return out
C = property(conjugate,None,None,"By-element conjugation.")
@property
def H(self):
"""
        Hermitian conjugation.
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m.H #doctest: +NORMALIZE_WHITESPACE
[ 1, 3]
[2 - I, 4]
"""
out = self.T.C
return out
@property
def D(self):
"""Dirac conjugation."""
from sympy.physics.matrices import mgamma
out = self.H * mgamma(0)
return out
def __getitem__(self,key):
"""
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m[1,0]
3
>>> m.H[1,0]
2 - I
"""
if type(key) is tuple:
i, j = key
if type(i) is slice or type(j) is slice:
return self.submatrix(key)
else:
# a2idx inlined
if not type(i) is int:
try:
i = i.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
# a2idx inlined
if not type(j) is int:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
raise IndexError("Index out of range: a[%s]" % (key,))
else:
return self.mat[i*self.cols + j]
else:
# row-wise decomposition of matrix
if type(key) is slice:
return self.mat[key]
else:
k = a2idx(key)
if k is not None:
return self.mat[k]
raise IndexError("Invalid index: a[%s]" % repr(key))
def __setitem__(self, key, value):
"""
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m[1,0]=9
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[9, 4]
"""
if type(key) is tuple:
i, j = key
if type(i) is slice or type(j) is slice:
if isinstance(value, Matrix):
self.copyin_matrix(key, value)
return
if is_sequence(value):
self.copyin_list(key, value)
return
else:
# a2idx inlined
if not type(i) is int:
try:
i = i.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
# a2idx inlined
if not type(j) is int:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
raise IndexError("Index out of range: a[%s]" % (key,))
else:
self.mat[i*self.cols + j] = sympify(value)
return
else:
# row-wise decomposition of matrix
if type(key) is slice:
raise IndexError("Vector slices not implemented yet.")
else:
k = a2idx(key)
if k is not None:
self.mat[k] = sympify(value)
return
raise IndexError("Invalid index: a[%s]"%repr(key))
def __array__(self):
return matrix2numpy(self)
def __len__(self):
"""
Return the number of elements of self.
Implemented mainly so bool(Matrix()) == False.
"""
return self.rows * self.cols
def tolist(self):
"""
Return the Matrix converted in a python list.
>>> from sympy import Matrix
>>> m=Matrix(3, 3, range(9))
>>> m
[0, 1, 2]
[3, 4, 5]
[6, 7, 8]
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
ret = [0]*self.rows
for i in xrange(self.rows):
ret[i] = self.mat[i*self.cols:(i+1)*self.cols]
return ret
def copyin_matrix(self, key, value):
rlo, rhi = self.slice2bounds(key[0], self.rows)
clo, chi = self.slice2bounds(key[1], self.cols)
if value.rows != rhi - rlo or value.cols != chi - clo:
raise ShapeError("The Matrix `value` doesn't have the same dimensions " +
"as the in sub-Matrix given by `key`.")
for i in range(value.rows):
for j in range(value.cols):
self[i+rlo, j+clo] = sympify(value[i,j])
def copyin_list(self, key, value):
if not is_sequence(value):
raise TypeError("`value` must be an ordered iterable, not %s." % type(value))
self.copyin_matrix(key, Matrix(value))
def hash(self):
"""Compute a hash every time, because the matrix elements
could change."""
return hash(self.__str__() )
@property
def shape(self):
return (self.rows, self.cols)
def __rmul__(self,a):
if hasattr(a, "__array__") and a.shape != ():
return matrix_multiply(a,self)
out = Matrix(self.rows,self.cols,map(lambda i: a*i,self.mat))
return out
def expand(self):
out = Matrix(self.rows,self.cols,map(lambda i: i.expand(), self.mat))
return out
def combine(self):
out = Matrix(self.rows,self.cols,map(lambda i: i.combine(),self.mat))
return out
def subs(self, *args):
out = Matrix(self.rows,self.cols,map(lambda i: i.subs(*args),self.mat))
return out
def __sub__(self,a):
return self + (-a)
def __mul__(self,a):
if hasattr(a, "__array__") and a.shape != ():
return matrix_multiply(self,a)
out = Matrix(self.rows,self.cols,map(lambda i: i*a,self.mat))
return out
def __pow__(self, num):
if not self.is_square:
raise NonSquareMatrixError()
if isinstance(num, int) or isinstance(num, Integer):
n = int(num)
if n < 0:
return self.inv() ** -n # A**-2 = (A**-1)**2
a = eye(self.cols)
s = self
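            # Exponentiation by squaring: each pass halves n while squaring s,
            # multiplying the accumulator 'a' by s whenever the lowest bit of
            # n is set, so only O(log n) matrix multiplications are needed.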
while n:
if n%2:
a *= s
n -= 1
s *= s
n //= 2
return a
elif isinstance(num, Rational):
try:
P, D = self.diagonalize()
except MatrixError:
raise NotImplementedError("Implemented only for diagonalizable matrices")
for i in range(D.rows):
D[i, i] = D[i, i]**num
return P * D * P.inv()
else:
raise NotImplementedError("Only integer and rational values are supported")
def __add__(self,a):
return matrix_add(self,a)
def __radd__(self,a):
return matrix_add(a,self)
def __div__(self,a):
return self * (S.One/a)
def __truediv__(self,a):
return self.__div__(a)
def multiply(self,b):
"""Returns self*b """
return matrix_multiply(self,b)
def add(self,b):
"""Return self+b """
return matrix_add(self,b)
def __neg__(self):
return -1*self
def __eq__(self, a):
if not isinstance(a, (Matrix, Basic)):
a = sympify(a)
if isinstance(a, Matrix) and self.shape == a.shape:
return all(self[i, j] == a[i, j]
for i in xrange(self.rows)
for j in xrange(self.cols))
else:
return False
def __ne__(self, a):
if not isinstance(a, (Matrix, Basic)):
a = sympify(a)
if isinstance(a, Matrix) and self.shape == a.shape:
return any(self[i, j] != a[i, j]
for i in xrange(self.rows)
for j in xrange(self.cols))
else:
return True
def __hash__(self):
return super(Matrix, self).__hash__()
def _format_str(self, strfunc, rowsep='\n'):
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return '[]'
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
string = strfunc(self[i,j])
res[-1].append(string)
maxlen[j] = max(len(string), maxlen[j])
# Patch strings together
for i, row in enumerate(res):
for j, elem in enumerate(row):
# Pad each element up to maxlen so the columns line up
row[j] = elem.rjust(maxlen[j])
res[i] = "[" + ", ".join(row) + "]"
return rowsep.join(res)
def __str__(self):
return sstr(self)
def __repr__(self):
return sstr(self)
def cholesky(self):
"""
Returns the Cholesky Decomposition L of a Matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
>>> from sympy.matrices import Matrix
>>> A = Matrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
[ 5, 0, 0]
[ 3, 3, 0]
[-1, 1, 3]
>>> A.cholesky() * A.cholesky().T
[25, 15, -5]
[15, 18, 0]
[-5, 0, 11]
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._cholesky()
def _cholesky(self):
"""
Helper function of cholesky.
Without the error checks.
To be used privately. """
L = zeros((self.rows, self.rows))
for i in xrange(self.rows):
for j in xrange(i):
L[i, j] = (1 / L[j, j]) * (self[i, j] - sum(L[i, k] * L[j, k]
for k in xrange(j)))
L[i, i] = (self[i, i] - sum(L[i, k] ** 2
for k in xrange(i))) ** (S(1)/2)
return L
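    # The loops above implement the standard Cholesky recurrences:
    #   L[i, j] = (A[i, j] - sum(L[i, k]*L[j, k] for k < j)) / L[j, j]   for j < i
    #   L[i, i] = sqrt(A[i, i] - sum(L[i, k]**2 for k < i))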
def LDLdecomposition(self):
"""
Returns the LDL Decomposition (L,D) of matrix A,
such that L * D * L.T == A
This method eliminates the use of square root.
Further this ensures that all the diagonal entries of L are 1.
A must be a square, symmetric, positive-definite
and non-singular matrix.
>>> from sympy.matrices import Matrix, eye
>>> A = Matrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> L, D = A.LDLdecomposition()
>>> L
[ 1, 0, 0]
[ 3/5, 1, 0]
[-1/5, 1/3, 1]
>>> D
[25, 0, 0]
[ 0, 9, 0]
[ 0, 0, 9]
>>> L * D * L.T * A.inv() == eye(A.rows)
True
"""
if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._LDLdecomposition()
def _LDLdecomposition(self):
"""
Helper function of LDLdecomposition.
Without the error checks.
To be used privately.
"""
D = zeros((self.rows, self.rows))
L = eye(self.rows)
for i in xrange(self.rows):
for j in xrange(i):
L[i, j] = (1 / D[j, j]) * (self[i, j] - sum(
L[i, k] * L[j, k] * D[k, k] for k in xrange(j)))
D[i, i] = self[i, i] - sum(L[i, k]**2 * D[k, k]
for k in xrange(i))
return L, D
def lower_triangular_solve(self, rhs):
"""
Solves Ax = B, where A is a lower triangular matrix.
"""
if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
raise ShapeError("Matrices size mismatch.")
if not self.is_lower():
raise ValueError("Matrix must be lower triangular.")
return self._lower_triangular_solve(rhs)
def _lower_triangular_solve(self, rhs):
"""
Helper function of function lower_triangular_solve.
Without the error checks.
To be used privately.
"""
X = zeros((self.rows, 1))
for i in xrange(self.rows):
if self[i, i] == 0:
raise TypeError("Matrix must be non-singular.")
X[i, 0] = (rhs[i, 0] - sum(self[i, k] * X[k, 0]
for k in xrange(i))) / self[i, i]
return X
def upper_triangular_solve(self, rhs):
"""
Solves Ax = B, where A is an upper triangular matrix.
"""
if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        if rhs.rows != self.rows:
            raise ShapeError("Matrix size mismatch.")
        if not self.is_upper():
            raise ValueError("Matrix must be upper triangular.")
return self._upper_triangular_solve(rhs)
def _upper_triangular_solve(self, rhs):
"""
Helper function of function upper_triangular_solve.
Without the error checks, to be used privately. """
X = zeros((self.rows, 1))
for i in reversed(xrange(self.rows)):
if self[i, i] == 0:
raise ValueError("Matrix must be non-singular.")
X[i, 0] = (rhs[i, 0] - sum(self[i, k] * X[k, 0]
for k in xrange(i+1, self.rows))) / self[i, i]
return X
def cholesky_solve(self, rhs):
"""
Solves Ax = B using Cholesky decomposition,
for a general square non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
"""
if self.is_symmetric():
L = self._cholesky()
elif self.rows >= self.cols:
L = (self.T * self)._cholesky()
rhs = self.T * rhs
else:
raise NotImplementedError("Under-determined System.")
Y = L._lower_triangular_solve(rhs)
return (L.T)._upper_triangular_solve(Y)
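    # Illustrative usage (values chosen only for the example):
    #   A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    #   b = Matrix(3, 1, [1, 2, 3])
    #   x = A.cholesky_solve(b)   # solves A*x == b via A = L*L.T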
def diagonal_solve(self, rhs):
"""
Solves Ax = B efficiently, where A is a diagonal Matrix,
with non-zero diagonal entries.
"""
        if not self.is_diagonal():
            raise TypeError("Matrix should be diagonal")
        if rhs.rows != self.rows:
            raise ShapeError("Size mismatch.")
return self._diagonal_solve(rhs)
def _diagonal_solve(self, rhs):
"""
Helper function of function diagonal_solve,
without the error checks, to be used privately.
"""
return Matrix(rhs.rows, 1, lambda i, j: rhs[i, 0] / self[i, i])
def LDLsolve(self, rhs):
"""
Solves Ax = B using LDL decomposition,
for a general square and non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
"""
if self.is_symmetric():
L, D = self.LDLdecomposition()
elif self.rows >= self.cols:
L, D = (self.T * self).LDLdecomposition()
rhs = self.T * rhs
else:
raise NotImplementedError("Under-determined System.")
Y = L._lower_triangular_solve(rhs)
Z = D._diagonal_solve(Y)
return (L.T)._upper_triangular_solve(Z)
def inv(self, method="GE", iszerofunc=_iszero, try_block_diag=False):
"""
Calculates the matrix inverse.
According to the "method" parameter, it calls the appropriate method:
GE .... inverse_GE()
LU .... inverse_LU()
ADJ ... inverse_ADJ()
According to the "try_block_diag" parameter, it will try to form block
diagonal matrices using the method get_diag_blocks(), invert these
individually, and then reconstruct the full inverse matrix.
Note, the GE and LU methods may require the matrix to be simplified
before it is inverted in order to properly detect zeros during
        pivoting. In difficult cases a custom zero detection function can
        be provided by setting the iszerofunc argument to a function that
        should return True if its argument is zero.
"""
if not self.is_square:
raise NonSquareMatrixError()
if try_block_diag:
blocks = self.get_diag_blocks()
r = []
for block in blocks:
r.append(block.inv(method=method, iszerofunc=iszerofunc))
return diag(*r)
if method == "GE":
return self.inverse_GE(iszerofunc=iszerofunc)
elif method == "LU":
return self.inverse_LU(iszerofunc=iszerofunc)
elif method == "ADJ":
return self.inverse_ADJ()
else:
raise ValueError("Inversion method unrecognized")
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i,j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def row(self, i, f):
"""
Elementary row operation using functor
>>> from sympy import ones
>>> I = ones(3)
>>> I.row(1,lambda i,j: i*3)
>>> I
[1, 1, 1]
[3, 3, 3]
[1, 1, 1]
"""
for j in range(0, self.cols):
self[i, j] = f(self[i, j], j)
def col(self, j, f):
"""
Elementary column operation using functor
>>> from sympy import ones
>>> I = ones(3)
>>> I.col(0,lambda i,j: i*3)
>>> I
[3, 1, 1]
[3, 1, 1]
[3, 1, 1]
"""
for i in range(0, self.rows):
self[i, j] = f(self[i, j], i)
def row_swap(self, i, j):
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def col_swap(self, i, j):
for k in range(0, self.rows):
self[k, i], self[k, j] = self[k, j], self[k, i]
def row_del(self, i):
self.mat = self.mat[:i*self.cols] + self.mat[(i+1)*self.cols:]
self.rows -= 1
def col_del(self, i):
"""
>>> import sympy
>>> M = sympy.matrices.eye(3)
>>> M.col_del(1)
>>> M #doctest: +NORMALIZE_WHITESPACE
[1, 0]
[0, 0]
[0, 1]
"""
for j in range(self.rows-1, -1, -1):
del self.mat[i+j*self.cols]
self.cols -= 1
def row_join(self, rhs):
"""
Concatenates two matrices along self's last and rhs's first column
>>> from sympy import Matrix
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> V = Matrix(3,1,lambda i,j: 3+i+j)
>>> M.row_join(V)
[0, 1, 2, 3]
[1, 2, 3, 4]
[2, 3, 4, 5]
"""
if self.rows != rhs.rows:
raise ShapeError("`self` and `rhs` must have the same number of rows.")
newmat = self.zeros((self.rows, self.cols + rhs.cols))
newmat[:,:self.cols] = self[:,:]
newmat[:,self.cols:] = rhs
return newmat
def col_join(self, bott):
"""
Concatenates two matrices along self's last and bott's first row
>>> from sympy import Matrix
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> V = Matrix(1,3,lambda i,j: 3+i+j)
>>> M.col_join(V)
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
"""
if self.cols != bott.cols:
raise ShapeError("`self` and `bott` must have the same number of columns.")
newmat = self.zeros((self.rows+bott.rows, self.cols))
newmat[:self.rows,:] = self[:,:]
newmat[self.rows:,:] = bott
return newmat
def row_insert(self, pos, mti):
"""
>>> from sympy import Matrix, zeros
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> M
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
>>> V = zeros((1, 3))
>>> V
[0, 0, 0]
>>> M.row_insert(1,V)
[0, 1, 2]
[0, 0, 0]
[1, 2, 3]
[2, 3, 4]
"""
        if pos == 0:
return mti.col_join(self)
if self.cols != mti.cols:
raise ShapeError("`self` and `mti` must have the same number of columns.")
newmat = self.zeros((self.rows + mti.rows, self.cols))
newmat[:pos,:] = self[:pos,:]
newmat[pos:pos+mti.rows,:] = mti[:,:]
newmat[pos+mti.rows:,:] = self[pos:,:]
return newmat
def col_insert(self, pos, mti):
"""
>>> from sympy import Matrix, zeros
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> M
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
>>> V = zeros((3, 1))
>>> V
[0]
[0]
[0]
>>> M.col_insert(1,V)
[0, 0, 1, 2]
[1, 0, 2, 3]
[2, 0, 3, 4]
"""
        if pos == 0:
return mti.row_join(self)
if self.rows != mti.rows:
raise ShapeError("self and mti must have the same number of rows.")
newmat = self.zeros((self.rows, self.cols + mti.cols))
newmat[:,:pos] = self[:,:pos]
newmat[:,pos:pos+mti.cols] = mti[:,:]
newmat[:,pos+mti.cols:] = self[:,pos:]
return newmat
def trace(self):
if not self.is_square:
raise NonSquareMatrixError()
trace = 0
for i in range(self.cols):
trace += self[i,i]
return trace
def submatrix(self, keys):
"""
>>> from sympy import Matrix
>>> m = Matrix(4,4,lambda i,j: i+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3]
[1, 2, 3, 4]
[2, 3, 4, 5]
[3, 4, 5, 6]
>>> m[0:1, 1] #doctest: +NORMALIZE_WHITESPACE
[1]
>>> m[0:2, 0:1] #doctest: +NORMALIZE_WHITESPACE
[0]
[1]
>>> m[2:4, 2:4] #doctest: +NORMALIZE_WHITESPACE
[4, 5]
[5, 6]
"""
if not isinstance(keys[0], slice) and not isinstance(keys[1], slice):
raise TypeError("At least one element of `keys` must be a slice object.")
rlo, rhi = self.slice2bounds(keys[0], self.rows)
clo, chi = self.slice2bounds(keys[1], self.cols)
if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
outLines, outCols = rhi-rlo, chi-clo
outMat = [0]*outLines*outCols
for i in xrange(outLines):
outMat[i*outCols:(i+1)*outCols] = self.mat[(i+rlo)*self.cols+clo:(i+rlo)*self.cols+chi]
return Matrix(outLines,outCols,outMat)
def extract(self, rowsList, colsList):
"""
Extract a submatrix by specifying a list of rows and columns
Examples:
>>> from sympy import Matrix
>>> m = Matrix(4, 3, lambda i, j: i*3 + j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2]
[3, 4, 5]
[6, 7, 8]
[9, 10, 11]
>>> m.extract([0,1,3],[0,1]) #doctest: +NORMALIZE_WHITESPACE
[0, 1]
[3, 4]
[9, 10]
See also: .submatrix()
"""
cols = self.cols
rows = self.rows
mat = self.mat
if not all(i < rows for i in rowsList):
raise IndexError("Row indices out of range")
if not all(j < cols for j in colsList):
raise IndexError("Column indices out of range")
return Matrix(len(rowsList), len(colsList), lambda i,j: mat[rowsList[i]*cols + colsList[j]])
def slice2bounds(self, key, defmax):
"""
Takes slice or number and returns (min,max) for iteration
        Takes a default maxval to deal with the slice ':' which is (None, None)
"""
if isinstance(key, slice):
lo, hi = 0, defmax
if key.start is not None:
if key.start >= 0:
lo = key.start
else:
lo = defmax+key.start
if key.stop is not None:
if key.stop >= 0:
hi = key.stop
else:
hi = defmax+key.stop
return lo, hi
elif isinstance(key, int):
if key >= 0:
return key, key+1
else:
return defmax+key, defmax+key+1
else:
raise IndexError("Improper index type")
def applyfunc(self, f):
"""
>>> from sympy import Matrix
>>> m = Matrix(2,2,lambda i,j: i*2+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1]
[2, 3]
>>> m.applyfunc(lambda i: 2*i) #doctest: +NORMALIZE_WHITESPACE
[0, 2]
[4, 6]
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = Matrix(self.rows,self.cols,map(f,self.mat))
return out
def evalf(self, prec=None, **options):
if prec is None:
return self.applyfunc(lambda i: i.evalf(**options))
else:
return self.applyfunc(lambda i: i.evalf(prec, **options))
def reshape(self, _rows, _cols):
"""
>>> from sympy import Matrix
>>> m = Matrix(2,3,lambda i,j: 1)
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 1, 1]
[1, 1, 1]
>>> m.reshape(1,6) #doctest: +NORMALIZE_WHITESPACE
[1, 1, 1, 1, 1, 1]
>>> m.reshape(3,2) #doctest: +NORMALIZE_WHITESPACE
[1, 1]
[1, 1]
[1, 1]
"""
        if len(self) != _rows*_cols:
            raise ValueError("Invalid reshape parameters %d %d" % (_rows, _cols))
return Matrix(_rows, _cols, lambda i,j: self.mat[i*_cols + j])
def print_nonzero (self, symb="X"):
"""
Shows location of non-zero entries for fast shape lookup ::
>>> from sympy import Matrix, matrices
>>> m = Matrix(2,3,lambda i,j: i*3+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2]
[3, 4, 5]
>>> m.print_nonzero() #doctest: +NORMALIZE_WHITESPACE
[ XX]
[XXX]
>>> m = matrices.eye(4)
>>> m.print_nonzero("x") #doctest: +NORMALIZE_WHITESPACE
[x ]
[ x ]
[ x ]
[ x]
"""
s = ""
for i in range(self.rows):
s += "["
for j in range(self.cols):
if self[i,j] == 0:
s += " "
else:
                    s += symb
s += "]\n"
print s
def LUsolve(self, rhs, iszerofunc=_iszero):
"""
Solve the linear system Ax = b for x.
self is the coefficient matrix A and rhs is the right side b.
This is for symbolic matrices, for real or complex ones use
sympy.mpmath.lu_solve or sympy.mpmath.qr_solve.
"""
if rhs.rows != self.rows:
raise ShapeError("`self` and `rhs` must have the same number of rows.")
        A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
n = self.rows
b = rhs.permuteFwd(perm)
# forward substitution, all diag entries are scaled to 1
for i in range(n):
for j in range(i):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
# backward substitution
for i in range(n-1,-1,-1):
for j in range(i+1, n):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
b.row(i, lambda x,k: x / A[i,i])
return b
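    # Illustrative usage (values chosen only for the example):
    #   A = Matrix(((2, 1), (1, 3)))
    #   b = Matrix(2, 1, [1, 2])
    #   x = A.LUsolve(b)   # x satisfies A*x == b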
def LUdecomposition(self, iszerofunc=_iszero):
"""
Returns the decomposition LU and the row swaps p.
Example:
>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
[ 1, 0]
[3/2, 1]
>>> U
[4, 3]
[0, -3/2]
"""
        combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i,j] = combined[i,j]
else:
if i == j:
L[i,i] = 1
U[i,j] = combined[i,j]
return L, U, p
def LUdecomposition_Simple(self, iszerofunc=_iszero):
"""
        Returns a single matrix A composed of L and U (L's diagonal entries
        are all 1) and p, the list of row swaps (in order).
"""
if not self.is_square:
raise NonSquareMatrixError()
n = self.rows
A = self[:,:]
p = []
# factorization
for j in range(n):
for i in range(j):
for k in range(i):
A[i,j] = A[i,j] - A[i,k]*A[k,j]
pivot = -1
for i in range(j,n):
for k in range(j):
A[i,j] = A[i,j] - A[i,k]*A[k,j]
# find the first non-zero pivot, includes any expression
if pivot == -1 and not iszerofunc(A[i,j]):
pivot = i
if pivot < 0:
# this result is based on iszerofunc's analysis of the possible pivots, so even though
# the element may not be strictly zero, the supplied iszerofunc's evaluation gave True
raise ValueError("No nonzero pivot found; inversion failed.")
if pivot != j: # row must be swapped
A.row_swap(pivot,j)
p.append([pivot,j])
scale = 1 / A[j,j]
for i in range(j+1,n):
A[i,j] = A[i,j] * scale
return A, p
def LUdecompositionFF(self):
"""
Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
If the elements of the matrix belong to some integral domain I, then all
elements of L, D and U are guaranteed to belong to I.
**Reference**
- W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
for LU and QR factors". Frontiers in Computer Science in China,
Vol 2, no. 1, pp. 67-80, 2008.
"""
n, m = self.rows, self.cols
U, L, P = self[:,:], eye(n), eye(n)
DD = zeros(n) # store it smarter since it's just diagonal
oldpivot = 1
for k in range(n-1):
if U[k,k] == 0:
for kpivot in range(k+1, n):
if U[kpivot, k] != 0:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
L[k,k] = Ukk = U[k,k]
DD[k,k] = oldpivot * Ukk
for i in range(k+1, n):
L[i,k] = Uik = U[i,k]
for j in range(k+1, m):
U[i,j] = (Ukk * U[i,j] - U[k,j]*Uik) / oldpivot
U[i,k] = 0
oldpivot = Ukk
DD[n-1,n-1] = oldpivot
return P, L, DD, U
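    # Illustrative usage (values chosen only for the example):
    #   A = Matrix(((1, 2), (3, 4)))
    #   P, L, DD, U = A.LUdecompositionFF()   # P*A == L * DD.inv() * U
    #   (all entries of L, DD and U stay in the same integral domain as A)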
def cofactorMatrix(self, method="berkowitz"):
out = Matrix(self.rows, self.cols, lambda i,j:
self.cofactor(i, j, method))
return out
def minorEntry(self, i, j, method="berkowitz"):
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
return self.minorMatrix(i,j).det(method)
def minorMatrix(self, i, j):
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
return self.delRowCol(i,j)
def cofactor(self, i, j, method="berkowitz"):
if (i+j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1 * self.minorEntry(i, j, method)
def jacobian(self, X):
"""
Calculates the Jacobian matrix (derivative of a vectorial function).
*self*
A vector of expressions representing functions f_i(x_1, ..., x_n).
*X*
The set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order
(jacobian() should always work).
Examples::
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
[cos(phi), -rho*sin(phi)]
[sin(phi), rho*cos(phi)]
[ 2*rho, 0]
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
[cos(phi), -rho*sin(phi)]
[sin(phi), rho*cos(phi)]
"""
if not isinstance(X, Matrix):
X = Matrix(X)
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return Matrix(m, n, lambda j, i: self[j].diff(X[i]))
def QRdecomposition(self):
"""
Return Q,R where A = Q*R, Q is orthogonal and R is upper triangular.
Examples
This is the example from wikipedia::
>>> from sympy import Matrix, eye
>>> A = Matrix([[12,-51,4],[6,167,-68],[-4,24,-41]])
>>> Q, R = A.QRdecomposition()
>>> Q
[ 6/7, -69/175, -58/175]
[ 3/7, 158/175, 6/175]
[-2/7, 6/35, -33/35]
>>> R
[14, 21, -14]
[ 0, 175, -70]
[ 0, 0, 35]
>>> A == Q*R
True
QR factorization of an identity matrix
>>> A = Matrix([[1,0,0],[0,1,0],[0,0,1]])
>>> Q, R = A.QRdecomposition()
>>> Q
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
>>> R
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
"""
        if not self.rows >= self.cols:
            raise MatrixError("The number of rows must not be less than the number of columns")
n = self.rows
m = self.cols
rank = n
row_reduced = self.rref()[0]
for i in range(row_reduced.rows):
if Matrix(row_reduced[i*m:(i+1)*m]).norm() == 0:
rank -= 1
if not rank == self.cols:
raise MatrixError("The rank of the matrix must match the columns")
Q, R = self.zeros((n, m)), self.zeros(m)
for j in range(m): # for each column vector
tmp = self[:,j] # take original v
for i in range(j):
                # subtract the projection of self's column onto the new vector
tmp -= Q[:,i] * self[:,j].dot(Q[:,i])
tmp.expand()
# normalize it
R[j,j] = tmp.norm()
Q[:,j] = tmp / R[j,j]
if Q[:,j].norm() != 1:
raise NotImplementedError("Could not normalize the vector %d." % j)
for i in range(j):
R[i,j] = Q[:,i].dot(self[:,j])
return Q,R
def QRsolve(self, b):
"""
Solve the linear system 'Ax = b'.
'self' is the matrix 'A', the method argument is the vector
'b'. The method returns the solution vector 'x'. If 'b' is a
matrix, the system is solved for each column of 'b' and the
return value is a matrix of the same shape as 'b'.
This method is slower (approximately by a factor of 2) but
more stable for floating-point arithmetic than the LUsolve method.
        However, LUsolve usually uses exact arithmetic, so you don't need
to use QRsolve.
This is mainly for educational purposes and symbolic matrices, for real
(or complex) matrices use sympy.mpmath.qr_solve.
"""
Q, R = self.QRdecomposition()
y = Q.T * b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n-1, -1, -1):
tmp = y[j,:]
for k in range(j+1, n):
tmp -= R[j,k] * x[n-1-k]
x.append(tmp/R[j,j])
return Matrix([row.mat for row in reversed(x)])
# Utility functions
def simplify(self, simplify=sympy_simplify, ratio=1.7):
"""Simplify the elements of a matrix in place.
If (result length)/(input length) > ratio, then input is returned
unmodified. If 'ratio=oo', then simplify() is applied anyway.
See also simplify().
"""
for i in xrange(len(self.mat)):
self.mat[i] = simplify(self.mat[i], ratio=ratio)
#def evaluate(self): # no more eval() so should be removed
# for i in range(self.rows):
# for j in range(self.cols):
# self[i,j] = self[i,j].eval()
def cross(self, b):
if not is_sequence(b, include=Matrix):
raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
type(b))
        if not ((self.rows == 1 and self.cols == 3 or
                 self.rows == 3 and self.cols == 1) and
                (b.rows == 1 and b.cols == 3 or
                 b.rows == 3 and b.cols == 1)):
            raise ShapeError("Dimensions incorrect for cross product.")
else:
return Matrix(1,3,((self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def dot(self, b):
if not is_sequence(b, include=Matrix):
raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
type(b))
m = len(b)
if len(self) != m:
raise ShapeError("Dimensions incorrect for dot product.")
prod = 0
for i in range(m):
prod += self[i] * b[i]
return prod
def multiply_elementwise(self, b):
"""Return the Hadamard product (elementwise product) of A and B
>>> import sympy
>>> A = sympy.Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = sympy.Matrix([[1, 10, 100], [100, 10, 1]])
>>> print A.multiply_elementwise(B)
[ 0, 10, 200]
[300, 40, 5]
"""
return matrix_multiply_elementwise(self, b)
def norm(self, ord=None):
"""Return the Norm of a Matrix or Vector.
In the simplest case this is the geometric size of the vector
Other norms can be specified by the ord parameter
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm - does not exist
inf -- max(abs(x))
-inf -- min(abs(x))
1 -- as below
-1 -- as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other - does not exist sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
>>> from sympy import Matrix, var, trigsimp, cos, sin
>>> x = var('x', real=True)
>>> v = Matrix([cos(x), sin(x)])
>>> print trigsimp( v.norm() )
1
>>> print v.norm(10)
(sin(x)**10 + cos(x)**10)**(1/10)
>>> A = Matrix([[1,1], [1,1]])
>>> print A.norm(2)# Spectral norm (max of |Ax|/|x| under 2-vector-norm)
2
>>> print A.norm(-2) # Inverse spectral norm (smallest singular value)
0
>>> print A.norm() # Frobenius Norm
2
"""
# Row or Column Vector Norms
if self.rows == 1 or self.cols == 1:
if ord == 2 or ord == None: # Common case sqrt(<x,x>)
return Add(*(abs(i)**2 for i in self.mat))**S.Half
elif ord == 1: # sum(abs(x))
return Add(*(abs(i) for i in self.mat))
elif ord == S.Infinity: # max(abs(x))
return Max(*self.applyfunc(abs))
elif ord == S.NegativeInfinity: # min(abs(x))
return Min(*self.applyfunc(abs))
# Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
# Note that while useful this is not mathematically a norm
try:
return Pow( Add(*(abs(i)**ord for i in self.mat)), S(1)/ord )
except:
raise ValueError("Expected order to be Number, Symbol, oo")
# Matrix Norms
else:
if ord == 2: # Spectral Norm
# Maximum singular value
return Max(*self.singular_values())
elif ord == -2:
# Minimum singular value
return Min(*self.singular_values())
elif (ord == None or isinstance(ord,str) and ord.lower() in
['f', 'fro', 'frobenius', 'vector']):
# Reshape as vector and send back to norm function
return self.vec().norm(ord=2)
else:
raise NotImplementedError("Matrix Norms under development")
def normalized(self):
if self.rows != 1 and self.cols != 1:
raise ShapeError("A Matrix must be a vector to normalize.")
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def project(self, v):
"""Project onto v."""
return v * (self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
copy = self[:,:]
for i in range(len(perm)-1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
copy = self[:,:]
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def delRowCol(self, i, j):
# used only for cofactors, makes a copy
M = self[:,:]
M.row_del(i)
M.col_del(j)
return M
def exp(self):
""" Returns the exponent of a matrix """
if not self.is_square:
raise NonSquareMatrixError("Exponentiation is valid only for square matrices")
try:
U, D = self.diagonalize()
except MatrixError:
raise NotImplementedError("Exponentiation is implemented only for diagonalizable matrices")
for i in xrange(0, D.rows):
D[i, i] = C.exp(D[i, i])
return U * D * U.inv()
def zeros(self, dims):
"""Returns a dims = (d1,d2) matrix of zeros."""
n, m = _dims_to_nm( dims )
return Matrix(n,m,[S.Zero]*n*m)
def eye(self, n):
"""Returns the identity matrix of size n."""
tmp = self.zeros(n)
for i in range(tmp.rows):
tmp[i,i] = S.One
return tmp
@property
def is_square(self):
return self.rows == self.cols
def is_nilpotent(self):
"""
Checks if a matrix is nilpotent.
A matrix B is nilpotent if for some integer k, B**k is
a zero matrix.
Example:
>>> from sympy import Matrix
>>> a = Matrix([[0,0,0],[1,0,0],[1,1,0]])
>>> a.is_nilpotent()
True
>>> a = Matrix([[1,0,1],[1,0,0],[1,1,0]])
>>> a.is_nilpotent()
False
"""
if not self.is_square:
raise NonSquareMatrixError("Nilpotency is valid only for square matrices")
x = Dummy('x')
if self.charpoly(x).args[0] == x**self.rows:
return True
return False
def is_upper(self):
"""
Check if matrix is an upper triangular matrix.
Example:
>>> from sympy import Matrix
>>> m = Matrix(2,2,[1, 0, 0, 1])
>>> m
[1, 0]
[0, 1]
>>> m.is_upper()
True
>>> m = Matrix(3,3,[5, 1, 9, 0, 4 , 6, 0, 0, 5])
>>> m
[5, 1, 9]
[0, 4, 6]
[0, 0, 5]
>>> m.is_upper()
True
>>> m = Matrix(2,3,[4, 2, 5, 6, 1, 1])
>>> m
[4, 2, 5]
[6, 1, 1]
>>> m.is_upper()
False
"""
for i in xrange(1, self.rows):
for j in xrange(0, i):
if self[i,j] != 0:
return False
return True
def is_lower(self):
"""
Check if matrix is a lower triangular matrix.
Example:
>>> from sympy import Matrix
>>> m = Matrix(2,2,[1, 0, 0, 1])
>>> m
[1, 0]
[0, 1]
>>> m.is_lower()
True
>>> m = Matrix(3,3,[2, 0, 0, 1, 4 , 0, 6, 6, 5])
>>> m
[2, 0, 0]
[1, 4, 0]
[6, 6, 5]
>>> m.is_lower()
True
>>> from sympy.abc import x, y
>>> m = Matrix(2,2,[x**2 + y, y**2 + x, 0, x + y])
>>> m
[x**2 + y, x + y**2]
[ 0, x + y]
>>> m.is_lower()
False
"""
for i in xrange(0, self.rows):
for j in xrange(i+1, self.cols):
if self[i, j] != 0:
return False
return True
def is_upper_hessenberg(self):
"""
        Checks if the matrix is in upper Hessenberg form.
        An upper Hessenberg matrix has zero entries
        below the first subdiagonal.
Example:
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1,4,2,3],[3,4,1,7],[0,2,3,4],[0,0,1,3]])
>>> a
[1, 4, 2, 3]
[3, 4, 1, 7]
[0, 2, 3, 4]
[0, 0, 1, 3]
>>> a.is_upper_hessenberg()
True
"""
for i in xrange(2, self.rows):
for j in xrange(0, i - 1):
if self[i,j] != 0:
return False
return True
def is_lower_hessenberg(self):
r"""
        Checks if the matrix is in lower Hessenberg form.
        A lower Hessenberg matrix has zero entries
        above the first superdiagonal.
Example:
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1,2,0,0],[5,2,3,0],[3,4,3,7],[5,6,1,1]])
>>> a
[1, 2, 0, 0]
[5, 2, 3, 0]
[3, 4, 3, 7]
[5, 6, 1, 1]
>>> a.is_lower_hessenberg()
True
"""
for i in xrange(0, self.rows):
for j in xrange(i + 2, self.cols):
if self[i, j] != 0:
return False
return True
def is_symbolic(self):
for element in self.mat:
if element.has(Symbol):
return True
return False
def is_symmetric(self, simplify=True):
"""
        Check if the matrix is a symmetric matrix,
        that is, a square matrix that is equal to its transpose.
        By default, simplifications occur before testing symmetry.
        They can be skipped using 'simplify=False'; this speeds things up a bit
        but may induce false negatives.
Example:
>>> from sympy import Matrix
>>> m = Matrix(2,2,[0, 1, 1, 2])
>>> m
[0, 1]
[1, 2]
>>> m.is_symmetric()
True
>>> m = Matrix(2,2,[0, 1, 2, 0])
>>> m
[0, 1]
[2, 0]
>>> m.is_symmetric()
False
>>> m = Matrix(2,3,[0, 0, 0, 0, 0, 0])
>>> m
[0, 0, 0]
[0, 0, 0]
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3,3,[1, x**2 + 2*x + 1, y, (x + 1)**2 , 2, 0, y, 0, 3])
>>> m
[ 1, x**2 + 2*x + 1, y]
[(x + 1)**2, 2, 0]
[ y, 0, 3]
>>> m.is_symmetric()
True
If the matrix is already simplified, you may speed-up is_symmetric()
test by using 'simplify=False'.
>>> m.is_symmetric(simplify=False)
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
if not self.is_square:
return False
if simplify:
delta = self - self.transpose()
delta.simplify()
return delta == self.zeros((self.rows, self.cols))
else:
return self == self.transpose()
def is_diagonal(self):
"""
        Check if the matrix is diagonal,
        that is, a matrix in which the entries outside the main diagonal are all zero.
Example:
>>> from sympy import Matrix, diag
>>> m = Matrix(2,2,[1, 0, 0, 2])
>>> m
[1, 0]
[0, 2]
>>> m.is_diagonal()
True
>>> m = Matrix(2,2,[1, 1, 0, 2])
>>> m
[1, 1]
[0, 2]
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
[1, 0, 0]
[0, 2, 0]
[0, 0, 3]
>>> m.is_diagonal()
True
See also: .is_lower(), is_upper() .is_diagonalizable()
"""
for i in xrange(self.rows):
for j in xrange(self.cols):
if i != j and self[i, j] != 0:
return False
return True
def clone(self):
return Matrix(self.rows, self.cols, lambda i, j: self[i, j])
def det(self, method="bareis"):
"""
Computes the matrix determinant using the method "method".
Possible values for "method":
bareis ... det_bareis
berkowitz ... berkowitz_det
"""
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
else:
raise ValueError("Determinant method unrecognized")
def det_bareis(self):
"""Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF).
"""
if not self.is_square:
raise NonSquareMatrixError()
M, n = self[:,:], self.rows
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
else:
sign = 1 # track current sign in case of column swap
for k in range(n-1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k+1, n):
if M[i, k] != 0:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k+1, n):
for j in range(k+1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
D /= M[k-1, k-1]
if D.is_Atom:
M[i, j] = D
else:
M[i, j] = cancel(D)
det = sign * M[n-1, n-1]
return det.expand()
def adjugate(self, method="berkowitz"):
"""
Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
See also: .cofactorMatrix(), .T
"""
return self.cofactorMatrix(method).T
def inverse_LU(self, iszerofunc=_iszero):
"""
Calculates the inverse using LU decomposition.
"""
        return self.LUsolve(self.eye(self.rows), iszerofunc=iszerofunc)
def inverse_GE(self, iszerofunc=_iszero):
"""
Calculates the inverse using Gaussian elimination.
"""
if not self.is_square:
raise NonSquareMatrixError()
if self.det() == 0:
raise ValueError("A Matrix must have non-zero determinant to invert.")
big = self.row_join(self.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc)
return red[0][:,big.rows:]
def inverse_ADJ(self):
"""
Calculates the inverse using the adjugate matrix and a determinant.
"""
if not self.is_square:
raise NonSquareMatrixError()
d = self.berkowitz_det()
if d == 0:
raise ValueError("A Matrix must have non-zero determinant to invert.")
return self.adjugate()/d
def rref(self,simplified=False, iszerofunc=_iszero, simplify=sympy_simplify):
"""
Take any matrix and return reduced row-echelon form and indices of pivot vars
To simplify elements before finding nonzero pivots set simplified=True.
To set a custom simplify function, use the simplify keyword argument.
"""
# TODO: rewrite inverse_GE to use this
pivots, r = 0, self[:,:] # pivot: index of next row to contain a pivot
pivotlist = [] # indices of pivot variables (non-free)
for i in range(r.cols):
if pivots == r.rows:
break
if simplified:
r[pivots,i] = simplify(r[pivots,i])
if iszerofunc(r[pivots,i]):
for k in range(pivots, r.rows):
if simplified and k > pivots:
r[k,i] = simplify(r[k,i])
if not iszerofunc(r[k,i]):
break
if k == r.rows - 1 and iszerofunc(r[k,i]):
continue
r.row_swap(pivots,k)
scale = r[pivots,i]
r.row(pivots, lambda x, _: x/scale)
for j in range(r.rows):
if j == pivots:
continue
scale = r[j,i]
r.row(j, lambda x, k: x - scale*r[pivots,k])
pivotlist.append(i)
pivots += 1
return r, pivotlist
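    # Illustrative usage (values chosen only for the example):
    #   m = Matrix(((1, 2), (2, 4)))
    #   r, pivots = m.rref()   # r == [1, 2; 0, 0], pivots == [0]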
def nullspace(self,simplified=False):
"""
Returns list of vectors (Matrix objects) that span nullspace of self
"""
reduced, pivots = self.rref(simplified)
basis = []
# create a set of vectors for the basis
for i in range(self.cols - len(pivots)):
basis.append(zeros((self.cols, 1)))
# contains the variable index to which the vector corresponds
basiskey, cur = [-1]*len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots: # free var, just set vector's ith place to 1
basis[basiskey.index(i)][i,0] = 1
else: # add negative of nonpivot entry to corr vector
for j in range(i+1, self.cols):
line = pivots.index(i)
if reduced[line, j] != 0:
if j in pivots:
# XXX: Is this the correct error?
raise NotImplementedError("Could not compute the nullspace of `self`.")
basis[basiskey.index(j)][i,0] = -1 * reduced[line, j]
return basis
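    # Illustrative usage (values chosen only for the example):
    #   m = Matrix(((1, 2), (2, 4)))
    #   m.nullspace()   # one basis vector, [-2, 1].T, spanning all x with m*x == 0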
def berkowitz(self):
"""The Berkowitz algorithm.
Given N x N matrix with symbolic content, compute efficiently
coefficients of characteristic polynomials of 'self' and all
its square sub-matrices composed by removing both i-th row
and column, without division in the ground domain.
This method is particularly useful for computing determinant,
principal minors and characteristic polynomial, when 'self'
has complicated coefficients e.g. polynomials. Semi-direct
usage of this algorithm is also important in computing
efficiently sub-resultant PRS.
        Assuming that M is a square matrix of dimension N x N and
        I is the N x N identity matrix, then the following
        definition of the characteristic polynomial is being used:
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz
algorithm are monic.
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> M = Matrix([ [x,y,z], [1,0,0], [y,z,x] ])
>>> p, q, r = M.berkowitz()
>>> print p # 1 x 1 M's sub-matrix
(1, -x)
>>> print q # 2 x 2 M's sub-matrix
(1, -x, -y)
>>> print r # 3 x 3 M's sub-matrix
(1, -2*x, x**2 - y*z - y, x*y - z**2)
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
"""
if not self.is_square:
raise NonSquareMatrixError()
A, N = self, self.rows
transforms = [0] * (N-1)
for n in xrange(N, 1, -1):
T, k = zeros((n+1,n)), n - 1
R, C = -A[k,:k], A[:k,k]
A, a = A[:k,:k], -A[k,k]
items = [ C ]
for i in xrange(0, n-2):
items.append(A * items[i])
for i, B in enumerate(items):
items[i] = (R * B)[0,0]
items = [ S.One, a ] + items
for i in xrange(n):
T[i:,i] = items[:n-i+1]
transforms[k-1] = T
polys = [ Matrix([S.One, -A[0,0]]) ]
for i, T in enumerate(transforms):
polys.append(T * polys[i])
return tuple(map(tuple, polys))
def berkowitz_det(self):
"""Computes determinant using Berkowitz method."""
poly = self.berkowitz()[-1]
sign = (-1)**(len(poly)-1)
return sign * poly[-1]
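    # Sign bookkeeping: charpoly(t) = det(t*I - M) is monic of degree
    # n = len(poly) - 1, and det(M) = (-1)**n * charpoly(0), i.e. the
    # constant coefficient poly[-1] times (-1)**n.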
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method."""
sign, minors = S.NegativeOne, []
for poly in self.berkowitz():
minors.append(sign*poly[-1])
sign = -sign
return tuple(minors)
def berkowitz_charpoly(self, x, simplify=sympy_simplify):
"""Computes characteristic polynomial minors using Berkowitz method."""
return Poly(map(simplify, self.berkowitz()[-1]), x)
charpoly = berkowitz_charpoly
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method. """
return roots(self.berkowitz_charpoly(Dummy('x')), **flags)
eigenvals = berkowitz_eigenvals
def eigenvects(self, **flags):
"""Return list of triples (eigenval, multiplicity, basis)."""
if 'multiple' in flags:
del flags['multiple']
out, vlist = [], self.eigenvals(**flags)
for r, k in vlist.iteritems():
tmp = self - eye(self.rows)*r
basis = tmp.nullspace()
# whether tmp.is_symbolic() is True or False, it is possible that
# the basis will come back as [] in which case simplification is
# necessary.
if not basis:
# The nullspace routine failed, try it again with simplification
basis = tmp.nullspace(simplified=True)
if not basis:
raise NotImplementedError("Can't evaluate eigenvector for eigenvalue %s" % r)
out.append((r, k, basis))
return out
def singular_values(self):
"""
Compute the singular values of a Matrix
>>> from sympy import Matrix, Symbol, eye
>>> x = Symbol('x', real=True)
>>> A = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
>>> print A.singular_values()
[1, (x**2 + 1)**(1/2), 0]
"""
# Compute eigenvalues of A.H A
valmultpairs = (self.H*self).eigenvals()
# Expands result from eigenvals into a simple list
vals = []
for k,v in valmultpairs.items():
vals += [sqrt(k)]*v # dangerous! same k in several spots!
# If sorting makes sense then sort
if all(val.is_number for val in vals):
vals.sort(reverse=True) # sort them in descending order
return vals
def condition_number(self):
"""
Returns the condition number of a matrix.
This is the maximum singular value divided by the minimum singular value
>>> from sympy import Matrix, S
>>> A = Matrix([[1, 0, 0], [0, 10, 0], [0,0,S.One/10]])
>>> print A.condition_number()
100
"""
singularvalues = self.singular_values()
return Max(*singularvalues) / Min(*singularvalues)
def fill(self, value):
"""Fill the matrix with the scalar value."""
self.mat = [value]*len(self)
def __getattr__(self, attr):
if attr in ('diff','integrate','limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc( item_doit )
return doit
else:
raise AttributeError("Matrix has no attribute %s." % attr)
def integrate(self, *args):
return Matrix(self.rows, self.cols, lambda i, j: self[i, j].integrate(*args))
def limit(self, *args):
return Matrix(self.rows, self.cols, lambda i, j: self[i, j].limit(*args))
def diff(self, *args):
return Matrix(self.rows, self.cols, lambda i, j: self[i, j].diff(*args))
def vec(self):
"""
Return the Matrix converted into a one column matrix by stacking columns
>>> from sympy import Matrix
>>> m=Matrix([ [1,3], [2,4] ])
>>> m
[1, 3]
[2, 4]
>>> m.vec()
[1]
[2]
[3]
[4]
"""
return Matrix(len(self), 1, self.transpose().mat)
def vech(self, diagonal=True, check_symmetry=True):
"""
Return the unique elements of a symmetric Matrix as a one column matrix
by stacking the elements in the lower triangle.
Arguments:
diagonal -- include the diagonal cells of self or not
check_symmetry -- checks symmetry of self but not completely reliably
>>> from sympy import Matrix
>>> m=Matrix([ [1,2], [2,3] ])
>>> m
[1, 2]
[2, 3]
>>> m.vech()
[1]
[2]
[3]
>>> m.vech(diagonal=False)
[2]
"""
c = self.cols
if c != self.rows:
raise ShapeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError("Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros( (c * (c + 1) // 2, 1) )
for j in xrange(c):
for i in xrange(j,c):
v[count] = self[i,j]
count += 1
else:
v = zeros( (c * (c - 1) // 2, 1) )
for j in xrange(c):
for i in xrange(j+1,c):
v[count] = self[i,j]
count += 1
return v
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Example:
>>> from sympy import Matrix, symbols
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
[1, 3]
[y, z**2]
>>> a2
[x]
>>> a3
[0]
>>>
"""
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[0:i, i:]
to_the_bottom = M[i:, 0:i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[0:i, 0:i])
if M.shape == M[0:i, 0:i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def diagonalize(self, reals_only = False):
"""
        Return the diagonalized matrix D and the transformation P such that
            D = P^-1 * M * P
        where M is the current matrix.
Example:
>>> from sympy import Matrix
>>> m = Matrix(3,3,[1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
[1, 2, 0]
[0, 3, 0]
[2, -4, 2]
>>> (P, D) = m.diagonalize()
>>> D
[1, 0, 0]
[0, 2, 0]
[0, 0, 3]
>>> P
[-1/2, 0, -1/2]
[ 0, 0, -1/2]
[ 1, 1, 1]
>>> P.inv() * m * P
[1, 0, 0]
[0, 2, 0]
[0, 0, 3]
See also: .is_diagonalizable(), .is_diagonal()
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self.is_diagonalizable(reals_only, False):
self._diagonalize_clear_subproducts()
raise MatrixError("Matrix is not diagonalizable")
else:
            if self._eigenvects is None:
self._eigenvects = self.eigenvects()
diagvals = []
P = Matrix(self.rows, 0, [])
for eigenval, multiplicity, vects in self._eigenvects:
for k in range(multiplicity):
diagvals.append(eigenval)
vec = vects[k]
P = P.col_insert(P.cols, vec)
D = diag(*diagvals)
self._diagonalize_clear_subproducts()
return (P, D)
def is_diagonalizable(self, reals_only = False, clear_subproducts=True):
"""
Check if matrix is diagonalizable.
        If reals_only==True then also check that the diagonalized matrix contains only real values.
        Some intermediate results are stored so that other methods can reuse them and avoid
        recomputation; by default (if clear_subproducts==True) they are deleted afterwards.
Example:
>>> from sympy import Matrix
>>> m = Matrix(3,3,[1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
[1, 2, 0]
[0, 3, 0]
[2, -4, 2]
>>> m.is_diagonalizable()
True
>>> m = Matrix(2,2,[0, 1, 0, 0])
>>> m
[0, 1]
[0, 0]
>>> m.is_diagonalizable()
False
>>> m = Matrix(2,2,[0, 1, -1, 0])
>>> m
[ 0, 1]
[-1, 0]
>>> m.is_diagonalizable()
True
>>> m.is_diagonalizable(True)
False
"""
if not self.is_square:
return False
res = False
self._is_symbolic = self.is_symbolic()
self._is_symmetric = self.is_symmetric()
self._eigenvects = None
#if self._is_symbolic:
# self._diagonalize_clear_subproducts()
# raise NotImplementedError("Symbolic matrices are not implemented for diagonalization yet")
self._eigenvects = self.eigenvects()
all_iscorrect = True
for eigenval, multiplicity, vects in self._eigenvects:
if len(vects) != multiplicity:
all_iscorrect = False
break
elif reals_only and not eigenval.is_real:
all_iscorrect = False
break
res = all_iscorrect
if clear_subproducts:
self._diagonalize_clear_subproducts()
return res
def _diagonalize_clear_subproducts(self):
del self._is_symbolic
del self._is_symmetric
del self._eigenvects
def jordan_form(self, calc_transformation = True):
"""
        Return the Jordan form J of the current matrix.
If calc_transformation is specified as False, then transformation P such that
J = P^-1 * M * P
will not be calculated.
Note:
Calculation of transformation P is not implemented yet
Example:
>>> from sympy import Matrix
>>> m = Matrix(4, 4, [6, 5, -2, -3, -3, -1, 3, 3, 2, 1, -2, -3, -1, 1, 5, 5])
>>> m
[ 6, 5, -2, -3]
[-3, -1, 3, 3]
[ 2, 1, -2, -3]
[-1, 1, 5, 5]
>>> (P, J) = m.jordan_form()
>>> J
[2, 1, 0, 0]
[0, 2, 0, 0]
[0, 0, 2, 1]
[0, 0, 0, 2]
See also: jordan_cells()
"""
(P, Jcells) = self.jordan_cells(calc_transformation)
J = diag(*Jcells)
return (P, J)
def jordan_cells(self, calc_transformation = True):
"""
        Return a list of the Jordan cells of the current matrix.
        These cells form the Jordan matrix J.
If calc_transformation is specified as False, then transformation P such that
J = P^-1 * M * P
will not be calculated.
Note:
Calculation of transformation P is not implemented yet
Example:
>>> from sympy import Matrix
>>> m = Matrix(4, 4, [6, 5, -2, -3, -3, -1, 3, 3, 2, 1, -2, -3, -1, 1, 5, 5])
>>> m
[ 6, 5, -2, -3]
[-3, -1, 3, 3]
[ 2, 1, -2, -3]
[-1, 1, 5, 5]
>>> (P, Jcells) = m.jordan_cells()
>>> Jcells[0]
[2, 1]
[0, 2]
>>> Jcells[1]
[2, 1]
[0, 2]
See also: jordan_form()
"""
if not self.is_square:
raise NonSquareMatrixError()
_eigenvects = self.eigenvects()
Jcells = []
for eigenval, multiplicity, vects in _eigenvects:
geometrical = len(vects)
if geometrical == multiplicity:
Jcell = diag( *([eigenval] * multiplicity))
Jcells.append(Jcell)
elif geometrical==0:
raise MatrixError("Matrix has the eigen vector with geometrical multiplicity equal zero.")
else:
sizes = self._jordan_split(multiplicity, geometrical)
cells = []
for size in sizes:
cell = jordan_cell(eigenval, size)
cells.append(cell)
Jcells += cells
return (None, Jcells)
    def _jordan_split(self, algebraical, geometrical):
        "Return a list whose sum equals 'algebraical' and whose length equals 'geometrical'"
        n1 = algebraical // geometrical
        res = [n1] * geometrical
        res[-1] += algebraical % geometrical
        assert sum(res) == algebraical
        return res
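    # Illustrative sketch: an eigenvalue with algebraic multiplicity 5 and
    # geometric multiplicity 2 gives _jordan_split(5, 2) == [2, 3], i.e.
    # one Jordan cell of size 2 and one of size 3.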
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples:
>>> from sympy import Matrix, Float
>>> from sympy.abc import x, y
>>> A = Matrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(a.has(*patterns) for a in self.mat)
def matrix_multiply(A, B):
"""
Matrix product A*B.
A and B must be of appropriate dimensions. If A is an m x k matrix, and B
is a k x n matrix, the product will be an m x n matrix.
Example:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
[30, 36, 42]
[66, 81, 96]
>>> B*A
Traceback (most recent call last):
...
ShapeError
>>>
"""
    # The following implementation is equivalent, but about 5% slower
#ma, na = A.shape
#mb, nb = B.shape
#
#if na != mb:
# raise ShapeError()
#product = Matrix(ma, nb, lambda i,j: 0)
#for i in xrange(ma):
# for j in xrange(nb):
# s = 0
# for k in range(na):
# s += A[i, k]*B[k, j]
# product[i, j] = s
#return product
if A.shape[1] != B.shape[0]:
raise ShapeError()
blst = B.T.tolist()
alst = A.tolist()
return Matrix(A.shape[0], B.shape[1], lambda i, j:
reduce(lambda k, l: k+l,
map(lambda n, m: n*m,
alst[i],
blst[j])))
def matrix_multiply_elementwise(A, B):
"""Return the Hadamard product (elementwise product) of A and B
>>> import sympy
>>> A = sympy.Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = sympy.Matrix([[1, 10, 100], [100, 10, 1]])
>>> print sympy.matrices.matrix_multiply_elementwise(A, B)
[ 0, 10, 200]
[300, 40, 5]
"""
if A.shape != B.shape:
raise ShapeError()
shape = A.shape
return Matrix(shape[0], shape[1],
lambda i, j: A[i,j] * B[i, j])
def matrix_add(A,B):
"""Return A+B"""
if A.shape != B.shape:
raise ShapeError()
alst = A.tolist()
blst = B.tolist()
ret = [0]*A.shape[0]
for i in xrange(A.shape[0]):
ret[i] = map(lambda j,k: j+k, alst[i], blst[i])
return Matrix(ret)
def zeros(dims):
"""Create zero matrix of dimensions dims = (d1,d2)"""
n, m = _dims_to_nm(dims)
return Matrix(n, m, [S.Zero]*m*n)
def ones(dims):
"""Create all-one matrix of dimensions dims = (d1,d2)"""
n, m = _dims_to_nm( dims )
return Matrix(n, m, [S.One]*m*n)
def eye(n):
"""Create square identity matrix n x n
See also: diag()
"""
n = int(n)
out = zeros(n)
for i in range(n):
out[i, i] = S.One
return out
def diag(*values):
"""Create diagonal matrix from a list as a diagonal values.
Arguments might be matrices too, in case of it they are fitted in result matrix
Example:
>>> from sympy.matrices import diag, Matrix
>>> diag(1, 2, 3)
[1, 0, 0]
[0, 2, 0]
[0, 0, 3]
>>> from sympy.abc import x, y, z
>>> a = Matrix([x, y, z])
>>> b = Matrix([[1, 2], [3, 4]])
>>> c = Matrix([[5, 6]])
>>> diag(a, 7, b, c)
[x, 0, 0, 0, 0, 0]
[y, 0, 0, 0, 0, 0]
[z, 0, 0, 0, 0, 0]
[0, 7, 0, 0, 0, 0]
[0, 0, 1, 2, 0, 0]
[0, 0, 3, 4, 0, 0]
[0, 0, 0, 0, 5, 6]
See also: eye()
"""
rows = 0
cols = 0
for m in values:
if isinstance(m, Matrix):
rows += m.rows
cols += m.cols
else:
rows += 1
cols += 1
res = zeros((rows, cols))
i_row = 0
i_col = 0
for m in values:
if isinstance(m, Matrix):
res[i_row:i_row + m.rows, i_col:i_col + m.cols] = m
i_row += m.rows
i_col += m.cols
else:
res[i_row, i_col] = m
i_row += 1
i_col += 1
return res
def block_diag(matrices):
"""
Warning: this function is deprecated. See .diag()
"""
import warnings
warnings.warn("block_diag() is deprecated, use diag() instead", DeprecationWarning)
return diag(*matrices)
def jordan_cell(eigenval, n):
"""
Create matrix of Jordan cell kind:
Example:
>>> from sympy.matrices.matrices import jordan_cell
>>> from sympy.abc import x
>>> jordan_cell(x, 4)
[x, 1, 0, 0]
[0, x, 1, 0]
[0, 0, x, 1]
[0, 0, 0, x]
"""
n = int(n)
out = zeros(n)
for i in range(n-1):
out[i, i] = eigenval
out[i, i+1] = S.One
out[n-1, n-1] = eigenval
return out
def randMatrix(r, c, min=0, max=99, seed=None):
    """Create random matrix r x c"""
    if seed is None:
        prng = random.Random()  # use system time
    else:
        prng = random.Random(seed)
return Matrix(r,c,lambda i,j: prng.randint(min,max))
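# Reproducibility sketch: passing a seed makes the output deterministic,
# e.g. randMatrix(2, 2, seed=42) always returns the same 2 x 2 matrix of
# integers drawn from [0, 99].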
def hessian(f, varlist):
"""Compute Hessian matrix for a function f
see: http://en.wikipedia.org/wiki/Hessian_matrix
"""
# f is the expression representing a function f, return regular matrix
if is_sequence(varlist):
m = len(varlist)
if not m:
raise ShapeError("`len(varlist)` must not be zero.")
elif isinstance(varlist, Matrix):
m = varlist.cols
if not m:
raise ShapeError("`varlist.cols` must not be zero.")
if varlist.rows != 1:
raise ShapeError("`varlist` must be a row vector.")
else:
raise ValueError("Improper variable list in hessian function")
    if not getattr(f, 'diff', None):
# check differentiability
raise ValueError("Function `f` (%s) is not differentiable" % f)
out = zeros(m)
for i in range(m):
for j in range(i,m):
out[i,j] = f.diff(varlist[i]).diff(varlist[j])
for i in range(m):
for j in range(i):
out[i,j] = out[j,i]
return out
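# Worked sketch: for f(x, y) = x**2*y,
#   >>> from sympy.abc import x, y
#   >>> hessian(x**2*y, (x, y))
#   [2*y, 2*x]
#   [2*x,   0]
# i.e. the matrix of second partial derivatives d2f/(dx_i dx_j).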
def GramSchmidt(vlist, orthog=False):
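    """Apply the Gram-Schmidt process to vlist, a list of linearly
    independent vectors, and return mutually orthogonal vectors spanning
    the same space; if orthog is True the result is also normalized.
    Raises ValueError if the vectors are linearly dependent. For example,
    the vectors [1, 0, 0] and [1, 1, 0] become [1, 0, 0] and [0, 1, 0].
    """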
out = []
m = len(vlist)
for i in range(m):
tmp = vlist[i]
for j in range(i):
tmp -= vlist[i].project(out[j])
        if tmp == zeros((tmp.rows, tmp.cols)):
raise ValueError("GramSchmidt: vector set not linearly independent")
out.append(tmp)
if orthog:
for i in range(len(out)):
out[i] = out[i].normalized()
return out
def wronskian(functions, var, method='bareis'):
"""Compute Wronskian for [] of functions
| f1 f2 ... fn |
| f1' f2' ... fn' |
| . . . . |
W(f1,...,fn) = | . . . . |
| . . . . |
| (n) (n) (n) |
| D (f1) D (f2) ... D (fn) |
see: http://en.wikipedia.org/wiki/Wronskian
"""
for index in xrange(0, len(functions)):
functions[index] = sympify(functions[index])
n = len(functions)
if n == 0:
return 1
W = Matrix(n, n, lambda i,j: functions[i].diff(var, j) )
return W.det(method)
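# Worked sketch: for f1 = sin(x) and f2 = cos(x) the Wronskian determinant
# is sin(x)*(-sin(x)) - cos(x)*cos(x) = -sin(x)**2 - cos(x)**2, which
# simplifies to -1 != 0, so the two functions are linearly independent.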
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
    Solutions of L are linearly independent iff their Casoratian,
    denoted as C(a, b, ..., z), does not vanish for n = 0.
Casoratian is defined by k x k determinant:
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis.
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
seqs = map(sympify, seqs)
if not zero:
f = lambda i, j: seqs[j].subs(n, n+i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
# Add sympify converters
def _matrix_sympify(matrix):
raise SympifyError('Matrix cannot be sympified')
converter[Matrix] = _matrix_sympify
del _matrix_sympify
class SparseMatrix(Matrix):
"""Sparse matrix"""
def __init__(self, *args):
if len(args) == 3 and callable(args[2]):
op = args[2]
if not isinstance(args[0], (int, Integer)) or not isinstance(args[1], (int, Integer)):
raise TypeError("`args[0]` and `args[1]` must both be integers.")
self.rows = args[0]
self.cols = args[1]
self.mat = {}
for i in range(self.rows):
for j in range(self.cols):
value = sympify(op(i,j))
if value != 0:
self.mat[(i,j)] = value
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and is_sequence(args[2]):
self.rows = args[0]
self.cols = args[1]
mat = args[2]
self.mat = {}
for i in range(self.rows):
for j in range(self.cols):
value = sympify(mat[i*self.cols+j])
if value != 0:
self.mat[(i,j)] = value
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and isinstance(args[2], dict):
self.rows = args[0]
self.cols = args[1]
self.mat = {}
# manual copy, copy.deepcopy() doesn't work
for key in args[2].keys():
self.mat[key] = args[2][key]
else:
if len(args) == 1:
mat = args[0]
else:
mat = args
if not is_sequence(mat[0]):
mat = [ [element] for element in mat ]
self.rows = len(mat)
self.cols = len(mat[0])
self.mat = {}
for i in range(self.rows):
if len(mat[i]) != self.cols:
raise ValueError("All arguments must have the same length.")
for j in range(self.cols):
value = sympify(mat[i][j])
if value != 0:
self.mat[(i,j)] = value
def __getitem__(self, key):
if isinstance(key, slice) or isinstance(key, int):
lo, hi = self.slice2bounds(key, len(self))
L = []
for i in range(lo, hi):
m,n = self.rowdecomp(i)
if (m,n) in self.mat:
L.append(self.mat[(m,n)])
else:
L.append(0)
if len(L) == 1:
return L[0]
else:
return L
if len(key) != 2:
raise ValueError("`key` must be of length 2.")
if isinstance(key[0], int) and isinstance(key[1], int):
i,j=self.key2ij(key)
if (i, j) in self.mat:
return self.mat[(i,j)]
else:
return 0
elif isinstance(key[0], slice) or isinstance(key[1], slice):
return self.submatrix(key)
else:
raise IndexError("Index out of range: a[%s]"%repr(key))
def rowdecomp(self, num):
nmax = len(self)
if not (0 <= num < nmax) or not (0 <= -num < nmax):
raise ValueError("`num` must satisfy 0 <= `num` < `self.rows*" +
"*self.cols` (%d) and 0 <= -num < " % nmax +
"`self.rows*self.cols` (%d) to apply redecomp()." % nmax)
i, j = 0, num
while j >= self.cols:
j -= self.cols
i += 1
return i,j
def __setitem__(self, key, value):
# almost identical, need to test for 0
if len(key) != 2:
raise ValueError("`key` must be of length 2.")
if isinstance(key[0], slice) or isinstance(key[1], slice):
if isinstance(value, Matrix):
self.copyin_matrix(key, value)
if is_sequence(value):
self.copyin_list(key, value)
else:
i,j=self.key2ij(key)
testval = sympify(value)
if testval != 0:
self.mat[(i,j)] = testval
elif (i,j) in self.mat:
del self.mat[(i,j)]
def row_del(self, k):
newD = {}
for (i,j) in self.mat.keys():
if i==k:
pass
elif i > k:
newD[i-1,j] = self.mat[i,j]
else:
newD[i,j] = self.mat[i,j]
self.mat = newD
self.rows -= 1
def col_del(self, k):
newD = {}
for (i,j) in self.mat.keys():
if j==k:
pass
elif j > k:
newD[i,j-1] = self.mat[i,j]
else:
newD[i,j] = self.mat[i,j]
self.mat = newD
self.cols -= 1
def toMatrix(self):
l = []
for i in range(self.rows):
c = []
l.append(c)
for j in range(self.cols):
if (i, j) in self.mat:
c.append(self[i, j])
else:
c.append(0)
return Matrix(l)
def row_list(self):
"""
Returns a Row-sorted list of non-zero elements of the matrix.
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix((1,2),(3,4))
>>> a
[1, 2]
[3, 4]
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
"""
new=[]
for i in range(self.rows):
for j in range(self.cols):
value = self[(i,j)]
if value!=0:
new.append((i,j,value))
return new
RL = property(row_list,None,None,"Alternate faster representation")
def col_list(self):
"""
Returns a Column-sorted list of non-zero elements of the matrix.
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix((1,2),(3,4))
>>> a
[1, 2]
[3, 4]
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
"""
new=[]
for j in range(self.cols):
for i in range(self.rows):
value = self[(i,j)]
if value!=0:
new.append((i,j,value))
return new
CL = property(col_list,None,None,"Alternate faster representation")
def transpose(self):
"""
Returns the transposed SparseMatrix of this SparseMatrix
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix((1,2),(3,4))
>>> a
[1, 2]
[3, 4]
>>> a.T
[1, 3]
[2, 4]
"""
tran = SparseMatrix(self.cols,self.rows,{})
for key,value in self.mat.iteritems():
tran.mat[key[1],key[0]]=value
return tran
T = property(transpose,None,None,"Matrix transposition.")
def __add__(self, other):
if isinstance(other, SparseMatrix):
return self.add(other)
else:
raise NotImplementedError("Only SparseMatrix + SparseMatrix supported")
def __radd__(self, other):
if isinstance(other, SparseMatrix):
return self.add(other)
else:
raise NotImplementedError("Only SparseMatrix + SparseMatrix supported")
def add(self, other):
"""
Add two sparse matrices with dictionary representation.
>>> from sympy.matrices.matrices import SparseMatrix
>>> A = SparseMatrix(5, 5, lambda i, j : i * j + i)
>>> A
[0, 0, 0, 0, 0]
[1, 2, 3, 4, 5]
[2, 4, 6, 8, 10]
[3, 6, 9, 12, 15]
[4, 8, 12, 16, 20]
>>> B = SparseMatrix(5, 5, lambda i, j : i + 2 * j)
>>> B
[0, 2, 4, 6, 8]
[1, 3, 5, 7, 9]
[2, 4, 6, 8, 10]
[3, 5, 7, 9, 11]
[4, 6, 8, 10, 12]
>>> A + B
[0, 2, 4, 6, 8]
[2, 5, 8, 11, 14]
[4, 8, 12, 16, 20]
[6, 11, 16, 21, 26]
[8, 14, 20, 26, 32]
"""
if self.shape != other.shape:
raise ShapeError()
a, b = self.mat.keys(), other.mat.keys()
a.sort()
b.sort()
i = j = 0
c = {}
while i < len(a) or j < len(b):
if j >= len(b) or (i < len(a) and a[i] < b[j]):
c[a[i]] = self.mat[a[i]]
i = i + 1
continue
elif i >= len(a) or (j < len(b) and a[i] > b[j]):
c[b[j]] = other.mat[b[j]]
j = j + 1
continue
else:
c[a[i]] = self.mat[a[i]] + other.mat[b[j]]
i = i + 1
j = j + 1
return SparseMatrix(self.rows, self.cols, c)
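    # Merge sketch: add() walks the two sorted key lists in step, like a
    # merge-sort pass. E.g. keys [(0,0), (1,1)] and [(0,0), (2,0)] produce
    # c with (0,0) summed, (1,1) taken from self and (2,0) taken from
    # other, so the result stays sparse.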
# from here to end all functions are same as in matrices.py
# with Matrix replaced with SparseMatrix
def copyin_list(self, key, value):
if not is_sequence(value):
raise TypeError("`value` must be of type list or tuple.")
self.copyin_matrix(key, SparseMatrix(value))
def multiply(self,b):
"""Returns self*b """
def dotprod(a,b,i,j):
if a.cols != b.rows:
raise ShapeError("`self.cols` must equal `b.rows`.")
r=0
for x in range(a.cols):
r+=a[i,x]*b[x,j]
return r
r = SparseMatrix(self.rows, b.cols, lambda i,j: dotprod(self,b,i,j))
if r.rows == 1 and r.cols ==1:
return r[0,0]
return r
def submatrix(self, keys):
if not isinstance(keys[0], slice) and not isinstance(keys[1], slice):
raise TypeError("Both elements of `keys` must be slice objects.")
rlo, rhi = self.slice2bounds(keys[0], self.rows)
clo, chi = self.slice2bounds(keys[1], self.cols)
if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
return SparseMatrix(rhi-rlo, chi-clo, lambda i,j: self[i+rlo, j+clo])
def reshape(self, _rows, _cols):
if len(self) != _rows*_cols:
print "Invalid reshape parameters %d %d" % (_rows, _cols)
newD = {}
for i in range(_rows):
for j in range(_cols):
m,n = self.rowdecomp(i*_cols + j)
if (m,n) in self.mat:
newD[(i,j)] = self.mat[(m,n)]
return SparseMatrix(_rows, _cols, newD)
def cross(self, b):
if not is_sequence(b, include=Matrix):
raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
type(b))
        if not ((self.rows == 1 and self.cols == 3 or
                 self.rows == 3 and self.cols == 1) and
                (b.rows == 1 and b.cols == 3 or
                 b.rows == 3 and b.cols == 1)):
raise ShapeError("Dimensions incorrect for cross product")
else:
return SparseMatrix(1,3,((self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def zeros(self, dims):
"""Returns a dims = (d1,d2) matrix of zeros."""
n, m = _dims_to_nm( dims )
return SparseMatrix(n,m,{})
def eye(self, n):
tmp = SparseMatrix(n,n,lambda i,j:0)
for i in range(tmp.rows):
tmp[i,i] = 1
return tmp
def list2numpy(l):
"""Converts python list of SymPy expressions to a NumPy array."""
from numpy import empty
a = empty(len(l), dtype=object)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m):
"""Converts SymPy's matrix to a NumPy array."""
from numpy import empty
a = empty(m.shape, dtype=object)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
def a2idx(a):
"""
Tries to convert "a" to an index, returns None on failure.
The result of a2idx() (if not None) can be safely used as an index to
arrays/matrices.
"""
if hasattr(a, "__int__"):
return int(a)
if hasattr(a, "__index__"):
return a.__index__()
def symarray(prefix, shape):
"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named prefix_i1_i2_... You should thus provide a
    non-empty prefix if you want your symbols to be unique for different output
    arrays, as SymPy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
Examples
--------
>> from sympy import symarray
>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>> a = symarray('', 3)
>> b = symarray('', 3)
>> a[0] is b[0]
True
>> a = symarray('a', 3)
>> b = symarray('b', 3)
>> a[0] is b[0]
False
Creating symarrays with a prefix:
>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>> symarray('a', (2,3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>> symarray('a', (2,3,2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
"""
try:
import numpy as np
except ImportError:
raise ImportError("symarray requires numpy to be installed")
arr = np.empty(shape, dtype=object)
for index in np.ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))))
return arr
def _separate_eig_results(res):
eigvals = [item[0] for item in res]
multiplicities = [item[1] for item in res]
    eigvals = flatten([[val]*mult for val, mult in zip(eigvals, multiplicities)])
eigvects = flatten([item[2] for item in res])
return eigvals, eigvects
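# Shape sketch: eigenvects() yields (eigenvalue, multiplicity, vectors)
# triples; e.g. [(2, 2, [v1, v2]), (3, 1, [v3])] is separated into
# ([2, 2, 3], [v1, v2, v3]).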
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/matrices/matrices.py
|
Python
|
agpl-3.0
| 103,046
|
[
"DIRAC",
"Gaussian"
] |
0a35b942d30310507bceeeb9f85455e68b135579a2bc43a8923d6f9f3878013a
|
import sys
import os
import random
import time
import readline
import obj
import utils
import entities
def personInteraction():
entities.player.location = entities.getLocation('Interact')
personType = random.randint(1, 3)
if personType == 1:
person = [random.choice(entities.enemies), random.choice(entities.weapons)]
if utils.confirm('You see a mean-looking person in the distance. Do you choose to approach?'):
utils.fight(person[0], person[1])
else:
print('You run away in fear.')
elif personType == 2:
if entities.worldEntities:
person = random.choice(entities.worldEntities)
person.inventory.append(random.choice(entities.weapons))
if utils.confirm('You see a familiar, mean-looking person in the distance. Do you choose to approach?'):
utils.fight(person, person.inventory[0])
else:
print('You run away in fear.')
else:
person = random.choice(entities.enemies)
person.inventory.append(random.choice(entities.weapons))
if utils.confirm('You see a mean-looking person in the distance. Do you choose to approach?'):
utils.fight(person, person.inventory[0])
else:
print('You run away in fear.')
else:
person = [random.choice(entities.helpers), random.choice(entities.helperItems)]
if utils.confirm('You see a kind-looking person in the distance. Do you choose to approach?'):
print('The person is a(n) ' + person[0].name + '!')
if person[0] == entities.getHelper('old lady'):
if random.randint(0,1) == 0:
utils.fight(entities.getEnemy('old lady'), entities.getWeapon('cane'))
else:
time.sleep(0.5)
print('The %s smiles and holds a(n) %s out in her hand.' % (person[0].name, person[1].name))
entities.player.inventory.append(person[1])
time.sleep(0.2)
print(person[1].name + ' added to your inventory!')
else:
time.sleep(0.5)
print('The %s smiles and holds a(n) %s out in her hand.' % (person[0].name, person[1].name))
entities.player.inventory.append(person[1])
time.sleep(0.2)
print(person[1].name + ' added to your inventory!')
else:
            print('You walk away.')
time.sleep(2)
def memory():
while True:
try:
rand = random.randint(0,1)
print(rand, end='')
except KeyboardInterrupt:
try:
print('\nRegaining Train of Thought...\n')
time.sleep(10)
except KeyboardInterrupt:
try:
                    print('\nEntering Subconscious...\n')
time.sleep(10)
except KeyboardInterrupt:
print('\nProgress Lost.')
sys.exit(0)
try:
while True:
command = input('ZZZ : ').split(' ')
if command[0].upper() == 'WAKE':
print('It cost 10 coins to wake.')
entities.player.spend(10)
return
elif command[0].upper() == 'LOADMOD':
if len(command) > 1:
os.system(command[1])
else:
print('Usage: loadmod <mod>')
elif command[0].upper() == 'VIEWMATRIX':
os.system('cat textbasedgame.py')
print('You are not here...')
time.sleep(5)
return
while True:
command = input('ZZZ/Matrix : ').split(' ')
if command[0].upper() == 'GOTO':
print('Whoops')
#print(str(open('textbasedgame.py', newline=None)).split('\n')[int(command[1])])
elif command[0].upper() == 'GET':
if len(command) > 2:
if len(command) < 4:
if command[1].upper() == 'FOOD':
entities.player.inventory.append(entities.getFood(command[2]))
elif command[1].upper() == 'WEAPON':
entities.player.inventory.append(entities.getWeapon(command[2]))
else:
try:
if command[1].upper() == 'FOOD':
i = 0
while i < int(command[3]):
entities.player.inventory.append(entities.getFood(command[2]))
i += 1
elif command[1].upper() == 'WEAPON':
i = 0
while i < int(command[3]):
entities.player.inventory.append(entities.getWeapon(command[2]))
i += 1
except ValueError:
print('Usage:\tget <type> <object>\n\tget <type:food/weapon> <object> <amount>\nAmount must be integer.')
else:
print('"get" requires 3 arguments. Maximum: 4.')
elif command[0].upper() == 'GOTO':
if command[1].upper() == 'SLEEP':
print('Before you may sleep...')
time.sleep(2.5)
print('You must fight me...')
time.sleep(2.5)
print('I am you...')
time.sleep(2.5)
print('But you are not me.')
time.sleep(10)
utils.fight(entities.you, utils.getBestInventoryWeapon()[1])
sleep()
except KeyboardInterrupt:
try:
print('\nRegaining Train of Thought...\n')
time.sleep(10)
except KeyboardInterrupt:
print('\nSuffered Memory Loss.\n')
rand = random.randint(0,2)
if rand == 0:
entities.player.inventory = []
elif rand == 1:
entities.player.money = 0
else:
entities.player.power = float(0)
return
def sleep():
print('Welcome to the Sleep, but you can\'t come here yet.')
return
def market():
entities.player.location = entities.getLocation('Market')
print('''
+-----------------------------------------------------+
| Welcome to the Market! |
| Type an item\'s name to purchase it. |
| Type "info <item>" for more information on an item. |
| Type "exit" to leave the store. |
+-----------------------------------------------------+''')
isVendor = False
while not isVendor:
vendors = []
for vendor in entities.vendors:
vendors.append(vendor.name)
command = utils.choose('\nPlease type the vendor you want to visit.', vendors)
for vendor in entities.vendors:
if vendor.name == command:
vendorToVisit = vendor
isVendor = True
break
        if isVendor:
            break
        if command == 'exit':
            print('You left the store.')
            return
        print('Vendor or command not found.')
utils.goToVendor(vendorToVisit)
def inventory():
entities.player.location = entities.getLocation('Inventory')
while True:
command = input('Inventory : ').split(' ')
if command[0] == '?' or command[0].upper() == 'HELP':
entities.getHelpMsg('Inventory').printMsg()
elif command[0].upper() == 'LIST':
if len(command) > 1:
if command[1].upper() == 'WEAPONS':
utils.listItems(listedItems=entities.player.inventory, objType=obj.Weapon)
elif command[1].upper() == 'FOOD':
utils.listItems(listedItems=entities.player.inventory, objType=obj.Food)
elif command[1].upper() == 'HEALTH':
print(entities.player.health)
elif command[1].upper() == 'MONEY':
print(entities.player.money)
else:
print('Usage: list\nlist <type>')
else:
utils.listItems(listedItems=entities.player.inventory)
elif command[0].upper() == 'EAT':
            failed = True
for item in entities.player.inventory:
if item.name.upper() == command[1].upper():
if isinstance(item, obj.Food):
entities.player.inventory.remove(item)
entities.player.health += item.hp
print('%s points added to health!' % item.hp)
failed = False
break
if failed:
print('Food not in Inventory.')
elif command[0].upper() == 'EXIT':
print('You left your Inventory.')
break
else:
print('Inventory command "' + command[0] + '" not found. Type "help" for help.')
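# Command sketch (item names below are placeholders): at the Inventory
# prompt the loop above accepts, e.g.,
#   list weapons   -> show only Weapon items
#   eat apple      -> consume a Food item named 'apple'
#   exit           -> leave the Inventory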
|
V1Soft/textbasedgame
|
locations.py
|
Python
|
gpl-3.0
| 10,127
|
[
"VisIt"
] |
f62e9f14d4038fe8a3bbacb39a8c88c33fca0dd4ed9e346676110d6637bf931c
|
""" ProxyManagementAPI has the functions to "talk" to the ProxyManagement service
"""
import os
import datetime
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Utilities import ThreadSafe, DIRACSingleton
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Security.ProxyFile import multiProxyArgument, deleteMultiProxy
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Core.Security.X509Request import X509Request # pylint: disable=import-error
from DIRAC.Core.Security.VOMS import VOMS
from DIRAC.Core.Security import Locations
from DIRAC.Core.DISET.RPCClient import RPCClient
__RCSID__ = "$Id$"
gUsersSync = ThreadSafe.Synchronizer()
gProxiesSync = ThreadSafe.Synchronizer()
gVOMSProxiesSync = ThreadSafe.Synchronizer()
class ProxyManagerClient(object):
__metaclass__ = DIRACSingleton.DIRACSingleton
def __init__(self):
self.__usersCache = DictCache()
self.__proxiesCache = DictCache()
self.__vomsProxiesCache = DictCache()
self.__pilotProxiesCache = DictCache()
self.__filesCache = DictCache(self.__deleteTemporalFile)
def __deleteTemporalFile(self, filename):
try:
os.unlink(filename)
except BaseException:
pass
def clearCaches(self):
self.__usersCache.purgeAll()
self.__proxiesCache.purgeAll()
self.__vomsProxiesCache.purgeAll()
self.__pilotProxiesCache.purgeAll()
def __getSecondsLeftToExpiration(self, expiration, utc=True):
if utc:
td = expiration - datetime.datetime.utcnow()
else:
td = expiration - datetime.datetime.now()
return td.days * 86400 + td.seconds
def __refreshUserCache(self, validSeconds=0):
rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
retVal = rpcClient.getRegisteredUsers(validSeconds)
if not retVal['OK']:
return retVal
data = retVal['Value']
# Update the cache
for record in data:
cacheKey = (record['DN'], record['group'])
self.__usersCache.add(cacheKey,
self.__getSecondsLeftToExpiration(record['expirationtime']),
record)
return S_OK()
@gUsersSync
def userHasProxy(self, userDN, userGroup, validSeconds=0):
"""
Check if a user(DN-group) has a proxy in the proxy management
- Updates internal cache if needed to minimize queries to the
service
"""
cacheKey = (userDN, userGroup)
if self.__usersCache.exists(cacheKey, validSeconds):
return S_OK(True)
    # Get the list of users from the DB with proxies valid for at least validSeconds seconds
gLogger.verbose("Updating list of users in proxy management")
retVal = self.__refreshUserCache(validSeconds)
if not retVal['OK']:
return retVal
return S_OK(self.__usersCache.exists(cacheKey, validSeconds))
@gUsersSync
def getUserPersistence(self, userDN, userGroup, validSeconds=0):
"""
Check if a user(DN-group) has a proxy in the proxy management
- Updates internal cache if needed to minimize queries to the
service
"""
cacheKey = (userDN, userGroup)
userData = self.__usersCache.get(cacheKey, validSeconds)
if userData:
if userData['persistent']:
return S_OK(True)
    # Get the list of users from the DB with proxies valid for at least validSeconds seconds
gLogger.verbose("Updating list of users in proxy management")
retVal = self.__refreshUserCache(validSeconds)
if not retVal['OK']:
return retVal
userData = self.__usersCache.get(cacheKey, validSeconds)
if userData:
return S_OK(userData['persistent'])
return S_OK(False)
def setPersistency(self, userDN, userGroup, persistent):
"""
Set the persistency for user/group
"""
# Hack to ensure bool in the rpc call
persistentFlag = True
if not persistent:
persistentFlag = False
rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
retVal = rpcClient.setPersistency(userDN, userGroup, persistentFlag)
if not retVal['OK']:
return retVal
# Update internal persistency cache
cacheKey = (userDN, userGroup)
record = self.__usersCache.get(cacheKey, 0)
if record:
record['persistent'] = persistentFlag
self.__usersCache.add(cacheKey,
self.__getSecondsLeftToExpiration(record['expirationtime']),
record)
return retVal
def uploadProxy(self, proxy=False, diracGroup=False, chainToConnect=False, restrictLifeTime=0, rfcIfPossible=False):
"""
Upload a proxy to the proxy management service using delegation
"""
# Discover proxy location
if isinstance(proxy, X509Chain):
chain = proxy
proxyLocation = ""
else:
if not proxy:
proxyLocation = Locations.getProxyLocation()
if not proxyLocation:
return S_ERROR("Can't find a valid proxy")
elif isinstance(proxy, basestring):
proxyLocation = proxy
else:
return S_ERROR("Can't find a valid proxy")
chain = X509Chain()
result = chain.loadProxyFromFile(proxyLocation)
if not result['OK']:
return S_ERROR("Can't load %s: %s " % (proxyLocation, result['Message']))
if not chainToConnect:
chainToConnect = chain
# Make sure it's valid
if chain.hasExpired()['Value']:
return S_ERROR("Proxy %s has expired" % proxyLocation)
# rpcClient = RPCClient( "Framework/ProxyManager", proxyChain = chainToConnect )
rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
# Get a delegation request
result = rpcClient.requestDelegationUpload(chain.getRemainingSecs()['Value'], diracGroup)
if not result['OK']:
return result
# Check if the delegation has been granted
if 'Value' not in result or not result['Value']:
if 'proxies' in result:
return S_OK(result['proxies'])
else:
return S_OK()
reqDict = result['Value']
# Generate delegated chain
chainLifeTime = chain.getRemainingSecs()['Value'] - 60
if restrictLifeTime and restrictLifeTime < chainLifeTime:
chainLifeTime = restrictLifeTime
retVal = chain.generateChainFromRequestString(reqDict['request'],
lifetime=chainLifeTime,
diracGroup=diracGroup,
rfc=rfcIfPossible)
if not retVal['OK']:
return retVal
# Upload!
result = rpcClient.completeDelegationUpload(reqDict['id'], retVal['Value'])
if not result['OK']:
return result
if 'proxies' in result:
return S_OK(result['proxies'])
return S_OK()
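  # Usage sketch (the group name is a placeholder): upload the proxy found
  # at the default location for a given DIRAC group:
  #   result = gProxyManager.uploadProxy(diracGroup='dirac_user')
  #   if not result['OK']:
  #     gLogger.error(result['Message'])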
@gProxiesSync
def downloadProxy(self, userDN, userGroup, limited=False, requiredTimeLeft=1200,
cacheTime=14400, proxyToConnect=False, token=False):
"""
Get a proxy Chain from the proxy management
"""
cacheKey = (userDN, userGroup)
if self.__proxiesCache.exists(cacheKey, requiredTimeLeft):
return S_OK(self.__proxiesCache.get(cacheKey))
req = X509Request()
req.generateProxyRequest(limited=limited)
if proxyToConnect:
rpcClient = RPCClient("Framework/ProxyManager", proxyChain=proxyToConnect, timeout=120)
else:
rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
if token:
retVal = rpcClient.getProxyWithToken(userDN, userGroup, req.dumpRequest()['Value'],
long(cacheTime + requiredTimeLeft), token)
else:
retVal = rpcClient.getProxy(userDN, userGroup, req.dumpRequest()['Value'],
long(cacheTime + requiredTimeLeft))
if not retVal['OK']:
return retVal
chain = X509Chain(keyObj=req.getPKey())
retVal = chain.loadChainFromString(retVal['Value'])
if not retVal['OK']:
return retVal
self.__proxiesCache.add(cacheKey, chain.getRemainingSecs()['Value'], chain)
return S_OK(chain)
def downloadProxyToFile(self, userDN, userGroup, limited=False, requiredTimeLeft=1200,
cacheTime=14400, filePath=False, proxyToConnect=False, token=False):
"""
Get a proxy Chain from the proxy management and write it to file
"""
retVal = self.downloadProxy(userDN, userGroup, limited, requiredTimeLeft, cacheTime, proxyToConnect, token)
if not retVal['OK']:
return retVal
chain = retVal['Value']
retVal = self.dumpProxyToFile(chain, filePath)
if not retVal['OK']:
return retVal
retVal['chain'] = chain
return retVal
@gVOMSProxiesSync
def downloadVOMSProxy(self, userDN, userGroup, limited=False, requiredTimeLeft=1200,
cacheTime=14400, requiredVOMSAttribute=False,
proxyToConnect=False, token=False):
"""
Download a proxy if needed and transform it into a VOMS one
"""
cacheKey = (userDN, userGroup, requiredVOMSAttribute, limited)
if self.__vomsProxiesCache.exists(cacheKey, requiredTimeLeft):
return S_OK(self.__vomsProxiesCache.get(cacheKey))
req = X509Request()
req.generateProxyRequest(limited=limited)
if proxyToConnect:
rpcClient = RPCClient("Framework/ProxyManager", proxyChain=proxyToConnect, timeout=120)
else:
rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
if token:
retVal = rpcClient.getVOMSProxyWithToken(userDN, userGroup, req.dumpRequest()['Value'],
long(cacheTime + requiredTimeLeft), token, requiredVOMSAttribute)
else:
retVal = rpcClient.getVOMSProxy(userDN, userGroup, req.dumpRequest()['Value'],
long(cacheTime + requiredTimeLeft), requiredVOMSAttribute)
if not retVal['OK']:
return retVal
chain = X509Chain(keyObj=req.getPKey())
retVal = chain.loadChainFromString(retVal['Value'])
if not retVal['OK']:
return retVal
self.__vomsProxiesCache.add(cacheKey, chain.getRemainingSecs()['Value'], chain)
return S_OK(chain)
def downloadVOMSProxyToFile(self, userDN, userGroup, limited=False, requiredTimeLeft=1200,
cacheTime=14400, requiredVOMSAttribute=False, filePath=False,
proxyToConnect=False, token=False):
"""
Download a proxy if needed, transform it into a VOMS one and write it to file
"""
retVal = self.downloadVOMSProxy(userDN, userGroup, limited, requiredTimeLeft, cacheTime,
requiredVOMSAttribute, proxyToConnect, token)
if not retVal['OK']:
return retVal
chain = retVal['Value']
retVal = self.dumpProxyToFile(chain, filePath)
if not retVal['OK']:
return retVal
retVal['chain'] = chain
return retVal
def getPilotProxyFromDIRACGroup(self, userDN, userGroup, requiredTimeLeft=43200, proxyToConnect=False):
"""
Download a pilot proxy with VOMS extensions depending on the group
"""
# Assign VOMS attribute
vomsAttr = Registry.getVOMSAttributeForGroup(userGroup)
if not vomsAttr:
gLogger.verbose("No voms attribute assigned to group %s when requested pilot proxy" % userGroup)
return self.downloadProxy(userDN, userGroup, limited=False, requiredTimeLeft=requiredTimeLeft,
proxyToConnect=proxyToConnect)
else:
return self.downloadVOMSProxy(userDN, userGroup, limited=False, requiredTimeLeft=requiredTimeLeft,
requiredVOMSAttribute=vomsAttr, proxyToConnect=proxyToConnect)
def getPilotProxyFromVOMSGroup(self, userDN, vomsAttr, requiredTimeLeft=43200, proxyToConnect=False):
"""
Download a pilot proxy with VOMS extensions depending on the group
"""
groups = Registry.getGroupsWithVOMSAttribute(vomsAttr)
if not groups:
return S_ERROR("No group found that has %s as voms attrs" % vomsAttr)
for userGroup in groups:
result = self.downloadVOMSProxy(userDN, userGroup,
limited=False,
requiredTimeLeft=requiredTimeLeft,
requiredVOMSAttribute=vomsAttr,
proxyToConnect=proxyToConnect)
if result['OK']:
return result
return result
def getPayloadProxyFromDIRACGroup(self, userDN, userGroup, requiredTimeLeft, token=False, proxyToConnect=False):
"""
Download a payload proxy with VOMS extensions depending on the group
"""
# Assign VOMS attribute
vomsAttr = Registry.getVOMSAttributeForGroup(userGroup)
if not vomsAttr:
gLogger.verbose("No voms attribute assigned to group %s when requested payload proxy" % userGroup)
return self.downloadProxy(userDN, userGroup, limited=True, requiredTimeLeft=requiredTimeLeft,
proxyToConnect=proxyToConnect, token=token)
else:
return self.downloadVOMSProxy(userDN, userGroup, limited=True, requiredTimeLeft=requiredTimeLeft,
requiredVOMSAttribute=vomsAttr, proxyToConnect=proxyToConnect,
token=token)
def getPayloadProxyFromVOMSGroup(self, userDN, vomsAttr, token, requiredTimeLeft, proxyToConnect=False):
"""
Download a payload proxy with VOMS extensions depending on the VOMS attr
"""
groups = Registry.getGroupsWithVOMSAttribute(vomsAttr)
if not groups:
return S_ERROR("No group found that has %s as voms attrs" % vomsAttr)
userGroup = groups[0]
return self.downloadVOMSProxy(userDN,
userGroup,
limited=True,
requiredTimeLeft=requiredTimeLeft,
requiredVOMSAttribute=vomsAttr,
proxyToConnect=proxyToConnect,
token=token)
def dumpProxyToFile(self, chain, destinationFile=False, requiredTimeLeft=600):
"""
Dump a proxy to a file. It's cached so multiple calls won't generate extra files
"""
result = chain.hash()
if not result['OK']:
return result
cHash = result['Value']
if self.__filesCache.exists(cHash, requiredTimeLeft):
filepath = self.__filesCache.get(cHash)
if filepath and os.path.isfile(filepath):
return S_OK(filepath)
self.__filesCache.delete(cHash)
retVal = chain.dumpAllToFile(destinationFile)
if not retVal['OK']:
return retVal
filename = retVal['Value']
self.__filesCache.add(cHash, chain.getRemainingSecs()['Value'], filename)
return S_OK(filename)
def deleteGeneratedProxyFile(self, chain):
"""
Delete a file generated by a dump
"""
self.__filesCache.delete(chain)
return S_OK()
def requestToken(self, requesterDN, requesterGroup, numUses=1):
"""
Request a number of tokens. usesList must be a list of integers and each integer is the number of uses a token
must have
"""
rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
return rpcClient.generateToken(requesterDN, requesterGroup, numUses)
def renewProxy(self, proxyToBeRenewed=False, minLifeTime=3600, newProxyLifeTime=43200, proxyToConnect=False):
"""
Renew a proxy using the ProxyManager
Arguments:
proxyToBeRenewed: proxy to renew
minLifeTime: if proxy life time is less than this, renew. Skip otherwise
newProxyLifeTime: life time of new proxy
proxyToConnect: proxy to use for connecting to the service
"""
retVal = multiProxyArgument(proxyToBeRenewed)
if not retVal['Value']:
return retVal
proxyToRenewDict = retVal['Value']
secs = proxyToRenewDict['chain'].getRemainingSecs()['Value']
if secs > minLifeTime:
deleteMultiProxy(proxyToRenewDict)
return S_OK()
if not proxyToConnect:
proxyToConnectDict = {'chain': False, 'tempFile': False}
else:
retVal = multiProxyArgument(proxyToConnect)
if not retVal['Value']:
deleteMultiProxy(proxyToRenewDict)
return retVal
proxyToConnectDict = retVal['Value']
userDN = proxyToRenewDict['chain'].getIssuerCert()['Value'].getSubjectDN()['Value']
retVal = proxyToRenewDict['chain'].getDIRACGroup()
if not retVal['OK']:
deleteMultiProxy(proxyToRenewDict)
deleteMultiProxy(proxyToConnectDict)
return retVal
userGroup = retVal['Value']
limited = proxyToRenewDict['chain'].isLimitedProxy()['Value']
voms = VOMS()
retVal = voms.getVOMSAttributes(proxyToRenewDict['chain'])
if not retVal['OK']:
deleteMultiProxy(proxyToRenewDict)
deleteMultiProxy(proxyToConnectDict)
return retVal
vomsAttrs = retVal['Value']
if vomsAttrs:
retVal = self.downloadVOMSProxy(userDN,
userGroup,
limited=limited,
requiredTimeLeft=newProxyLifeTime,
requiredVOMSAttribute=vomsAttrs[0],
proxyToConnect=proxyToConnectDict['chain'])
else:
retVal = self.downloadProxy(userDN,
userGroup,
limited=limited,
requiredTimeLeft=newProxyLifeTime,
proxyToConnect=proxyToConnectDict['chain'])
deleteMultiProxy(proxyToRenewDict)
deleteMultiProxy(proxyToConnectDict)
if not retVal['OK']:
return retVal
chain = retVal['Value']
if not proxyToRenewDict['tempFile']:
return chain.dumpAllToFile(proxyToRenewDict['file'])
return S_OK(chain)
def getDBContents(self, condDict={}):
"""
Get the contents of the db
"""
rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
return rpcClient.getContents(condDict, [['UserDN', 'DESC']], 0, 0)
def getVOMSAttributes(self, chain):
"""
Get the voms attributes for a chain
"""
return VOMS().getVOMSAttributes(chain)
def getUploadedProxyLifeTime(self, DN, group):
"""
Get the remaining seconds for an uploaded proxy
"""
result = self.getDBContents({'UserDN': [DN], 'UserGroup': [group]})
if not result['OK']:
return result
data = result['Value']
if len(data['Records']) == 0:
return S_OK(0)
pNames = list(data['ParameterNames'])
dnPos = pNames.index('UserDN')
groupPos = pNames.index('UserGroup')
expiryPos = pNames.index('ExpirationTime')
for row in data['Records']:
if DN == row[dnPos] and group == row[groupPos]:
td = row[expiryPos] - datetime.datetime.utcnow()
secondsLeft = td.days * 86400 + td.seconds
return S_OK(max(0, secondsLeft))
return S_OK(0)
def getUserProxiesInfo(self):
""" Get the user proxies uploaded info
"""
result = RPCClient("Framework/ProxyManager", timeout=120).getUserProxiesInfo()
if 'rpcStub' in result:
result.pop('rpcStub')
return result
gProxyManager = ProxyManagerClient()
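# Usage sketch (DN, group and lifetime are placeholder values):
#   result = gProxyManager.downloadVOMSProxyToFile('/DC=org/CN=jdoe', 'dirac_prod',
#                                                  requiredTimeLeft=3600)
#   if result['OK']:
#     print 'proxy written to %s' % result['Value']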
|
fstagni/DIRAC
|
FrameworkSystem/Client/ProxyManagerClient.py
|
Python
|
gpl-3.0
| 19,222
|
[
"DIRAC"
] |
29428fb95f26f5c7fdc640b5de59da95499ca9911086ef2c94c7636ba87999c4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Christoph Reimann
crystalExe = '/home/chr/sys/bin/Pcrystal09'
crystalVersion = 'CRYSTAL09 v2.0.1'
mpi = '/home/chr/sys/openmpi-1.4.1/bin/mpirun -np 8'
copyCmd = 'cp'
grepCmd = 'grep'
# basis set restrictions
exponentLowerLimit = 0.2
import minuit
import subprocess as sp
import re
import sys
import os
import shutil
from optparse import OptionParser
# collect parameters in jobConfig dict
jobConfig = {
'mpi' : mpi,
'executable' : crystalExe,
'program' : crystalVersion,
'template' : None,
'required_files' : [],
'exponentLowerLimit' : exponentLowerLimit,
'grep' : grepCmd,
'defaultInputFile' : 'INPUT',
'defaultOutputFile' : 'OUTPUT',
}
def cleanup(scratchdir, status, job_config):
# remove scratch directories
print('Removing temporary directory %s...' % scratchdir)
print(' rm -rf %s' % scratchdir)
sp.call(['rm', '-r', '-f', scratchdir])
def igrep(text, filename):
pipe = sp.Popen([grepCmd, "-i", text, filename], stdout=sp.PIPE, stderr=sp.PIPE)
pipe.wait()
return pipe.stdout.readlines()
class BSOptimizer(object):
"Base class for basis set optimizer"
parameters = {}
def __init__(self, job_config):
self.template_file = job_config['template']
self.job_config = job_config
self.N_runs = 0
self.initial_value = 0.0
self.lowest_value = 0.0
self.last_value = 0.0
self.cmd = self.job_config['mpi'].split(' ')+self.job_config['executable'].split(' ')
    # the ordering of the parameters matters, so sort them by name first
def get_keys(self):
return sorted(self.parameters.keys())
def get_parameters(self):
keys = self.get_keys()
return [self.parameters[k] for k in keys]
def run_calculation(self, *new_parameters):
# default: return 0.0 - all suitable energies will be negative
retval = 0.0
self.N_runs += 1
print("Optimization run %d:" % self.N_runs)
self.write_input(new_parameters, open(self.job_config['defaultInputFile'], "w"))
print("Parameters (initial +++ best +++ current):")
for index, k in enumerate(self.get_keys()):
print(" %-5s %12.7f %12.7f %12.7f" % (k, self.parameters[k], self.best_params[k], new_parameters[index]))
# call external program
output = open(self.job_config['defaultOutputFile'], 'w')
try:
returncode = sp.check_call(args=self.cmd, stdout=output, stderr=output)
except sp.CalledProcessError, e:
returncode = e.returncode
output.close()
return returncode
# fin: class BSOptimizer
class crystalOptimizer(BSOptimizer):
"Basis set optimizer for use with CRYSTAL"
def __init__(self, job_config):
BSOptimizer.__init__(self, job_config)
# regular expressions
# template parameter are specified according to
# exponents: ${as1= 1.0}
        # coefficients: ${ds1= 1.0}
self.parameterKeyValueRegexp = re.compile("\${([^=}]+)=([^}]+)}")
self.energyRegexp = re.compile("-?\d*\.\d*E(?:\+|-)\d*")
self.cyclesRegexp = re.compile("CYCLES\s*(\d+)")
self.parameterKeyRegexp = re.compile("\${([^=}]+)}")
self.parameterRegexp = re.compile("\${([^}]+)}")
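        # Template sketch (names and values are placeholders): a line such as
        #     ${as1= 12.30} ${ds1= 0.45}
        # declares an optimizable exponent 'as1' and coefficient 'ds1' with
        # their initial values; _load_initial_parameters() rewrites it to
        #     ${as1} ${ds1}
        # and records the floats in self.parameters.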
# preparse template file
self.template = self._load_initial_parameters(self.template_file)
self.best_params = dict(self.parameters)
# load initial parameters from the input template
def _load_initial_parameters(self, input_template):
input = open(input_template)
template = input.read()
input.close()
# determine initial parameters
regexp = self.parameterKeyValueRegexp
while(1):
m = regexp.search(template)
if m is None:
break
else:
key = m.groups()[0]
if self.parameters.has_key(key):
print("Error: Parameter '%s' is defined more than once!" % key)
sys.exit(1)
template = template.replace("%s=%s" % m.groups(), key)
self.parameters[key] = float(m.groups()[1])
# fin: while(1)
# prepare internal variables
keys = self.get_keys()
if len(keys) == 0:
print("Error: No parameters to optimize defined in '%s'!" % str(input_template))
sys.exit(1)
# hack: the run_stub function returns a function with the proper number of arguments
arguments = reduce(lambda x,y: "%s, %s" % (x,y), keys)
exec("""def run_stub(obj):
def func(%s):
return obj.run_calculation(%s)
return func
""" % (arguments, arguments))
self.run = run_stub(self)
return template
def write_template_file(self):
name = os.path.splitext(self.template_file)[0] + ".new_template"
print("\nWriting best performing parameter set to new template file %s..." % name)
template_file = open(name, "w")
for line in self.template.split('\n'):
while(1):
# FIXME - use self.parameterRegexp here instead?
m = self.parameterKeyRegexp.search(line)
if m is None:
break
else:
key = m.groups()[0]
line = line.replace('${%s}' % key, "${%s=%14.8f}" % (key, float(self.best_params[key])))
template_file.write("%s\n" % line)
template_file.close()
print("Writing new input file INPUT.best...")
parameters = [self.best_params[k] for k in self.get_keys()]
self.write_input(parameters, open("%s.best" % self.job_config['defaultInputFile'], "w"))
print("Copying %s to %s.best..." % (self.job_config['defaultOutputFile'], self.job_config['defaultInputFile']))
sp.call([copyCmd, '%s' % self.job_config['defaultOutputFile'], '%s.best' % self.job_config['defaultOutputFile']])
def write_input(self, parameters, input):
template = self.template
keys = sorted(self.parameters.keys())
# look for parameter declarations
while(1):
m = self.parameterRegexp.search(template)
if m is None:
break
else:
key = m.groups()[0]
template = template.replace('${%s}' % key, "%14.8f" % float(parameters[keys.index(key)]))
input.write(template)
input.close()
def run_calculation(self, *new_parameters):
if self.N_runs == 0:
# provide initial guess
if not os.path.exists('fort.20'):
shutil.copyfile(self.job_config['fort.20'], 'fort.20')
returncode = BSOptimizer.run_calculation(self, *new_parameters)
if returncode != 0:
print(" --> %s returncode: %s" % (self.job_config['program'], str(returncode)))
# get the final total energy
lines = igrep("SCF ENDED", self.job_config['defaultOutputFile'])
if len(lines) != 1:
print(" ==> Calculation failed!!")
else:
m = self.energyRegexp.search(lines[0])
if m is not None:
total_energy = float(m.group(0))
if self.N_runs == 1:
self.initial_value = total_energy
# determine no. of SCF cycles for statistics
m = self.cyclesRegexp.search(lines[0])
cycles = "XX" if m is None else m.group(1)
print(" --> Total energy: %12.7f (%s cycles)" % (total_energy, cycles))
retval = total_energy
else:
print(" ==> Calculation failed: %s" % lines[0])
print("Mysterious error when looking for total energy!?")
sys.exit(1)
# check that the actual result is not too different from the last one
# this is necessary due to a "feature" in CRYSTAL that sometimes leads to unrealistically large steps
if abs(retval - self.last_value) > abs(self.last_value/10.0):
# ignore special case: start condition
if self.last_value != 0.0:
retval = 0.0
print(" ==> WARNING: Result is too far off and will be ignored")
else:
self.last_value = retval
# save only the best result
if retval < self.lowest_value:
self.lowest_value = retval
shutil.copyfile("fort.9", "fort.20")
self.best_params = {}
for index, k in enumerate(self.get_keys()):
self.best_params[k] = new_parameters[index]
self.write_template_file()
print("")
return retval
# fin: class crystalOptimizer
class optimizationController(object):
name = 'Basis set optimization'
version = 'July 2013'
def __init__(self, optimizer):
print(self.name)
print("Version: %s\n" % self.version)
self.optimizer = optimizer
# function that starts the optimization
def run(self):
# prepare limits and initial errors
# max_exponent: get the largest exponent and double it
max_exponent = max(self.optimizer.best_params.values())*2
steps = {}
for k in self.optimizer.get_keys():
steps[k] = self.optimizer.parameters[k]
# start with huge step sizes
steps['err_%s' % k] = self.optimizer.best_params[k]/100.0*40
# in crystal, orbital exponents should not fall below exponentLowerLimit!
if k.startswith('a'):
steps['limit_%s' % k] = (exponentLowerLimit, max_exponent)
# uncomment in case restrictions are wanted for the coefficients as well:
            # if k.startswith('d'):
# steps['limit_%s' % k] = (-10, 10)
steps.update(self.optimizer.parameters)
m = minuit.Minuit(self.optimizer.run, strategy=0, **steps)
print("Initial values (parameter +++ stepsize +++ limits):")
for k in self.optimizer.get_keys():
v = steps[k]
e = steps["err_%s" % k]
lim = None
if steps.has_key('limit_%s' % k):
lim = steps['limit_%s' % k]
if lim is None:
print("% 5s %12.7f %12.7f" % (k, v, e))
else:
print("% 5s %12.7f %12.7f (%f,%f) " % (k, v, e, lim[0], lim[1]))
print('')
# perform the optimization
try:
m.migrad()
except Exception, e:
print("Exception raised: %s" % str(e))
print("Please check your results carefully!")
print("")
print("Initial total energy: %.7f" % self.optimizer.initial_value)
print("Lowest total energy: %.7f" % self.optimizer.lowest_value)
# fin: class optimizationController
if __name__ == '__main__':
options = {
'-t' : ('--template=', 'template file with initial parameters',
{ 'dest' : 'template' } ),
'-i' : ('--generate-input', 'generate a crystal input from the template and print it',
{ 'dest' : 'generate_input', 'action' : 'store_true', 'default' : False } ),
'-g' : ('--guess=', 'file to use as fort.20 required for GUESSP directive',
{ 'dest' : 'guess', 'default' : None } ),
'-l' : ('--lowerlimit=', 'lower limit to employ during optimization of exponents (default: %.2f)' % exponentLowerLimit,
{ 'dest' : 'lower_limit', 'default' : exponentLowerLimit, 'type' : 'float' } ),
}
parser = OptionParser(usage='usage: %s [options] files' % sys.argv[0])
for k,v in options.iteritems():
parser.add_option(k, v[0], help=v[1], **v[2])
opts, args = parser.parse_args(sys.argv)
if opts.template is None:
print("Error: You must supply a template file that will be used to create the CRYSTAL input!")
sys.exit(1)
else:
jobConfig['template'] = opts.template
files = set(args[1:]+[opts.template])
jobConfig['required_files'] = list(files)
jobConfig['cmdline_options'] = opts
jobConfig['cmdline_arguments'] = args
jobConfig['exponentLowerLimit'] = opts.lower_limit
c = optimizationController(crystalOptimizer(jobConfig))
if opts.generate_input == True:
c.optimizer.write_input(c.optimizer.get_parameters(), sys.stdout)
sys.exit(0)
# check for GUESSP keyword
if 0 == len(igrep('guessp', jobConfig['template'])):
print("Warning: The template file does not contain the GUESSP keyword. It is recommended to restart from fort.20 in order to accelerate the optimization!")
# check for SCFDIR keyword
if 0 != len(igrep('scfdir', jobConfig['template'])):
print("Warning: The template file contains the SCFDIR keyword. However if the system is small enough it is recommended to perform a conventional SCF.")
# check for OPTGEOM
if 0 != len(igrep('optgeom', jobConfig['template'])):
print("Warning: The template file contains the OPTGEOM keyword. Using structure relaxation in combination with the basis set optimization is not supported and thus a waste of time.")
# check if fort.20 file has been specified
if opts.guess is None:
print("Error: You must supply a file containing wave function data that will be used as the initial guess!")
sys.exit(1)
else:
jobConfig['fort.20'] = opts.guess
# make sure that all required files are available
for i in jobConfig['required_files'] + [opts.guess]:
if not os.path.exists(i):
print("Error: Required file '%s' is not accessible" % i)
sys.exit(11)
# start the optimization
print("Performing basis set optimization using the template '%s'.\n" % jobConfig['template'])
c.run()
|
chrr/Pcrystal09_bsopt
|
Pcrystal09_bsopt.py
|
Python
|
bsd-2-clause
| 14,025
|
[
"CRYSTAL"
] |
59bff4bda24fbbdfa39fa1e50d9f6065e236fda5e2621d671051fcefb8823d5c
|
#!/usr/bin/env python3
"""
Clean IMGT germline fasta files for IgBLAST database build
"""
import Bio
from pkg_resources import parse_version
from sys import argv
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
# Get input and output file names
in_file = argv[1]
out_file = argv[2]
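# Usage sketch (file names are placeholders):
#   ./clean_imgtdb.py imgt_germlines.fasta igblast_db.fasta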
# Load sequences into memory and process them
name_set = set()
seq_list = list()
for rec in SeqIO.parse(in_file, 'fasta'):
name = rec.description.split('|')[1]
if name not in name_set:
name_set.add(name)
seq = SeqRecord(rec.seq.ungap('.').upper(), id=name, name=name, description=name)
seq_list.append(seq)
# Overwrite file
with open(out_file, 'w') as out_handle:
if parse_version(Bio.__version__) >= parse_version('1.71'):
# Biopython >= v1.71
SeqIO.write(seq_list, out_handle, format='fasta-2line')
else:
# Biopython < v1.71
writer = SeqIO.FastaIO.FastaWriter(out_handle, wrap=None)
writer.write_file(seq_list)
|
psathyrella/partis
|
packages/immcantation/scripts/clean_imgtdb.py
|
Python
|
gpl-3.0
| 987
|
[
"Biopython"
] |
df740f45227292cf32a5aa362de410f185ef5e95b3eefa2df6320dfd914eae2d
|
#!/usr/bin/python
# -*- mode: python; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
#
# $Id$
#
# Copyright (c) Erik Lindahl, David van der Spoel 2003-2007.
# Coordinate compression (c) by Frans van Hoesel.
# Python wrapper (c) by Roland Schulz
#
# IN contrast to the rest of Gromacs, XDRFILE is distributed under the
# BSD license, so you can use it any way you wish, including closed source:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from xdrfile import *
import sys
#you have to compile with --enable-shared
#and have libxdrfile.so in the LD_LIBRARY_PATH
if len(sys.argv)!=2:
print "Missing file argument\nUsage: sample.py FILE"
sys.exit()
x=xdrfile(sys.argv[1])
for f in x: #iterates frames
print "%8s %8s %8s %8s Step: %8d "%("Atom","X","Y","Z",f.step) #print header
for i,a in enumerate(f.x): #iterate atoms
print "%8d %8.1f %8.1f %8.1f"%(i+1,a[0],a[1],a[2]) #print atom number, x, y, z
|
kmtu/xdrfile-d
|
src/python/sample.py
|
Python
|
bsd-2-clause
| 1,977
|
[
"Gromacs"
] |
b976279f16423f3f3b0084559a1a0a0f1cd14de7c3f56067b8f835f697d5dfaf
|
# maxentropy.py: Routines for fitting maximum entropy models.
# Copyright: Ed Schofield, 2003-2006
# License: BSD-style (see LICENSE.txt in main source directory)
# Future imports must come before any code in 2.5
from __future__ import division
__author__ = "Ed Schofield"
__version__ = '2.1'
__changelog__ = """
This module is an adaptation of "ftwmaxent" by Ed Schofield, first posted
on SourceForge as part of the "textmodeller" project in 2002. The
official repository is now SciPy (since Nov 2005); the SourceForge
ftwmaxent code will not be developed further.
------------
Change log:
Since 2.0:
* Code simplification. Removed dualapprox(), gradapprox() and other
alias methods for bigmodel objects. Use dual(), grad() etc. instead.
* Added support for testing on an external sample during optimization.
* Removed incomplete support for the (slow) GIS algorithm
Since 2.0-alpha4:
* Name change maxent -> maxentropy
* Removed online (sequential) estimation of feature expectations and
variances.
Since v2.0-alpha3:
(1) Name change ftwmaxent -> scipy/maxent
(2) Modification for inclusion in scipy tree. Broke one big class into
two smaller classes, one for small models, the other for large models.
Here a 'small' model is one defined on a sample space small enough to sum
over in practice, whereas a 'large' model is on a sample space that is
high-dimensional and continuous or discrete but too large to sum over,
and requires Monte Carlo simulation.
(3) Refactoring:
self.Eapprox -> self.mu
p_0 -> aux_dist
p0 -> aux_dist
p_dot -> aux_dist_dot
qdot -> p_dot
q_dot -> p_dot
q_theta -> p_theta
E_p -> E_p_tilde
E_q -> E_p
Since v2.0-alpha2:
Using multiple static feature matrices is now supported. The generator
function supplied to generate feature matrices is called matrixtrials'
times each iteration. This is useful for variance estimation of the E
and log Z estimators across the trials, without drawing another sample
each iteration (when staticsample = True).
Since v2.0-alpha1:
Sample feature matrices, if used, are sampled on the fly with a supplied
generator function, optionally multiple times to estimate the sample
variance of the feature expectation estimates. An alternative is the
online estimation alg.
Since v0.8.5:
Added code for online (sequential) estimation of feature expectations and
variances.
"""
import math, types, cPickle
import numpy as np
from scipy import optimize
from scipy.linalg import norm
from scipy.maxentropy.maxentutils import *
class basemodel(object):
"""A base class providing generic functionality for both small and
large maximum entropy models. Cannot be instantiated.
"""
def __init__(self):
self.format = self.__class__.__name__[:4]
if self.format == 'base':
raise ValueError, "this class cannot be instantiated directly"
self.verbose = False
self.maxgtol = 1e-5
        # Required tolerance of gradient on average (closeness to zero) for
# CG optimization:
self.avegtol = 1e-3
# Default tolerance for the other optimization algorithms:
self.tol = 1e-4
# Default tolerance for stochastic approximation: stop if
# ||params_k - params_{k-1}|| < paramstol:
self.paramstol = 1e-5
self.maxiter = 1000
self.maxfun = 1500
self.mindual = -100. # The entropy dual must actually be
# non-negative, but the estimate may be
# slightly out with bigmodel instances
# without implying divergence to -inf
self.callingback = False
self.iters = 0 # the number of iterations so far of the
# optimization algorithm
self.fnevals = 0
self.gradevals = 0
# Variances for a Gaussian prior on the parameters for smoothing
self.sigma2 = None
# Store the duals for each fn evaluation during fitting?
self.storeduals = False
self.duals = {}
self.storegradnorms = False
self.gradnorms = {}
# Do we seek to minimize the KL divergence between the model and a
# prior density p_0? If not, set this to None; then we maximize the
# entropy. If so, set this to an array of the log probability densities
# p_0(x) for each x in the sample space. For bigmodel objects, set this
# to an array of the log probability densities p_0(x) for each x in the
# random sample from the auxiliary distribution.
self.priorlogprobs = None
# By default, use the sample matrix sampleF to estimate the
# entropy dual and its gradient. Otherwise, set self.external to
# the index of the sample feature matrix in the list self.externalFs.
# This applies to 'bigmodel' objects only, but setting this here
# simplifies the code in dual() and grad().
self.external = None
self.externalpriorlogprobs = None
def fit(self, K, algorithm='CG'):
"""Fit the maxent model p whose feature expectations are given
by the vector K.
Model expectations are computed either exactly or using Monte
Carlo simulation, depending on the 'func' and 'grad' parameters
passed to this function.
For 'model' instances, expectations are computed exactly, by summing
over the given sample space. If the sample space is continuous or too
large to iterate over, use the 'bigmodel' class instead.
For 'bigmodel' instances, the model expectations are not computed
exactly (by summing or integrating over a sample space) but
approximately (by Monte Carlo simulation). Simulation is necessary
when the sample space is too large to sum or integrate over in
practice, like a continuous sample space in more than about 4
dimensions or a large discrete space like all possible sentences in a
natural language.
Approximating the expectations by sampling requires an instrumental
distribution that should be close to the model for fast convergence.
The tails should be fatter than the model. This instrumental
distribution is specified by calling setsampleFgen() with a
user-supplied generator function that yields a matrix of features of a
random sample and its log pdf values.
The algorithm can be 'CG', 'BFGS', 'LBFGSB', 'Powell', or
'Nelder-Mead'.
The CG (conjugate gradients) method is the default; it is quite fast
        and requires only linear space in the number of parameters (not
        quadratic, like Newton-based methods).
The BFGS (Broyden-Fletcher-Goldfarb-Shanno) algorithm is a
variable metric Newton method. It is perhaps faster than the CG
method but requires O(N^2) instead of O(N) memory, so it is
infeasible for more than about 10^3 parameters.
The Powell algorithm doesn't require gradients. For small models
it is slow but robust. For big models (where func and grad are
simulated) with large variance in the function estimates, this
may be less robust than the gradient-based algorithms.
"""
dual = self.dual
grad = self.grad
if isinstance(self, bigmodel):
# Ensure the sample matrix has been set
            if not (hasattr(self, 'sampleF') and hasattr(self, 'samplelogprobs')):
raise AttributeError, "first specify a sample feature matrix" \
" using sampleFgen()"
else:
# Ensure the feature matrix for the sample space has been set
if not hasattr(self, 'F'):
raise AttributeError, "first specify a feature matrix" \
" using setfeaturesandsamplespace()"
# First convert K to a numpy array if necessary
K = np.asarray(K, float)
# Store the desired feature expectations as a member variable
self.K = K
# Sanity checks
try:
self.params
except AttributeError:
self.reset(len(K))
else:
assert len(self.params) == len(K)
# Don't reset the number of function and gradient evaluations to zero
# self.fnevals = 0
# self.gradevals = 0
# Make a copy of the parameters
oldparams = np.array(self.params)
callback = self.log
if algorithm == 'CG':
retval = optimize.fmin_cg(dual, oldparams, grad, (), self.avegtol, \
maxiter=self.maxiter, full_output=1, \
disp=self.verbose, retall=0,
callback=callback)
(newparams, fopt, func_calls, grad_calls, warnflag) = retval
elif algorithm == 'LBFGSB':
if callback is not None:
raise NotImplementedError, "L-BFGS-B optimization algorithm"\
" does not yet support callback functions for"\
" testing with an external sample"
retval = optimize.fmin_l_bfgs_b(dual, oldparams, \
grad, args=(), bounds=self.bounds, pgtol=self.maxgtol,
maxfun=self.maxfun)
(newparams, fopt, d) = retval
warnflag, func_calls = d['warnflag'], d['funcalls']
if self.verbose:
print algorithm + " optimization terminated successfully."
print "\tFunction calls: " + str(func_calls)
# We don't have info on how many gradient calls the LBFGSB
# algorithm makes
elif algorithm == 'BFGS':
retval = optimize.fmin_bfgs(dual, oldparams, \
grad, (), self.tol, \
maxiter=self.maxiter, full_output=1, \
disp=self.verbose, retall=0, \
callback=callback)
(newparams, fopt, gopt, Lopt, func_calls, grad_calls, warnflag) = retval
elif algorithm == 'Powell':
retval = optimize.fmin_powell(dual, oldparams, args=(), \
xtol=self.tol, ftol = self.tol, \
maxiter=self.maxiter, full_output=1, \
disp=self.verbose, retall=0, \
callback=callback)
(newparams, fopt, direc, numiter, func_calls, warnflag) = retval
elif algorithm == 'Nelder-Mead':
retval = optimize.fmin(dual, oldparams, args=(), \
xtol=self.tol, ftol = self.tol, \
maxiter=self.maxiter, full_output=1, \
disp=self.verbose, retall=0, \
callback=callback)
(newparams, fopt, numiter, func_calls, warnflag) = retval
else:
raise AttributeError, "the specified algorithm '" + str(algorithm) \
+ "' is unsupported. Options are 'CG', 'LBFGSB', " \
"'Nelder-Mead', 'Powell', and 'BFGS'"
if np.any(self.params != newparams):
self.setparams(newparams)
self.func_calls = func_calls
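    # Illustrative usage sketch (not part of the original module; the sample
    # space, feature functions and target expectations below are invented):
    #
    #     >>> samplespace = ['a', 'b', 'c']
    #     >>> f = [lambda x: x == 'a', lambda x: x in ('a', 'b')]
    #     >>> m = model(f, samplespace)
    #     >>> m.fit([0.2, 0.6], algorithm='CG')   # K = desired expectations
    #     >>> print m.probdist()                  # fitted pmf over samplespace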
def dual(self, params=None, ignorepenalty=False, ignoretest=False):
"""Computes the Lagrangian dual L(theta) of the entropy of the
model, for the given vector theta=params. Minimizing this
function (without constraints) should fit the maximum entropy
model subject to the given constraints. These constraints are
specified as the desired (target) values self.K for the
expectations of the feature statistic.
This function is computed as:
L(theta) = log(Z) - theta^T . K
For 'bigmodel' objects, it estimates the entropy dual without
actually computing p_theta. This is important if the sample
space is continuous or innumerable in practice. We approximate
the norm constant Z using importance sampling as in
[Rosenfeld01whole]. This estimator is deterministic for any
given sample. Note that the gradient of this estimator is equal
to the importance sampling *ratio estimator* of the gradient of
the entropy dual [see my thesis], justifying the use of this
estimator in conjunction with grad() in optimization methods that
use both the function and gradient. Note, however, that
convergence guarantees break down for most optimization
algorithms in the presence of stochastic error.
Note that, for 'bigmodel' objects, the dual estimate is
deterministic for any given sample. It is given as:
L_est = log Z_est - sum_i{theta_i K_i}
where
Z_est = 1/m sum_{x in sample S_0} p_dot(x) / aux_dist(x),
and m = # observations in sample S_0, and K_i = the empirical
expectation E_p_tilde f_i (X) = sum_x {p(x) f_i(x)}.
"""
if self.external is None and not self.callingback:
if self.verbose:
print "Function eval #", self.fnevals
if params is not None:
self.setparams(params)
# Subsumes both small and large cases:
L = self.lognormconst() - np.dot(self.params, self.K)
if self.verbose and self.external is None:
print " dual is ", L
# Use a Gaussian prior for smoothing if requested.
# This adds the penalty term \sum_{i=1}^m \params_i^2 / {2 \sigma_i^2}.
# Define 0 / 0 = 0 here; this allows a variance term of
# sigma_i^2==0 to indicate that feature i should be ignored.
if self.sigma2 is not None and ignorepenalty==False:
ratios = np.nan_to_num(self.params**2 / self.sigma2)
# Why does the above convert inf to 1.79769e+308?
L += 0.5 * ratios.sum()
if self.verbose and self.external is None:
print " regularized dual is ", L
if not self.callingback and self.external is None:
if hasattr(self, 'callback_dual') \
and self.callback_dual is not None:
# Prevent infinite recursion if the callback function
# calls dual():
self.callingback = True
self.callback_dual(self)
self.callingback = False
if self.external is None and not self.callingback:
self.fnevals += 1
# (We don't reset self.params to its prior value.)
return L
# An alias for the dual function:
entropydual = dual
def log(self, params):
"""This method is called every iteration during the optimization
process. It calls the user-supplied callback function (if any),
logs the evolution of the entropy dual and gradient norm, and
checks whether the process appears to be diverging, which would
indicate inconsistent constraints (or, for bigmodel instances,
too large a variance in the estimates).
"""
if self.external is None and not self.callingback:
if self.verbose:
print "Iteration #", self.iters
# Store new dual and/or gradient norm
if not self.callingback:
if self.storeduals:
self.duals[self.iters] = self.dual()
if self.storegradnorms:
self.gradnorms[self.iters] = norm(self.grad())
if not self.callingback and self.external is None:
if hasattr(self, 'callback'):
# Prevent infinite recursion if the callback function
# calls dual():
self.callingback = True
self.callback(self)
self.callingback = False
# Do we perform a test on external sample(s) every iteration?
# Only relevant to bigmodel objects
if hasattr(self, 'testevery') and self.testevery > 0:
if (self.iters + 1) % self.testevery != 0:
if self.verbose:
print "Skipping test on external sample(s) ..."
else:
self.test()
if not self.callingback and self.external is None:
if self.mindual > -np.inf and self.dual() < self.mindual:
raise DivergenceError, "dual is below the threshold 'mindual'" \
" and may be diverging to -inf. Fix the constraints" \
" or lower the threshold!"
self.iters += 1
def grad(self, params=None, ignorepenalty=False):
"""Computes or estimates the gradient of the entropy dual.
"""
if self.verbose and self.external is None and not self.callingback:
print "Grad eval #" + str(self.gradevals)
if params is not None:
self.setparams(params)
G = self.expectations() - self.K
if self.verbose and self.external is None:
print " norm of gradient =", norm(G)
# (We don't reset params to its prior value.)
# Use a Gaussian prior for smoothing if requested. The ith
# partial derivative of the penalty term is \params_i /
# \sigma_i^2. Define 0 / 0 = 0 here; this allows a variance term
# of sigma_i^2==0 to indicate that feature i should be ignored.
if self.sigma2 is not None and ignorepenalty==False:
penalty = self.params / self.sigma2
G += penalty
features_to_kill = np.where(np.isnan(penalty))[0]
G[features_to_kill] = 0.0
if self.verbose and self.external is None:
normG = norm(G)
print " norm of regularized gradient =", normG
if not self.callingback and self.external is None:
if hasattr(self, 'callback_grad') \
and self.callback_grad is not None:
# Prevent infinite recursion if the callback function
# calls grad():
self.callingback = True
self.callback_grad(self)
self.callingback = False
if self.external is None and not self.callingback:
self.gradevals += 1
return G
def crossentropy(self, fx, log_prior_x=None, base=np.e):
"""Returns the cross entropy H(q, p) of the empirical
distribution q of the data (with the given feature matrix fx)
with respect to the model p. For discrete distributions this is
defined as:
H(q, p) = - n^{-1} \sum_{j=1}^n log p(x_j)
where x_j are the data elements assumed drawn from q whose
features are given by the matrix fx = {f(x_j)}, j=1,...,n.
The 'base' argument specifies the base of the logarithm, which
defaults to e.
For continuous distributions this makes no sense!
"""
H = -self.logpdf(fx, log_prior_x).mean()
if base != np.e:
# H' = H * log_{base} (e)
return H / np.log(base)
else:
return H
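    # Illustrative sketch (assumes a fitted model `m` and an (m x n) feature
    # matrix `fx` of held-out data, as described in the docstring above):
    #
    #     >>> H_nats = m.crossentropy(fx)          # base e
    #     >>> H_bits = m.crossentropy(fx, base=2)  # == H_nats / log(2)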
def normconst(self):
"""Returns the normalization constant, or partition function, for
the current model. Warning -- this may be too large to represent;
if so, this will result in numerical overflow. In this case use
lognormconst() instead.
For 'bigmodel' instances, estimates the normalization term as
Z = E_aux_dist [{exp (params.f(X))} / aux_dist(X)] using a sample
from aux_dist.
"""
return np.exp(self.lognormconst())
    def setsmooth(self, sigma):
        """Specifies that the entropy dual and gradient should be
computed with a quadratic penalty term on magnitude of the
parameters. This 'smooths' the model to account for noise in the
target expectation values or to improve robustness when using
simulation to fit models and when the sampling distribution has
high variance. The smoothing mechanism is described in Chen and
Rosenfeld, 'A Gaussian prior for smoothing maximum entropy
models' (1999).
The parameter 'sigma' will be squared and stored as self.sigma2.
"""
self.sigma2 = sigma**2
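    # Illustrative sketch (the value 0.1 is hypothetical): apply a common
    # Gaussian prior standard deviation to every parameter before fitting:
    #
    #     >>> m.setsmooth(0.1)   # stores sigma**2 = 0.01 as m.sigma2
    #     >>> m.fit(K)           # dual/gradient now include the penalty term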
def setparams(self, params):
"""Set the parameter vector to params, replacing the existing
parameters. params must be a list or numpy array of the same
length as the model's feature vector f.
"""
self.params = np.array(params, float) # make a copy
# Log the new params to disk
self.logparams()
# Delete params-specific stuff
self.clearcache()
def clearcache(self):
"""Clears the interim results of computations depending on the
parameters and the sample.
"""
        for var in ['mu', 'logZ', 'logZapprox', 'logv']:
            if hasattr(self, var):
                delattr(self, var)
def reset(self, numfeatures=None):
"""Resets the parameters self.params to zero, clearing the cache
variables dependent on them. Also resets the number of function
and gradient evaluations to zero.
"""
if numfeatures:
m = numfeatures
else:
# Try to infer the number of parameters from existing state
if hasattr(self, 'params'):
m = len(self.params)
elif hasattr(self, 'F'):
m = self.F.shape[0]
elif hasattr(self, 'sampleF'):
m = self.sampleF.shape[0]
elif hasattr(self, 'K'):
m = len(self.K)
else:
raise ValueError, "specify the number of features / parameters"
# Set parameters, clearing cache variables
self.setparams(np.zeros(m, float))
# These bounds on the param values are only effective for the
# L-BFGS-B optimizer:
self.bounds = [(-100., 100.)]*len(self.params)
self.fnevals = 0
self.gradevals = 0
self.iters = 0
self.callingback = False
# Clear the stored duals and gradient norms
self.duals = {}
self.gradnorms = {}
if hasattr(self, 'external_duals'):
self.external_duals = {}
if hasattr(self, 'external_gradnorms'):
self.external_gradnorms = {}
if hasattr(self, 'external'):
self.external = None
def setcallback(self, callback=None, callback_dual=None, \
callback_grad=None):
"""Sets callback functions to be called every iteration, every
function evaluation, or every gradient evaluation. All callback
functions are passed one argument, the current model object.
Note that line search algorithms in e.g. CG make potentially
several function and gradient evaluations per iteration, some of
which we expect to be poor.
"""
self.callback = callback
self.callback_dual = callback_dual
self.callback_grad = callback_grad
def logparams(self):
"""Saves the model parameters if logging has been
enabled and the # of iterations since the last save has reached
self.paramslogfreq.
"""
if not hasattr(self, 'paramslogcounter'):
# Assume beginlogging() was never called
return
self.paramslogcounter += 1
if not (self.paramslogcounter % self.paramslogfreq == 0):
return
# Check whether the params are NaN
if not np.all(self.params == self.params):
raise FloatingPointError, "some of the parameters are NaN"
if self.verbose:
print "Saving parameters ..."
paramsfile = open(self.paramslogfilename + '.' + \
str(self.paramslogcounter) + '.pickle', 'wb')
cPickle.dump(self.params, paramsfile, cPickle.HIGHEST_PROTOCOL)
paramsfile.close()
#self.paramslog += 1
#self.paramslogcounter = 0
if self.verbose:
print "Done."
def beginlogging(self, filename, freq=10):
"""Enable logging params for each fn evaluation to files named
'filename.freq.pickle', 'filename.(2*freq).pickle', ... each
'freq' iterations.
"""
if self.verbose:
print "Logging to files " + filename + "*"
self.paramslogcounter = 0
self.paramslogfilename = filename
self.paramslogfreq = freq
#self.paramslog = 1
def endlogging(self):
"""Stop logging param values whenever setparams() is called.
"""
del self.paramslogcounter
del self.paramslogfilename
del self.paramslogfreq
class model(basemodel):
"""A maximum-entropy (exponential-form) model on a discrete sample
space.
"""
def __init__(self, f=None, samplespace=None):
super(model, self).__init__()
if f is not None and samplespace is not None:
self.setfeaturesandsamplespace(f, samplespace)
elif f is not None and samplespace is None:
raise ValueError, "not supported: specify both features and" \
" sample space or neither"
def setfeaturesandsamplespace(self, f, samplespace):
"""Creates a new matrix self.F of features f of all points in the
sample space. f is a list of feature functions f_i mapping the
sample space to real values. The parameter vector self.params is
initialized to zero.
We also compute f(x) for each x in the sample space and store
them as self.F. This uses lots of memory but is much faster.
This is only appropriate when the sample space is finite.
"""
self.f = f
self.reset(numfeatures=len(f))
self.samplespace = samplespace
self.F = sparsefeaturematrix(f, samplespace, 'csr_matrix')
def lognormconst(self):
"""Compute the log of the normalization constant (partition
function) Z=sum_{x \in samplespace} p_0(x) exp(params . f(x)).
The sample space must be discrete and finite.
"""
# See if it's been precomputed
if hasattr(self, 'logZ'):
return self.logZ
# Has F = {f_i(x_j)} been precomputed?
if not hasattr(self, 'F'):
raise AttributeError, "first create a feature matrix F"
# Good, assume the feature matrix exists
log_p_dot = innerprodtranspose(self.F, self.params)
# Are we minimizing KL divergence?
if self.priorlogprobs is not None:
log_p_dot += self.priorlogprobs
self.logZ = logsumexp(log_p_dot)
return self.logZ
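    # Illustrative note (not in the original module; continues the invented
    # example above): with all parameters zero and no prior, log_p_dot is a
    # vector of zeros, so lognormconst() is logsumexp([0, ..., 0]) = log n:
    #
    #     >>> m2 = model(f, samplespace)   # params initialized to zero
    #     >>> abs(m2.lognormconst() - math.log(len(samplespace))) < 1e-10
    #     True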
def expectations(self):
"""The vector E_p[f(X)] under the model p_params of the vector of
feature functions f_i over the sample space.
"""
# For discrete models, use the representation E_p[f(X)] = p . F
if not hasattr(self, 'F'):
raise AttributeError, "first set the feature matrix F"
# A pre-computed matrix of features exists
p = self.pmf()
return innerprod(self.F, p)
def logpmf(self):
"""Returns an array indexed by integers representing the
logarithms of the probability mass function (pmf) at each point
in the sample space under the current model (with the current
parameter vector self.params).
"""
# Have the features already been computed and stored?
if not hasattr(self, 'F'):
raise AttributeError, "first set the feature matrix F"
# Yes:
# p(x) = exp(params.f(x)) / sum_y[exp params.f(y)]
# = exp[log p_dot(x) - logsumexp{log(p_dot(y))}]
log_p_dot = innerprodtranspose(self.F, self.params)
# Do we have a prior distribution p_0?
if self.priorlogprobs is not None:
log_p_dot += self.priorlogprobs
if not hasattr(self, 'logZ'):
# Compute the norm constant (quickly!)
self.logZ = logsumexp(log_p_dot)
return log_p_dot - self.logZ
def pmf(self):
"""Returns an array indexed by integers representing the values
of the probability mass function (pmf) at each point in the
sample space under the current model (with the current parameter
vector self.params).
Equivalent to exp(self.logpmf())
"""
return arrayexp(self.logpmf())
# An alias for pmf
probdist = pmf
def pmf_function(self, f=None):
"""Returns the pmf p_theta(x) as a function taking values on the
model's sample space. The returned pmf is defined as:
p_theta(x) = exp(theta.f(x) - log Z)
where theta is the current parameter vector self.params. The
returned function p_theta also satisfies
all([p(x) for x in self.samplespace] == pmf()).
The feature statistic f should be a list of functions
        [f_1(x), ..., f_n(x)]. This must be passed unless the model already
contains an equivalent attribute 'model.f'.
Requires that the sample space be discrete and finite, and stored
as self.samplespace as a list or array.
"""
if hasattr(self, 'logZ'):
logZ = self.logZ
else:
logZ = self.lognormconst()
if f is None:
try:
f = self.f
except AttributeError:
raise AttributeError, "either pass a list f of feature" \
" functions or set this as a member variable self.f"
# Do we have a prior distribution p_0?
priorlogpmf = None
if self.priorlogprobs is not None:
try:
priorlogpmf = self.priorlogpmf
except AttributeError:
raise AttributeError, "prior probability mass function not set"
def p(x):
f_x = np.array([f[i](x) for i in range(len(f))], float)
# Do we have a prior distribution p_0?
if priorlogpmf is not None:
priorlogprob_x = priorlogpmf(x)
return math.exp(np.dot(self.params, f_x) + priorlogprob_x \
- logZ)
else:
return math.exp(np.dot(self.params, f_x) - logZ)
return p
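    # Illustrative sketch (continuing the invented example above): the
    # function returned by pmf_function() agrees with the pmf() array:
    #
    #     >>> p = m.pmf_function()
    #     >>> abs(p('a') - m.pmf()[samplespace.index('a')]) < 1e-10
    #     True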
class conditionalmodel(model):
"""A conditional maximum-entropy (exponential-form) model p(x|w) on a
discrete sample space. This is useful for classification problems:
given the context w, what is the probability of each class x?
The form of such a model is
p(x | w) = exp(theta . f(w, x)) / Z(w; theta)
where Z(w; theta) is a normalization term equal to
Z(w; theta) = sum_x exp(theta . f(w, x)).
The sum is over all classes x in the set Y, which must be supplied to
the constructor as the parameter 'samplespace'.
Such a model form arises from maximizing the entropy of a conditional
model p(x | w) subject to the constraints:
K_i = E f_i(W, X)
where the expectation is with respect to the distribution
q(w) p(x | w)
where q(w) is the empirical probability mass function derived from
observations of the context w in a training set. Normally the vector
K = {K_i} of expectations is set equal to the expectation of f_i(w,
x) with respect to the empirical distribution.
This method minimizes the Lagrangian dual L of the entropy, which is
defined for conditional models as
L(theta) = sum_w q(w) log Z(w; theta)
- sum_{w,x} q(w,x) [theta . f(w,x)]
Note that both sums are only over the training set {w,x}, not the
entire sample space, since q(w,x) = 0 for all w,x not in the training
set.
The partial derivatives of L are:
dL / dtheta_i = K_i - E f_i(X, Y)
where the expectation is as defined above.
"""
def __init__(self, F, counts, numcontexts):
"""The F parameter should be a (sparse) m x size matrix, where m
is the number of features and size is |W| * |X|, where |W| is the
number of contexts and |X| is the number of elements X in the
sample space.
        The 'counts' parameter should be a row vector stored as a (1 x
        |W|*|X|) sparse matrix, whose element i*|X|+j is the number of
        occurrences of x_j in context w_i in the training set.
This storage format allows efficient multiplication over all
contexts in one operation.
"""
# Ideally, the 'counts' parameter could be represented as a sparse
# matrix of size C x X, whose ith row # vector contains all points x_j
# in the sample space X in context c_i. For example:
# N = sparse.lil_matrix((len(contexts), len(samplespace)))
# for (c, x) in corpus:
# N[c, x] += 1
# This would be a nicer input format, but computations are more
# efficient internally with one long row vector. What we really need is
# for sparse matrices to offer a .reshape method so this conversion
# could be done internally and transparently. Then the numcontexts
# argument to the conditionalmodel constructor could also be inferred
# from the matrix dimensions.
super(conditionalmodel, self).__init__()
self.F = F
self.numcontexts = numcontexts
        S = F.shape[1] // numcontexts   # number of sample points
assert isinstance(S, int)
# Set the empirical pmf: p_tilde(w, x) = N(w, x) / \sum_c \sum_y N(c, y).
        # This is always a rank-2 beast with only one row (to support either
        # arrays or dense/sparse matrices).
if not hasattr(counts, 'shape'):
# Not an array or dense/sparse matrix
p_tilde = asarray(counts).reshape(1, len(counts))
else:
if counts.ndim == 1:
p_tilde = counts.reshape(1, len(counts))
elif counts.ndim == 2:
# It needs to be flat (a row vector)
if counts.shape[0] > 1:
                    try:
                        # Try converting to a row vector
                        p_tilde = counts.reshape((1, counts.size))
                    except AttributeError:
                        raise ValueError, "the 'counts' object needs to be a" \
                              " row vector (a 1 x n rank-2 array/matrix) or" \
                              " have a .reshape method to convert it into one"
else:
p_tilde = counts
# Make a copy -- don't modify 'counts'
self.p_tilde = p_tilde / p_tilde.sum()
# As an optimization, p_tilde need not be copied or stored at all, since
# it is only used by this function.
self.p_tilde_context = np.empty(numcontexts, float)
for w in xrange(numcontexts):
self.p_tilde_context[w] = self.p_tilde[0, w*S : (w+1)*S].sum()
# Now compute the vector K = (K_i) of expectations of the
# features with respect to the empirical distribution p_tilde(w, x).
# This is given by:
#
# K_i = \sum_{w, x} q(w, x) f_i(w, x)
#
# This is independent of the model parameters.
self.K = flatten(innerprod(self.F, self.p_tilde.transpose()))
self.numsamplepoints = S
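    # Illustrative sketch (the toy corpus is invented; shapes only): building
    # the flat 'counts' row vector for |W| = 2 contexts and |X| = 3 classes,
    # where element w*|X| + x counts occurrences of class x in context w:
    #
    #     >>> import numpy as np
    #     >>> numcontexts, S = 2, 3
    #     >>> counts = np.zeros((1, numcontexts * S))
    #     >>> for (w, x) in [(0, 1), (0, 1), (1, 2)]:
    #     ...     counts[0, w * S + x] += 1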
def lognormconst(self):
"""Compute the elementwise log of the normalization constant
(partition function) Z(w)=sum_{y \in Y(w)} exp(theta . f(w, y)).
The sample space must be discrete and finite. This is a vector
with one element for each context w.
"""
# See if it's been precomputed
if hasattr(self, 'logZ'):
return self.logZ
numcontexts = self.numcontexts
S = self.numsamplepoints
# Has F = {f_i(x_j)} been precomputed?
if not hasattr(self, 'F'):
raise AttributeError, "first create a feature matrix F"
# Good, assume F has been precomputed
log_p_dot = innerprodtranspose(self.F, self.params)
# Are we minimizing KL divergence?
if self.priorlogprobs is not None:
log_p_dot += self.priorlogprobs
self.logZ = np.zeros(numcontexts, float)
for w in xrange(numcontexts):
self.logZ[w] = logsumexp(log_p_dot[w*S: (w+1)*S])
return self.logZ
def dual(self, params=None, ignorepenalty=False):
"""The entropy dual function is defined for conditional models as
L(theta) = sum_w q(w) log Z(w; theta)
- sum_{w,x} q(w,x) [theta . f(w,x)]
or equivalently as
            L(theta) = sum_w q(w) log Z(w; theta) - (theta . K)
where K_i = \sum_{w, x} q(w, x) f_i(w, x), and where q(w) is the
empirical probability mass function derived from observations of the
context w in a training set. Normally q(w, x) will be 1, unless the
same class label is assigned to the same context more than once.
Note that both sums are only over the training set {w,x}, not the
entire sample space, since q(w,x) = 0 for all w,x not in the training
set.
The entropy dual function is proportional to the negative log
likelihood.
Compare to the entropy dual of an unconditional model:
L(theta) = log(Z) - theta^T . K
"""
if not self.callingback:
if self.verbose:
print "Function eval #", self.fnevals
if params is not None:
self.setparams(params)
logZs = self.lognormconst()
L = np.dot(self.p_tilde_context, logZs) - np.dot(self.params, self.K)
if self.verbose and self.external is None:
print " dual is ", L
# Use a Gaussian prior for smoothing if requested.
# This adds the penalty term \sum_{i=1}^m \theta_i^2 / {2 \sigma_i^2}
if self.sigma2 is not None and ignorepenalty==False:
penalty = 0.5 * (self.params**2 / self.sigma2).sum()
L += penalty
if self.verbose and self.external is None:
print " regularized dual is ", L
if not self.callingback:
if hasattr(self, 'callback_dual'):
# Prevent infinite recursion if the callback function calls
# dual():
self.callingback = True
self.callback_dual(self)
self.callingback = False
self.fnevals += 1
# (We don't reset params to its prior value.)
return L
# These do not need to be overridden:
# grad
# pmf
# probdist
def fit(self, algorithm='CG'):
"""Fits the conditional maximum entropy model subject to the
constraints
sum_{w, x} p_tilde(w) p(x | w) f_i(w, x) = k_i
for i=1,...,m, where k_i is the empirical expectation
k_i = sum_{w, x} p_tilde(w, x) f_i(w, x).
"""
# Call base class method
return model.fit(self, self.K, algorithm)
def expectations(self):
"""The vector of expectations of the features with respect to the
distribution p_tilde(w) p(x | w), where p_tilde(w) is the
empirical probability mass function value stored as
self.p_tilde_context[w].
"""
if not hasattr(self, 'F'):
raise AttributeError, "need a pre-computed feature matrix F"
# A pre-computed matrix of features exists
numcontexts = self.numcontexts
S = self.numsamplepoints
p = self.pmf()
# p is now an array representing p(x | w) for each class w. Now we
# multiply the appropriate elements by p_tilde(w) to get the hybrid pmf
# required for conditional modelling:
for w in xrange(numcontexts):
p[w*S : (w+1)*S] *= self.p_tilde_context[w]
# Use the representation E_p[f(X)] = p . F
return flatten(innerprod(self.F, p))
# # We only override to modify the documentation string. The code
# # is the same as for the model class.
# return model.expectations(self)
def logpmf(self):
"""Returns a (sparse) row vector of logarithms of the conditional
probability mass function (pmf) values p(x | c) for all pairs (c,
x), where c are contexts and x are points in the sample space.
The order of these is log p(x | c) = logpmf()[c * numsamplepoints
+ x].
"""
# Have the features already been computed and stored?
if not hasattr(self, 'F'):
raise AttributeError, "first set the feature matrix F"
# p(x | c) = exp(theta.f(x, c)) / sum_c[exp theta.f(x, c)]
# = exp[log p_dot(x) - logsumexp{log(p_dot(y))}]
numcontexts = self.numcontexts
S = self.numsamplepoints
log_p_dot = flatten(innerprodtranspose(self.F, self.params))
# Do we have a prior distribution p_0?
if self.priorlogprobs is not None:
log_p_dot += self.priorlogprobs
if not hasattr(self, 'logZ'):
# Compute the norm constant (quickly!)
self.logZ = np.zeros(numcontexts, float)
for w in xrange(numcontexts):
self.logZ[w] = logsumexp(log_p_dot[w*S : (w+1)*S])
# Renormalize
for w in xrange(numcontexts):
log_p_dot[w*S : (w+1)*S] -= self.logZ[w]
return log_p_dot
class bigmodel(basemodel):
"""A maximum-entropy (exponential-form) model on a large sample
space.
The model expectations are not computed exactly (by summing or
integrating over a sample space) but approximately (by Monte Carlo
estimation). Approximation is necessary when the sample space is too
large to sum or integrate over in practice, like a continuous sample
space in more than about 4 dimensions or a large discrete space like
all possible sentences in a natural language.
Approximating the expectations by sampling requires an instrumental
distribution that should be close to the model for fast convergence.
The tails should be fatter than the model.
"""
def __init__(self):
super(bigmodel, self).__init__()
# Number of sample matrices to generate and use to estimate E and logZ
self.matrixtrials = 1
# Store the lowest dual estimate observed so far in the fitting process
self.bestdual = float('inf')
# Most of the attributes below affect only the stochastic
# approximation procedure. They should perhaps be removed, and made
# arguments of stochapprox() instead.
        # Use Kersten-Deylon accelerated convergence for stochastic approximation
self.deylon = False
# By default, use a stepsize decreasing as k^(-3/4)
self.stepdecreaserate = 0.75
# If true, check convergence using the exact model. Only useful for
# testing small problems (e.g. with different parameters) when
# simulation is unnecessary.
self.exacttest = False
# By default use Ruppert-Polyak averaging for stochastic approximation
self.ruppertaverage = True
# Use the stoch approx scaling modification of Andradottir (1996)
self.andradottir = False
        # Number of iterations to hold the stochastic approximation stepsize
        # a_k at a_0 before decreasing it
self.a_0_hold = 0
# Whether or not to use the same sample for all iterations
self.staticsample = True
# How many iterations of stochastic approximation between testing for
# convergence
self.testconvergefreq = 0
# How many sample matrices to average over when testing for convergence
# in stochastic approx
self.testconvergematrices = 10
# Test for convergence every 'testevery' iterations, using one or
# more external samples. If None, don't test.
self.testevery = None
# self.printevery = 1000
def resample(self):
"""(Re)samples the matrix F of sample features.
"""
if self.verbose >= 3:
print "(sampling)"
# First delete the existing sample matrix to save memory
# This matters, since these can be very large
        for var in ['sampleF', 'samplelogprobs', 'sample']:
            if hasattr(self, var):
                delattr(self, var)
# Now generate a new sample
output = self.sampleFgen.next()
try:
len(output)
except TypeError:
raise ValueError, "output of sampleFgen.next() not recognized"
if len(output) == 2:
# Assume the format is (F, lp)
(self.sampleF, self.samplelogprobs) = output
elif len(output) == 3:
# Assume the format is (F, lp, sample)
(self.sampleF, self.samplelogprobs, self.sample) = output
else:
raise ValueError, "output of sampleFgen.next() not recognized"
# Check whether the number m of features is correct
try:
# The number of features is defined as the length of
# self.params, so first check if it exists:
self.params
m = len(self.params)
except AttributeError:
(m, n) = self.sampleF.shape
self.reset(m)
else:
if self.sampleF.shape[0] != m:
raise ValueError, "the sample feature generator returned" \
" a feature matrix of incorrect dimensions"
if self.verbose >= 3:
print "(done)"
# Now clear the temporary variables that are no longer correct for this
# sample
self.clearcache()
def lognormconst(self):
"""Estimate the normalization constant (partition function) using
the current sample matrix F.
"""
# First see whether logZ has been precomputed
if hasattr(self, 'logZapprox'):
return self.logZapprox
# Compute log v = log [p_dot(s_j)/aux_dist(s_j)] for
# j=1,...,n=|sample| using a precomputed matrix of sample
# features.
logv = self._logv()
# Good, we have our logv. Now:
n = len(logv)
self.logZapprox = logsumexp(logv) - math.log(n)
return self.logZapprox
def expectations(self):
"""Estimates the feature expectations E_p[f(X)] under the current
model p = p_theta using the given sample feature matrix. If
self.staticsample is True, uses the current feature matrix
self.sampleF. If self.staticsample is False or self.matrixtrials
is > 1, draw one or more sample feature matrices F afresh using
the generator function supplied to sampleFgen().
"""
# See if already computed
if hasattr(self, 'mu'):
return self.mu
self.estimate()
return self.mu
def _logv(self):
"""This function helps with caching of interim computational
results. It is designed to be called internally, not by a user.
This is defined as the array of unnormalized importance sampling
weights corresponding to the sample x_j whose features are
represented as the columns of self.sampleF.
        logv_j = log [p_dot(x_j) / q(x_j)],
where p_dot(x_j) = p_0(x_j) exp(theta . f(x_j)) is the
unnormalized pdf value of the point x_j under the current model.
"""
# First see whether logv has been precomputed
if hasattr(self, 'logv'):
return self.logv
# Compute log v = log [p_dot(s_j)/aux_dist(s_j)] for
# j=1,...,n=|sample| using a precomputed matrix of sample
# features.
if self.external is None:
paramsdotF = innerprodtranspose(self.sampleF, self.params)
logv = paramsdotF - self.samplelogprobs
# Are we minimizing KL divergence between the model and a prior
# density p_0?
if self.priorlogprobs is not None:
logv += self.priorlogprobs
else:
e = self.external
paramsdotF = innerprodtranspose(self.externalFs[e], self.params)
logv = paramsdotF - self.externallogprobs[e]
# Are we minimizing KL divergence between the model and a prior
# density p_0?
if self.externalpriorlogprobs is not None:
logv += self.externalpriorlogprobs[e]
# Good, we have our logv. Now:
self.logv = logv
return logv
def estimate(self):
"""This function approximates both the feature expectation vector
E_p f(X) and the log of the normalization term Z with importance
sampling.
It also computes the sample variance of the component estimates
of the feature expectations as: varE = var(E_1, ..., E_T) where T
is self.matrixtrials and E_t is the estimate of E_p f(X)
approximated using the 't'th auxiliary feature matrix.
It doesn't return anything, but stores the member variables
logZapprox, mu and varE. (This is done because some optimization
algorithms retrieve the dual fn and gradient fn in separate
function calls, but we can compute them more efficiently
together.)
It uses a supplied generator sampleFgen whose .next() method
returns features of random observations s_j generated according
to an auxiliary distribution aux_dist. It uses these either in a
matrix (with multiple runs) or with a sequential procedure, with
more updating overhead but potentially stopping earlier (needing
fewer samples). In the matrix case, the features F={f_i(s_j)}
and vector [log_aux_dist(s_j)] of log probabilities are generated
by calling resample().
We use [Rosenfeld01Wholesentence]'s estimate of E_p[f_i] as:
{sum_j p(s_j)/aux_dist(s_j) f_i(s_j) }
/ {sum_j p(s_j) / aux_dist(s_j)}.
Note that this is consistent but biased.
This equals:
{sum_j p_dot(s_j)/aux_dist(s_j) f_i(s_j) }
/ {sum_j p_dot(s_j) / aux_dist(s_j)}
Compute the estimator E_p f_i(X) in log space as:
num_i / denom,
where
num_i = exp(logsumexp(theta.f(s_j) - log aux_dist(s_j)
+ log f_i(s_j)))
and
denom = [n * Zapprox]
where Zapprox = exp(self.lognormconst()).
We can compute the denominator n*Zapprox directly as:
exp(logsumexp(log p_dot(s_j) - log aux_dist(s_j)))
= exp(logsumexp(theta.f(s_j) - log aux_dist(s_j)))
"""
if self.verbose >= 3:
print "(estimating dual and gradient ...)"
# Hereafter is the matrix code
mus = []
logZs = []
for trial in range(self.matrixtrials):
if self.verbose >= 2 and self.matrixtrials > 1:
print "(trial " + str(trial) + " ...)"
# Resample if necessary
if (not self.staticsample) or self.matrixtrials > 1:
self.resample()
logv = self._logv()
n = len(logv)
logZ = self.lognormconst()
logZs.append(logZ)
# We don't need to handle negative values separately,
# because we don't need to take the log of the feature
# matrix sampleF. See my thesis, Section 4.4
logu = logv - logZ
if self.external is None:
averages = innerprod(self.sampleF, arrayexp(logu))
else:
averages = innerprod(self.externalFs[self.external], \
arrayexp(logu))
averages /= n
mus.append(averages)
# Now we have T=trials vectors of the sample means. If trials > 1,
# estimate st dev of means and confidence intervals
ttrials = len(mus) # total number of trials performed
if ttrials == 1:
self.mu = mus[0]
self.logZapprox = logZs[0]
try:
del self.varE # make explicit that this has no meaning
except AttributeError:
pass
return
else:
# The log of the variance of logZ is:
# -log(n-1) + logsumexp(2*log|Z_k - meanZ|)
self.logZapprox = logsumexp(logZs) - math.log(ttrials)
stdevlogZ = np.array(logZs).std()
mus = np.array(mus)
self.varE = columnvariances(mus)
self.mu = columnmeans(mus)
return
def setsampleFgen(self, sampler, staticsample=True):
"""Initializes the Monte Carlo sampler to use the supplied
generator of samples' features and log probabilities. This is an
alternative to defining a sampler in terms of a (fixed size)
feature matrix sampleF and accompanying vector samplelogprobs of
log probabilities.
Calling sampler.next() should generate tuples (F, lp), where F is
an (m x n) matrix of features of the n sample points x_1,...,x_n,
and lp is an array of length n containing the (natural) log
probability density (pdf or pmf) of each point under the
auxiliary sampling distribution.
The output of sampler.next() can optionally be a 3-tuple (F, lp,
sample) instead of a 2-tuple (F, lp). In this case the value
'sample' is then stored as a class variable self.sample. This is
useful for inspecting the output and understanding the model
characteristics.
        If matrixtrials > 1 and staticsample = True (which is useful for
estimating variance between the different feature estimates),
sampler.next() will be called once for each trial
(0,...,matrixtrials) for each iteration. This allows using a set
of feature matrices, each of which stays constant over all
iterations.
We now insist that sampleFgen.next() return the entire sample
feature matrix to be used each iteration to avoid overhead in
extra function calls and memory copying (and extra code).
An alternative was to supply a list of samplers,
sampler=[sampler0, sampler1, ..., sampler_{m-1}, samplerZ], one
for each feature and one for estimating the normalization
constant Z. But this code was unmaintained, and has now been
removed (but it's in Ed's CVS repository :).
Example use:
>>> import spmatrix
>>> model = bigmodel()
>>> def sampler():
... n = 0
... while True:
... f = spmatrix.ll_mat(1,3)
... f[0,0] = n+1; f[0,1] = n+1; f[0,2] = n+1
... yield f, 1.0
... n += 1
...
>>> model.setsampleFgen(sampler())
>>> type(model.sampleFgen)
<type 'generator'>
>>> [model.sampleF[0,i] for i in range(3)]
[1.0, 1.0, 1.0]
We now set matrixtrials as a class property instead, rather than
passing it as an argument to this function, where it can be
written over (perhaps with the default function argument by
accident) when we re-call this func (e.g. to change the matrix
size.)
"""
# if not sequential:
assert type(sampler) is types.GeneratorType
self.sampleFgen = sampler
self.staticsample = staticsample
if staticsample:
self.resample()
def pdf(self, fx):
"""Returns the estimated density p_theta(x) at the point x with
feature statistic fx = f(x). This is defined as
p_theta(x) = exp(theta.f(x)) / Z(theta),
where Z is the estimated value self.normconst() of the partition
function.
"""
return exp(self.logpdf(fx))
def pdf_function(self):
"""Returns the estimated density p_theta(x) as a function p(f)
taking a vector f = f(x) of feature statistics at any point x.
This is defined as:
p_theta(x) = exp(theta.f(x)) / Z
"""
log_Z_est = self.lognormconst()
def p(fx):
return np.exp(innerprodtranspose(fx, self.params) - log_Z_est)
return p
def logpdf(self, fx, log_prior_x=None):
"""Returns the log of the estimated density p(x) = p_theta(x) at
the point x. If log_prior_x is None, this is defined as:
log p(x) = theta.f(x) - log Z
where f(x) is given by the (m x 1) array fx.
If, instead, fx is a 2-d (m x n) array, this function interprets
each of its rows j=0,...,n-1 as a feature vector f(x_j), and
returns an array containing the log pdf value of each point x_j
under the current model.
log Z is estimated using the sample provided with
setsampleFgen().
The optional argument log_prior_x is the log of the prior density
p_0 at the point x (or at each point x_j if fx is 2-dimensional).
The log pdf of the model is then defined as
log p(x) = log p0(x) + theta.f(x) - log Z
and p then represents the model of minimum KL divergence D(p||p0)
instead of maximum entropy.
"""
log_Z_est = self.lognormconst()
if len(fx.shape) == 1:
logpdf = np.dot(self.params, fx) - log_Z_est
else:
logpdf = innerprodtranspose(fx, self.params) - log_Z_est
if log_prior_x is not None:
logpdf += log_prior_x
return logpdf
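    # Illustrative sketch (assumes a fitted bigmodel `bm` and a feature
    # vector `fx` for some point x): pdf() and logpdf() are consistent by
    # construction:
    #
    #     >>> np.allclose(bm.pdf(fx), np.exp(bm.logpdf(fx)))
    #     True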
def stochapprox(self, K):
"""Tries to fit the model to the feature expectations K using
stochastic approximation, with the Robbins-Monro stochastic
approximation algorithm: theta_{k+1} = theta_k + a_k g_k - a_k
e_k where g_k is the gradient vector (= feature expectations E -
K) evaluated at the point theta_k, a_k is the sequence a_k = a_0
/ k, where a_0 is some step size parameter defined as self.a_0 in
the model, and e_k is an unknown error term representing the
uncertainty of the estimate of g_k. We assume e_k has nice
enough properties for the algorithm to converge.
"""
if self.verbose:
print "Starting stochastic approximation..."
# If we have resumed fitting, adopt the previous parameter k
try:
k = self.paramslogcounter
#k = (self.paramslog-1)*self.paramslogfreq
except:
k = 0
try:
a_k = self.a_0
except AttributeError:
raise AttributeError, "first define the initial step size a_0"
avgparams = self.params
if self.exacttest:
# store exact error each testconvergefreq iterations
self.SAerror = []
while True:
k += 1
if k > self.a_0_hold:
if not self.deylon:
n = k - self.a_0_hold
elif k <= 2 + self.a_0_hold: # why <= 2?
# Initialize n for the first non-held iteration
n = k - self.a_0_hold
else:
# Use Kersten-Deylon accelerated SA, based on the rate of
# changes of sign of the gradient. (If frequent swaps, the
# stepsize is too large.)
#n += (np.dot(y_k, y_kminus1) < 0) # an indicator fn
if np.dot(y_k, y_kminus1) < 0:
n += 1
else:
# Store iterations of sign switches (for plotting
# purposes)
try:
self.nosignswitch.append(k)
except AttributeError:
self.nosignswitch = [k]
print "No sign switch at iteration " + str(k)
if self.verbose >= 2:
print "(using Deylon acceleration. n is " + str(n) + " instead of " + str(k - self.a_0_hold) + "...)"
if self.ruppertaverage:
if self.stepdecreaserate is None:
# Use log n / n as the default. Note: this requires a
# different scaling of a_0 than a stepsize decreasing
# as, e.g., n^(-1/2).
a_k = 1.0 * self.a_0 * math.log(n) / n
else:
# I think that with Ruppert averaging, we need a
# stepsize decreasing as n^(-p), where p is in the open
# interval (0.5, 1) for almost sure convergence.
a_k = 1.0 * self.a_0 / (n ** self.stepdecreaserate)
else:
# I think we need a stepsize decreasing as n^-1 for almost
# sure convergence
a_k = 1.0 * self.a_0 / (n ** self.stepdecreaserate)
# otherwise leave step size unchanged
if self.verbose:
print " step size is: " + str(a_k)
self.matrixtrials = 1
self.staticsample = False
if self.andradottir: # use Andradottir (1996)'s scaling?
self.estimate() # resample and reestimate
y_k_1 = self.mu - K
self.estimate() # resample and reestimate
y_k_2 = self.mu - K
y_k = y_k_1 / max(1.0, norm(y_k_2)) + \
y_k_2 / max(1.0, norm(y_k_1))
else:
# Standard Robbins-Monro estimator
if not self.staticsample:
self.estimate() # resample and reestimate
try:
y_kminus1 = y_k # store this for the Deylon acceleration
except NameError:
pass # if we're on iteration k=1, ignore this
y_k = self.mu - K
norm_y_k = norm(y_k)
if self.verbose:
print "SA: after iteration " + str(k)
print " approx dual fn is: " + str(self.logZapprox \
- np.dot(self.params, K))
print " norm(mu_est - k) = " + str(norm_y_k)
# Update params (after the convergence tests too ... don't waste the
# computation.)
if self.ruppertaverage:
# Use a simple average of all estimates so far, which
# Ruppert and Polyak show can converge more rapidly
newparams = self.params - a_k*y_k
avgparams = (k-1.0)/k*avgparams + 1.0/k * newparams
if self.verbose:
print " new params[0:5] are: " + str(avgparams[0:5])
self.setparams(avgparams)
else:
# Use the standard Robbins-Monro estimator
self.setparams(self.params - a_k*y_k)
if k >= self.maxiter:
print "Reached maximum # iterations during stochastic" \
" approximation without convergence."
break
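    # Illustrative sketch (step size and iteration cap are hypothetical;
    # stochapprox() requires self.a_0 to be defined first):
    #
    #     >>> bm.a_0 = 0.1        # initial Robbins-Monro step size
    #     >>> bm.maxiter = 5000
    #     >>> bm.stochapprox(K)   # K: target feature expectations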
def settestsamples(self, F_list, logprob_list, testevery=1, priorlogprob_list=None):
"""Requests that the model be tested every 'testevery' iterations
during fitting using the provided list F_list of feature
matrices, each representing a sample {x_j} from an auxiliary
        distribution q, together with the corresponding log probability
mass or density values log {q(x_j)} in logprob_list. This is
useful as an external check on the fitting process with sample
path optimization, which could otherwise reflect the vagaries of
the single sample being used for optimization, rather than the
population as a whole.
If self.testevery > 1, only perform the test every self.testevery
calls.
If priorlogprob_list is not None, it should be a list of arrays
        of log(p0(x_j)) values, j = 0, ..., n - 1, specifying the prior
distribution p0 for the sample points x_j for each of the test
samples.
"""
# Sanity check
assert len(F_list) == len(logprob_list)
self.testevery = testevery
self.externalFs = F_list
self.externallogprobs = logprob_list
self.externalpriorlogprobs = priorlogprob_list
# Store the dual and mean square error based on the internal and
# external (test) samples. (The internal sample is used
# statically for sample path optimization; the test samples are
# used as a control for the process.) The hash keys are the
# number of function or gradient evaluations that have been made
# before now.
# The mean entropy dual and mean square error estimates among the
# t external (test) samples, where t = len(F_list) =
# len(logprob_list).
self.external_duals = {}
self.external_gradnorms = {}
def test(self):
"""Estimate the dual and gradient on the external samples,
keeping track of the parameters that yield the minimum such dual.
The vector of desired (target) feature expectations is stored as
self.K.
"""
if self.verbose:
print " max(params**2) = " + str((self.params**2).max())
if self.verbose:
print "Now testing model on external sample(s) ..."
# Estimate the entropy dual and gradient for each sample. These
# are not regularized (smoothed).
dualapprox = []
gradnorms = []
for e in xrange(len(self.externalFs)):
self.external = e
self.clearcache()
if self.verbose >= 2:
print "(testing with sample %d)" % e
dualapprox.append(self.dual(ignorepenalty=True, ignoretest=True))
gradnorms.append(norm(self.grad(ignorepenalty=True)))
# Reset to using the normal sample matrix sampleF
self.external = None
self.clearcache()
meandual = np.average(dualapprox,axis=0)
self.external_duals[self.iters] = dualapprox
self.external_gradnorms[self.iters] = gradnorms
if self.verbose:
print "** Mean (unregularized) dual estimate from the %d" \
" external samples is %f" % \
(len(self.externalFs), meandual)
print "** Mean mean square error of the (unregularized) feature" \
" expectation estimates from the external samples =" \
" mean(|| \hat{\mu_e} - k ||,axis=0) =", np.average(gradnorms,axis=0)
# Track the parameter vector params with the lowest mean dual estimate
# so far:
if meandual < self.bestdual:
self.bestdual = meandual
self.bestparams = self.params
if self.verbose:
print "\n\t\t\tStored new minimum entropy dual: %f\n" % meandual
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
stefanv/scipy3
|
scipy/maxentropy/maxentropy.py
|
Python
|
bsd-3-clause
| 67,591
|
[
"Gaussian"
] |
4f7c9b06ff19268b92d97f3b356b491510d35b3000fffcaa3afd86a068f6e1d4
|
#pylint: disable=C0111
#pylint: disable=W0621
from lettuce import world, step
from django.contrib.auth.models import User
from lettuce.django import django_url
from student.models import CourseEnrollment
from common import course_id, course_location
from problems_setup import PROBLEM_DICT
TEST_SECTION_NAME = 'Test Section'
TEST_SUBSECTION_NAME = 'Test Subsection'
@step(u'I am viewing a course with multiple sections')
def view_course_multiple_sections(step):
create_course()
# Add a section to the course to contain problems
section1 = world.ItemFactory.create(parent_location=course_location(world.scenario_dict['COURSE'].number),
display_name=section_name(1))
# Add a section to the course to contain problems
section2 = world.ItemFactory.create(parent_location=course_location(world.scenario_dict['COURSE'].number),
display_name=section_name(2))
place1 = world.ItemFactory.create(parent_location=section1.location,
category='sequential',
display_name=subsection_name(1))
place2 = world.ItemFactory.create(parent_location=section2.location,
category='sequential',
display_name=subsection_name(2))
add_problem_to_course_section('model_course', 'multiple choice', place1.location)
add_problem_to_course_section('model_course', 'drop down', place2.location)
create_user_and_visit_course()
@step(u'I am viewing a section with multiple subsections')
def view_course_multiple_subsections(step):
create_course()
# Add a section to the course to contain problems
section1 = world.ItemFactory.create(parent_location=course_location(world.scenario_dict['COURSE'].number),
display_name=section_name(1))
place1 = world.ItemFactory.create(parent_location=section1.location,
category='sequential',
display_name=subsection_name(1))
place2 = world.ItemFactory.create(parent_location=section1.location,
display_name=subsection_name(2))
add_problem_to_course_section('model_course', 'multiple choice', place1.location)
add_problem_to_course_section('model_course', 'drop down', place2.location)
create_user_and_visit_course()
@step(u'I am viewing a section with multiple sequences')
def view_course_multiple_sequences(step):
create_course()
# Add a section to the course to contain problems
section1 = world.ItemFactory.create(parent_location=course_location(world.scenario_dict['COURSE'].number),
display_name=section_name(1))
place1 = world.ItemFactory.create(parent_location=section1.location,
category='sequential',
display_name=subsection_name(1))
add_problem_to_course_section('model_course', 'multiple choice', place1.location)
add_problem_to_course_section('model_course', 'drop down', place1.location)
create_user_and_visit_course()
@step(u'I click on section "([^"]*)"$')
def click_on_section(step, section):
section_css = 'h3[tabindex="-1"]'
world.css_click(section_css)
subid = "ui-accordion-accordion-panel-{}".format(str(int(section) - 1))
subsection_css = "ul.ui-accordion-content-active[id='{}'] > li > a".format(subid)
world.css_click(subsection_css)
@step(u'I click on subsection "([^"]*)"$')
def click_on_subsection(step, subsection):
subsection_css = 'ul[id="ui-accordion-accordion-panel-0"]> li > a'
world.css_click(subsection_css, index=(int(subsection) - 1))
@step(u'I click on sequence "([^"]*)"$')
def click_on_sequence(step, sequence):
sequence_css = 'a[data-element="%s"]' % sequence
world.css_click(sequence_css)
@step(u'I should see the content of (?:sub)?section "([^"]*)"$')
def see_section_content(step, section):
if section == "2":
text = 'The correct answer is Option 2'
elif section == "1":
text = 'The correct answer is Choice 3'
step.given('I should see "' + text + '" somewhere on the page')
@step(u'I should see the content of sequence "([^"]*)"$')
def see_sequence_content(step, sequence):
step.given('I should see the content of section "2"')
@step(u'I return later')
def return_to_course(step):
step.given('I visit the homepage')
world.click_link("View Course")
world.click_link("Courseware")
@step(u'I should see that I was most recently in section "([^"]*)"$')
def see_recent_section(step, section):
step.given('I should see "You were most recently in %s" somewhere on the page' % subsection_name(int(section)))
#####################
# HELPERS
#####################
def section_name(section):
return TEST_SECTION_NAME + str(section)
def subsection_name(section):
return TEST_SUBSECTION_NAME + str(section)
def create_course():
world.clear_courses()
world.scenario_dict['COURSE'] = world.CourseFactory.create(org='edx', number='model_course', display_name='Test Course')
def create_user_and_visit_course():
world.create_user('robot', 'test')
u = User.objects.get(username='robot')
CourseEnrollment.enroll(u, course_id(world.scenario_dict['COURSE'].number))
world.log_in(username='robot', password='test')
chapter_name = (TEST_SECTION_NAME + "1").replace(" ", "_")
section_name = (TEST_SUBSECTION_NAME + "1").replace(" ", "_")
url = django_url('/courses/edx/model_course/Test_Course/courseware/%s/%s' %
(chapter_name, section_name))
world.browser.visit(url)
def add_problem_to_course_section(course, problem_type, parent_location, extraMeta=None):
'''
Add a problem to the course we have created using factories.
'''
assert(problem_type in PROBLEM_DICT)
# Generate the problem XML using capa.tests.response_xml_factory
factory_dict = PROBLEM_DICT[problem_type]
problem_xml = factory_dict['factory'].build_xml(**factory_dict['kwargs'])
    metadata = {'rerandomize': 'always'} if 'metadata' not in factory_dict else factory_dict['metadata']
if extraMeta:
metadata = dict(metadata, **extraMeta)
# Create a problem item using our generated XML
# We set rerandomize=always in the metadata so that the "Reset" button
# will appear.
world.ItemFactory.create(parent_location=parent_location,
category='problem',
display_name=str(problem_type),
data=problem_xml,
metadata=metadata)
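# Illustrative sketch (not part of the original suite): a hypothetical step
# that combines the helpers above to build a one-section course with a
# single problem. Names mirror the factories used throughout this file.
#
# @step(u'I am viewing a course with one problem')
# def view_course_one_problem(step):
#     create_course()
#     section = world.ItemFactory.create(
#         parent_location=course_location(world.scenario_dict['COURSE'].number),
#         display_name=section_name(1))
#     place = world.ItemFactory.create(parent_location=section.location,
#                                      category='sequential',
#                                      display_name=subsection_name(1))
#     add_problem_to_course_section('model_course', 'multiple choice', place.location)
#     create_user_and_visit_course()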
|
syjeon/new_edx
|
lms/djangoapps/courseware/features/navigation.py
|
Python
|
agpl-3.0
| 6,853
|
[
"VisIt"
] |
db5f36a8d58f6c85d6e3907257524be6a7c9840c4154bd0d28364d69deb3a4bc
|
"""Transform a string with Python-like source code into SymPy expression. """
from __future__ import print_function, division
from .sympy_tokenize import \
generate_tokens, untokenize, TokenError, \
NUMBER, STRING, NAME, OP, ENDMARKER
from keyword import iskeyword
import ast
import re
import unicodedata
import sympy
from sympy.core.compatibility import exec_, StringIO
from sympy.core.basic import Basic, C
_re_repeated = re.compile(r"^(\d*)\.(\d*)\[(\d+)\]$")
def _token_splittable(token):
"""
Predicate for whether a token name can be split into multiple tokens.
A token is splittable if it does not contain an underscore character and
it is not the name of a Greek letter. This is used to implicitly convert
expressions like 'xyz' into 'x*y*z'.
"""
if '_' in token:
return False
else:
try:
return not unicodedata.lookup('GREEK SMALL LETTER ' + token)
except KeyError:
pass
if len(token) > 1:
return True
return False
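# Quick illustration of the predicate above (a sketch, not original code):
# names split unless they contain an underscore or spell a Greek letter.
#
# _token_splittable('xyz')    # -> True: 'xyz' can become x*y*z downstream
# _token_splittable('x_1')    # -> False: the underscore blocks splitting
# _token_splittable('theta')  # -> False: Greek letter names stay whole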
def _token_callable(token, local_dict, global_dict, nextToken=None):
"""
Predicate for whether a token name represents a callable function.
Essentially wraps ``callable``, but looks up the token name in the
locals and globals.
"""
func = local_dict.get(token[1])
if not func:
func = global_dict.get(token[1])
return callable(func) and not isinstance(func, sympy.Symbol)
def _add_factorial_tokens(name, result):
if result == [] or result[-1][1] == '(':
raise TokenError()
beginning = [(NAME, name), (OP, '(')]
end = [(OP, ')')]
diff = 0
length = len(result)
for index, token in enumerate(result[::-1]):
toknum, tokval = token
i = length - index - 1
if tokval == ')':
diff += 1
elif tokval == '(':
diff -= 1
if diff == 0:
if i - 1 >= 0 and result[i - 1][0] == NAME:
return result[:i - 1] + beginning + result[i - 1:] + end
else:
return result[:i] + beginning + result[i:] + end
return result
class AppliedFunction(object):
"""
A group of tokens representing a function and its arguments.
`exponent` is for handling the shorthand sin^2, ln^2, etc.
"""
def __init__(self, function, args, exponent=None):
if exponent is None:
exponent = []
self.function = function
self.args = args
self.exponent = exponent
self.items = ['function', 'args', 'exponent']
def expand(self):
"""Return a list of tokens representing the function"""
result = []
result.append(self.function)
result.extend(self.args)
return result
def __getitem__(self, index):
return getattr(self, self.items[index])
def __repr__(self):
return "AppliedFunction(%s, %s, %s)" % (self.function, self.args,
self.exponent)
class ParenthesisGroup(list):
"""List of tokens representing an expression in parentheses."""
pass
def _flatten(result):
result2 = []
for tok in result:
if isinstance(tok, AppliedFunction):
result2.extend(tok.expand())
else:
result2.append(tok)
return result2
def _group_parentheses(recursor):
def _inner(tokens, local_dict, global_dict):
"""Group tokens between parentheses with ParenthesisGroup.
Also processes those tokens recursively.
"""
result = []
stacks = []
stacklevel = 0
for token in tokens:
if token[0] == OP:
if token[1] == '(':
stacks.append(ParenthesisGroup([]))
stacklevel += 1
elif token[1] == ')':
stacks[-1].append(token)
stack = stacks.pop()
if len(stacks) > 0:
# We don't recurse here since the upper-level stack
# would reprocess these tokens
stacks[-1].extend(stack)
else:
# Recurse here to handle nested parentheses
# Strip off the outer parentheses to avoid an infinite loop
inner = stack[1:-1]
inner = recursor(inner,
local_dict,
global_dict)
parenGroup = [stack[0]] + inner + [stack[-1]]
result.append(ParenthesisGroup(parenGroup))
stacklevel -= 1
continue
if stacklevel:
stacks[-1].append(token)
else:
result.append(token)
return result
return _inner
def _apply_functions(tokens, local_dict, global_dict):
"""Convert a NAME token + ParenthesisGroup into an AppliedFunction.
Note that ParenthesisGroups, if not applied to any function, are
converted back into lists of tokens.
"""
result = []
symbol = None
for tok in tokens:
if tok[0] == NAME:
symbol = tok
result.append(tok)
elif isinstance(tok, ParenthesisGroup):
if symbol and _token_callable(symbol, local_dict, global_dict):
result[-1] = AppliedFunction(symbol, tok)
symbol = None
else:
result.extend(tok)
else:
symbol = None
result.append(tok)
return result
def _implicit_multiplication(tokens, local_dict, global_dict):
"""Implicitly adds '*' tokens.
Cases:
- Two AppliedFunctions next to each other ("sin(x)cos(x)")
- AppliedFunction next to an open parenthesis ("sin x (cos x + 1)")
    - A close parenthesis next to an AppliedFunction ("(x+2)sin x")
- A close parenthesis next to an open parenthesis ("(x+2)(x+3)")
- AppliedFunction next to an implicitly applied function ("sin(x)cos x")
"""
result = []
for tok, nextTok in zip(tokens, tokens[1:]):
result.append(tok)
if (isinstance(tok, AppliedFunction) and
isinstance(nextTok, AppliedFunction)):
result.append((OP, '*'))
elif (isinstance(tok, AppliedFunction) and
nextTok[0] == OP and nextTok[1] == '('):
# Applied function followed by an open parenthesis
result.append((OP, '*'))
elif (tok[0] == OP and tok[1] == ')' and
isinstance(nextTok, AppliedFunction)):
# Close parenthesis followed by an applied function
result.append((OP, '*'))
elif (tok[0] == OP and tok[1] == ')' and
nextTok[0] == NAME):
# Close parenthesis followed by an implicitly applied function
result.append((OP, '*'))
elif (tok[0] == nextTok[0] == OP
and tok[1] == ')' and nextTok[1] == '('):
# Close parenthesis followed by an open parenthesis
result.append((OP, '*'))
elif (isinstance(tok, AppliedFunction) and nextTok[0] == NAME):
# Applied function followed by implicitly applied function
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
nextTok[0] == OP and nextTok[1] == '('):
# Constant followed by parenthesis
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
nextTok[0] == NAME and
not _token_callable(nextTok, local_dict, global_dict)):
# Constant followed by constant
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
(isinstance(nextTok, AppliedFunction) or nextTok[0] == NAME)):
# Constant followed by (implicitly applied) function
result.append((OP, '*'))
if tokens:
result.append(tokens[-1])
return result
def _implicit_application(tokens, local_dict, global_dict):
"""Adds parentheses as needed after functions."""
result = []
appendParen = 0 # number of closing parentheses to add
skip = 0 # number of tokens to delay before adding a ')' (to
# capture **, ^, etc.)
exponentSkip = False # skipping tokens before inserting parentheses to
# work with function exponentiation
for tok, nextTok in zip(tokens, tokens[1:]):
result.append(tok)
if (tok[0] == NAME and
nextTok[0] != OP and
nextTok[0] != ENDMARKER):
if _token_callable(tok, local_dict, global_dict, nextTok):
result.append((OP, '('))
appendParen += 1
# name followed by exponent - function exponentiation
elif (tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**'):
if _token_callable(tok, local_dict, global_dict):
exponentSkip = True
elif exponentSkip:
# if the last token added was an applied function (i.e. the
# power of the function exponent) OR a multiplication (as
# implicit multiplication would have added an extraneous
# multiplication)
if (isinstance(tok, AppliedFunction)
or (tok[0] == OP and tok[1] == '*')):
# don't add anything if the next token is a multiplication
# or if there's already a parenthesis (if parenthesis, still
# stop skipping tokens)
if not (nextTok[0] == OP and nextTok[1] == '*'):
                    if not (nextTok[0] == OP and nextTok[1] == '('):
result.append((OP, '('))
appendParen += 1
exponentSkip = False
elif appendParen:
if nextTok[0] == OP and nextTok[1] in ('^', '**', '*'):
skip = 1
continue
if skip:
skip -= 1
continue
result.append((OP, ')'))
appendParen -= 1
if tokens:
result.append(tokens[-1])
if appendParen:
result.extend([(OP, ')')] * appendParen)
return result
def function_exponentiation(tokens, local_dict, global_dict):
"""Allows functions to be exponentiated, e.g. ``cos**2(x)``.
Example:
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, function_exponentiation)
>>> transformations = standard_transformations + (function_exponentiation,)
>>> parse_expr('sin**4(x)', transformations=transformations)
sin(x)**4
"""
result = []
exponent = []
consuming_exponent = False
level = 0
for tok, nextTok in zip(tokens, tokens[1:]):
if tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**':
if _token_callable(tok, local_dict, global_dict):
consuming_exponent = True
elif consuming_exponent:
exponent.append(tok)
# only want to stop after hitting )
if tok[0] == nextTok[0] == OP and tok[1] == ')' and nextTok[1] == '(':
consuming_exponent = False
# if implicit multiplication was used, we may have )*( instead
if tok[0] == nextTok[0] == OP and tok[1] == '*' and nextTok[1] == '(':
consuming_exponent = False
del exponent[-1]
continue
elif exponent and not consuming_exponent:
if tok[0] == OP:
if tok[1] == '(':
level += 1
elif tok[1] == ')':
level -= 1
if level == 0:
result.append(tok)
result.extend(exponent)
exponent = []
continue
result.append(tok)
if tokens:
result.append(tokens[-1])
if exponent:
result.extend(exponent)
return result
def split_symbols_custom(predicate):
"""Creates a transformation that splits symbol names.
``predicate`` should return True if the symbol name is to be split.
For instance, to retain the default behavior but avoid splitting certain
symbol names, a predicate like this would work:
>>> from sympy.parsing.sympy_parser import (parse_expr, _token_splittable,
... standard_transformations, implicit_multiplication,
... split_symbols_custom)
>>> def can_split(symbol):
... if symbol not in ('list', 'of', 'unsplittable', 'names'):
... return _token_splittable(symbol)
... return False
...
>>> transformation = split_symbols_custom(can_split)
>>> parse_expr('unsplittable', transformations=standard_transformations +
... (transformation, implicit_multiplication))
unsplittable
"""
def _split_symbols(tokens, local_dict, global_dict):
result = []
split = False
for tok in tokens:
if tok[0] == NAME and tok[1] == 'Symbol':
split = True
elif split and tok[0] == NAME:
symbol = tok[1][1:-1]
if predicate(symbol):
for char in symbol:
if char in local_dict or char in global_dict:
# Get rid of the call to Symbol
del result[-2:]
result.extend([(OP, '('), (NAME, "%s" % char), (OP, ')'),
(NAME, 'Symbol'), (OP, '(')])
else:
result.extend([(NAME, "'%s'" % char), (OP, ')'),
(NAME, 'Symbol'), (OP, '(')])
# Delete the last three tokens: get rid of the extraneous
# Symbol( we just added, and also get rid of the last )
# because the closing parenthesis of the original Symbol is
# still there
del result[-3:]
split = False
continue
else:
split = False
result.append(tok)
return result
return _split_symbols
#: Splits symbol names for implicit multiplication.
#:
#: Intended to let expressions like ``xyz`` be parsed as ``x*y*z``. Does not
#: split Greek character names, so ``theta`` will *not* become
#: ``t*h*e*t*a``. Generally this should be used with
#: ``implicit_multiplication``.
split_symbols = split_symbols_custom(_token_splittable)
def implicit_multiplication(result, local_dict, global_dict):
"""Makes the multiplication operator optional in most cases.
Use this before :func:`implicit_application`, otherwise expressions like
``sin 2x`` will be parsed as ``x * sin(2)`` rather than ``sin(2*x)``.
Example:
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_multiplication)
>>> transformations = standard_transformations + (implicit_multiplication,)
>>> parse_expr('3 x y', transformations=transformations)
3*x*y
"""
# These are interdependent steps, so we don't expose them separately
for step in (_group_parentheses(implicit_multiplication),
_apply_functions,
_implicit_multiplication):
result = step(result, local_dict, global_dict)
result = _flatten(result)
return result
def implicit_application(result, local_dict, global_dict):
"""Makes parentheses optional in some cases for function calls.
Use this after :func:`implicit_multiplication`, otherwise expressions
like ``sin 2x`` will be parsed as ``x * sin(2)`` rather than
``sin(2*x)``.
Example:
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_application)
>>> transformations = standard_transformations + (implicit_application,)
>>> parse_expr('cot z + csc z', transformations=transformations)
cot(z) + csc(z)
"""
for step in (_group_parentheses(implicit_application),
_apply_functions,
_implicit_application,):
result = step(result, local_dict, global_dict)
result = _flatten(result)
return result
def implicit_multiplication_application(result, local_dict, global_dict):
"""Allows a slightly relaxed syntax.
- Parentheses for single-argument method calls are optional.
- Multiplication is implicit.
- Symbol names can be split (i.e. spaces are not needed between
symbols).
- Functions can be exponentiated.
Example:
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_multiplication_application)
>>> parse_expr("10sin**2 x**2 + 3xyz + tan theta",
... transformations=(standard_transformations +
... (implicit_multiplication_application,)))
3*x*y*z + 10*sin(x**2)**2 + tan(theta)
"""
for step in (split_symbols, implicit_multiplication,
implicit_application, function_exponentiation):
result = step(result, local_dict, global_dict)
return result
def auto_symbol(tokens, local_dict, global_dict):
"""Inserts calls to ``Symbol`` for undefined variables."""
result = []
prevTok = (None, None)
tokens.append((None, None)) # so zip traverses all tokens
for tok, nextTok in zip(tokens, tokens[1:]):
tokNum, tokVal = tok
nextTokNum, nextTokVal = nextTok
if tokNum == NAME:
name = tokVal
if (name in ['True', 'False', 'None']
or iskeyword(name)
or name in local_dict
# Don't convert attribute access
or (prevTok[0] == OP and prevTok[1] == '.')
# Don't convert keyword arguments
or (prevTok[0] == OP and prevTok[1] in ('(', ',')
and nextTokNum == OP and nextTokVal == '=')):
result.append((NAME, name))
continue
elif name in global_dict:
obj = global_dict[name]
if isinstance(obj, (Basic, type)) or callable(obj):
result.append((NAME, name))
continue
result.extend([
(NAME, 'Symbol'),
(OP, '('),
(NAME, repr(str(name))),
(OP, ')'),
])
else:
result.append((tokNum, tokVal))
prevTok = (tokNum, tokVal)
return result
def factorial_notation(tokens, local_dict, global_dict):
"""Allows standard notation for factorial."""
result = []
prevtoken = ''
for toknum, tokval in tokens:
if toknum == OP:
op = tokval
if op == '!!':
if prevtoken == '!' or prevtoken == '!!':
raise TokenError
result = _add_factorial_tokens('factorial2', result)
elif op == '!':
if prevtoken == '!' or prevtoken == '!!':
raise TokenError
result = _add_factorial_tokens('factorial', result)
else:
result.append((OP, op))
else:
result.append((toknum, tokval))
prevtoken = tokval
return result
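# Example (a sketch): factorial_notation is included in the
# standard_transformations tuple defined below, so plain parse_expr
# understands factorials directly.
#
# >>> from sympy.parsing.sympy_parser import parse_expr
# >>> parse_expr('5!')
# 120
# >>> parse_expr('4!!')  # double factorial, parsed as factorial2(4)
# 8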
def convert_xor(tokens, local_dict, global_dict):
"""Treats XOR, ``^``, as exponentiation, ``**``."""
result = []
for toknum, tokval in tokens:
if toknum == OP:
if tokval == '^':
result.append((OP, '**'))
else:
result.append((toknum, tokval))
else:
result.append((toknum, tokval))
return result
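# Example (a sketch): convert_xor is opt-in and must be added to the
# transformation tuple explicitly.
#
# >>> from sympy.parsing.sympy_parser import (parse_expr,
# ...     standard_transformations, convert_xor)
# >>> parse_expr('2^3', transformations=standard_transformations + (convert_xor,))
# 8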
def auto_number(tokens, local_dict, global_dict):
"""Converts numeric literals to use SymPy equivalents.
Complex numbers use ``I``; integer literals use ``Integer``, float
literals use ``Float``, and repeating decimals use ``Rational``.
"""
result = []
prevtoken = ''
for toknum, tokval in tokens:
if toknum == NUMBER:
number = tokval
postfix = []
if number.endswith('j') or number.endswith('J'):
number = number[:-1]
postfix = [(OP, '*'), (NAME, 'I')]
if '.' in number or (('e' in number or 'E' in number) and
not (number.startswith('0x') or number.startswith('0X'))):
match = _re_repeated.match(number)
if match is not None:
# Clear repeating decimals, e.g. 3.4[31] -> (3 + 4/10 + 31/990)
pre, post, repetend = match.groups()
zeros = '0'*len(post)
post, repetends = [w.lstrip('0') for w in [post, repetend]]
# or else interpreted as octal
a = pre or '0'
b, c = post or '0', '1' + zeros
d, e = repetends, ('9'*len(repetend)) + zeros
seq = [
(OP, '('),
(NAME,
'Integer'), (OP, '('), (NUMBER, a), (OP, ')'),
(OP, '+'),
(NAME, 'Rational'), (OP, '('), (
NUMBER, b), (OP, ','), (NUMBER, c), (OP, ')'),
(OP, '+'),
(NAME, 'Rational'), (OP, '('), (
NUMBER, d), (OP, ','), (NUMBER, e), (OP, ')'),
(OP, ')'),
]
else:
seq = [(NAME, 'Float'), (OP, '('),
(NUMBER, repr(str(number))), (OP, ')')]
else:
seq = [(NAME, 'Integer'), (OP, '('), (
NUMBER, number), (OP, ')')]
result.extend(seq + postfix)
else:
result.append((toknum, tokval))
return result
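# Example (a sketch): auto_number runs as part of standard_transformations,
# so literals become exact SymPy objects before evaluation.
#
# >>> from sympy.parsing.sympy_parser import parse_expr
# >>> parse_expr('1.5')   # Float('1.5') rather than a Python float
# 1.50000000000000
# >>> parse_expr('2j')    # complex literal becomes 2*I
# 2*I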
def rationalize(tokens, local_dict, global_dict):
"""Converts floats into ``Rational``. Run AFTER ``auto_number``."""
result = []
passed_float = False
for toknum, tokval in tokens:
if toknum == NAME:
if tokval == 'Float':
passed_float = True
tokval = 'Rational'
result.append((toknum, tokval))
        elif passed_float and toknum == NUMBER:
passed_float = False
result.append((STRING, tokval))
else:
result.append((toknum, tokval))
return result
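# Example (a sketch): rationalize rewrites the Float calls emitted by
# auto_number, so it must appear after the standard transformations.
#
# >>> from sympy.parsing.sympy_parser import (parse_expr,
# ...     standard_transformations, rationalize)
# >>> parse_expr('0.5', transformations=standard_transformations + (rationalize,))
# 1/2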
#: Standard transformations for :func:`parse_expr`.
#: Inserts calls to :class:`Symbol`, :class:`Integer`, and other SymPy
#: datatypes and allows the use of standard factorial notation (e.g. ``x!``).
standard_transformations = (auto_symbol, auto_number, factorial_notation)
def stringify_expr(s, local_dict, global_dict, transformations):
"""
Converts the string ``s`` to Python code, in ``local_dict``
Generally, ``parse_expr`` should be used.
"""
tokens = []
input_code = StringIO(s.strip())
for toknum, tokval, _, _, _ in generate_tokens(input_code.readline):
tokens.append((toknum, tokval))
for transform in transformations:
tokens = transform(tokens, local_dict, global_dict)
return untokenize(tokens)
def eval_expr(code, local_dict, global_dict):
"""
Evaluate Python code generated by ``stringify_expr``.
Generally, ``parse_expr`` should be used.
"""
expr = eval(
code, global_dict, local_dict) # take local objects in preference
return expr
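# Example (a sketch): the two halves of parse_expr can be driven by hand,
# e.g. to inspect the generated Python code before evaluating it.
#
# >>> global_dict = {}
# >>> exec_('from sympy import *', global_dict)
# >>> code = stringify_expr('x + 1', {}, global_dict, standard_transformations)
# >>> # code is Python source roughly like "Symbol ('x') + Integer (1)"
# >>> eval_expr(code, {}, global_dict)
# x + 1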
def parse_expr(s, local_dict=None, transformations=standard_transformations,
global_dict=None, evaluate=True):
"""Converts the string ``s`` to a SymPy expression, in ``local_dict``
Parameters
==========
s : str
The string to parse.
local_dict : dict, optional
A dictionary of local variables to use when parsing.
global_dict : dict, optional
A dictionary of global variables. By default, this is initialized
with ``from sympy import *``; provide this parameter to override
this behavior (for instance, to parse ``"Q & S"``).
transformations : tuple, optional
A tuple of transformation functions used to modify the tokens of the
parsed expression before evaluation. The default transformations
convert numeric literals into their SymPy equivalents, convert
undefined variables into SymPy symbols, and allow the use of standard
mathematical factorial notation (e.g. ``x!``).
Examples
========
>>> from sympy.parsing.sympy_parser import parse_expr
>>> parse_expr("1/2")
1/2
>>> type(_)
<class 'sympy.core.numbers.Half'>
>>> from sympy.parsing.sympy_parser import standard_transformations,\\
... implicit_multiplication_application
>>> transformations = (standard_transformations +
... (implicit_multiplication_application,))
>>> parse_expr("2x", transformations=transformations)
2*x
See Also
========
stringify_expr, eval_expr, standard_transformations,
implicit_multiplication_application
"""
if local_dict is None:
local_dict = {}
if global_dict is None:
global_dict = {}
exec_('from sympy import *', global_dict)
code = stringify_expr(s, local_dict, global_dict, transformations)
if evaluate is False:
code = compile(evaluateFalse(code), '<string>', 'eval')
return eval_expr(code, local_dict, global_dict)
def evaluateFalse(s):
"""
Replaces operators with the SymPy equivalent and sets evaluate=False.
"""
node = ast.parse(s)
node = EvaluateFalseTransformer().visit(node)
# node is a Module, we want an Expression
node = ast.Expression(node.body[0].value)
return ast.fix_missing_locations(node)
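# Example (a sketch): parse_expr(..., evaluate=False) routes through
# evaluateFalse, keeping the expression in its unevaluated shape.
#
# >>> parse_expr('2 + 2', evaluate=False)
# 2 + 2
# >>> parse_expr('2 + 2')
# 4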
class EvaluateFalseTransformer(ast.NodeTransformer):
operators = {
ast.Add: 'Add',
ast.Mult: 'Mul',
ast.Pow: 'Pow',
ast.Sub: 'Add',
ast.Div: 'Mul',
ast.BitOr: 'Or',
ast.BitAnd: 'And',
ast.BitXor: 'Not',
}
def flatten(self, args, func):
result = []
for arg in args:
if isinstance(arg, ast.Call) and arg.func.id == func:
result.extend(self.flatten(arg.args, func))
else:
result.append(arg)
return result
def visit_BinOp(self, node):
if node.op.__class__ in self.operators:
sympy_class = self.operators[node.op.__class__]
right = self.visit(node.right)
if isinstance(node.op, ast.Sub):
right = ast.UnaryOp(op=ast.USub(), operand=right)
elif isinstance(node.op, ast.Div):
right = ast.Call(
func=ast.Name(id='Pow', ctx=ast.Load()),
args=[right, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))],
keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],
starargs=None,
kwargs=None
)
new_node = ast.Call(
func=ast.Name(id=sympy_class, ctx=ast.Load()),
args=[self.visit(node.left), right],
keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],
starargs=None,
kwargs=None
)
if sympy_class in ('Add', 'Mul'):
# Denest Add or Mul as appropriate
new_node.args = self.flatten(new_node.args, sympy_class)
return new_node
return node
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/sympy/parsing/sympy_parser.py
|
Python
|
gpl-3.0
| 27,927
|
[
"VisIt"
] |
5a134f7e5f72ad3612abba19102e951365535d9196e47cb5c93ceab130f8695d
|
# Copyright 2013 Cloudbase Solutions SRL
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
import time
from hyperv.common.i18n import _, _LE, _LI # noqa
from hyperv.common import log as logging
from hyperv.neutron import constants
from hyperv.neutron import utils
from hyperv.neutron import utilsfactory
LOG = logging.getLogger(__name__)
class HyperVNeutronAgentMixin(object):
def __init__(self, conf=None):
"""Initializes local configuration of the Hyper-V Neutron Agent.
:param conf: dict or dict-like object containing the configuration
details used by this Agent. If None is specified, default
values are used instead. conf format is as follows:
{
'host': string,
'AGENT': {'polling_interval': int,
'local_network_vswitch': string,
'physical_network_vswitch_mappings': array,
'enable_metrics_collection': boolean,
'metrics_max_retries': int},
'SECURITYGROUP': {'enable_security_group': boolean}
}
For more information on the arguments, their meaning and their default
values, visit: http://docs.openstack.org/juno/config-reference/content/
networking-plugin-hyperv_agent.html
"""
super(HyperVNeutronAgentMixin, self).__init__()
self._utils = utilsfactory.get_hypervutils()
self._network_vswitch_map = {}
self._port_metric_retries = {}
self.plugin_rpc = None
conf = conf or {}
agent_conf = conf.get('AGENT', {})
security_conf = conf.get('SECURITYGROUP', {})
self._host = conf.get('host', None)
self._polling_interval = agent_conf.get('polling_interval', 2)
self._local_network_vswitch = agent_conf.get('local_network_vswitch',
'private')
self._phys_net_map = agent_conf.get(
'physical_network_vswitch_mappings', [])
self.enable_metrics_collection = agent_conf.get(
'enable_metrics_collection', False)
self._metrics_max_retries = agent_conf.get('metrics_max_retries', 100)
self.enable_security_groups = security_conf.get(
'enable_security_group', False)
self._load_physical_network_mappings(self._phys_net_map)
def _load_physical_network_mappings(self, phys_net_vswitch_mappings):
self._physical_network_mappings = collections.OrderedDict()
for mapping in phys_net_vswitch_mappings:
parts = mapping.split(':')
if len(parts) != 2:
LOG.debug('Invalid physical network mapping: %s', mapping)
else:
pattern = re.escape(parts[0].strip()).replace('\\*', '.*')
vswitch = parts[1].strip()
self._physical_network_mappings[pattern] = vswitch
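    # Illustrative mapping format (a sketch, not from the original module):
    # each entry is '<physical network pattern>:<vswitch name>', where '*'
    # in the pattern part becomes the regex '.*' above.
    #
    # conf = {'AGENT': {'physical_network_vswitch_mappings':
    #                   ['physnet1:external', '*:private']}}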
def get_agent_configurations(self):
configurations = {'vswitch_mappings': self._physical_network_mappings}
return configurations
def _get_vswitch_for_physical_network(self, phys_network_name):
for pattern in self._physical_network_mappings:
if phys_network_name is None:
phys_network_name = ''
if re.match(pattern, phys_network_name):
return self._physical_network_mappings[pattern]
# Not found in the mappings, the vswitch has the same name
return phys_network_name
    def _get_network_vswitch_map_by_port_id(self, port_id):
        for network_id, vswitch_map in self._network_vswitch_map.items():
            if port_id in vswitch_map['ports']:
                return (network_id, vswitch_map)
        # Avoid a TypeError in callers that unpack the result when the port
        # is not found on this agent.
        return (None, None)
def network_delete(self, context, network_id=None):
LOG.debug("network_delete received. "
"Deleting network %s", network_id)
# The network may not be defined on this agent
if network_id in self._network_vswitch_map:
self._reclaim_local_network(network_id)
else:
LOG.debug("Network %s not defined on agent.", network_id)
def port_delete(self, context, port_id=None):
LOG.debug("port_delete received")
self._port_unbound(port_id)
def port_update(self, context, port=None, network_type=None,
segmentation_id=None, physical_network=None):
LOG.debug("port_update received")
if self.enable_security_groups:
if 'security_groups' in port:
self.sec_groups_agent.refresh_firewall()
self._treat_vif_port(
port['id'], port['network_id'],
network_type, physical_network,
segmentation_id, port['admin_state_up'])
def _get_vswitch_name(self, network_type, physical_network):
if network_type != constants.TYPE_LOCAL:
vswitch_name = self._get_vswitch_for_physical_network(
physical_network)
else:
            vswitch_name = self._local_network_vswitch
return vswitch_name
def _provision_network(self, port_id,
net_uuid, network_type,
physical_network,
segmentation_id):
LOG.info(_LI("Provisioning network %s"), net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
if network_type == constants.TYPE_VLAN:
self._utils.set_switch_external_port_trunk_vlan(
vswitch_name, segmentation_id, constants.TRUNK_ENDPOINT_MODE)
elif network_type == constants.TYPE_FLAT:
# Nothing to do
pass
elif network_type == constants.TYPE_LOCAL:
# TODO(alexpilotti): Check that the switch type is private
# or create it if not existing
pass
else:
raise utils.HyperVException(
msg=(_("Cannot provision unknown network type %(network_type)s"
" for network %(net_uuid)s") %
dict(network_type=network_type, net_uuid=net_uuid)))
map = {
'network_type': network_type,
'vswitch_name': vswitch_name,
'ports': [],
'vlan_id': segmentation_id}
self._network_vswitch_map[net_uuid] = map
def _reclaim_local_network(self, net_uuid):
LOG.info(_LI("Reclaiming local network %s"), net_uuid)
del self._network_vswitch_map[net_uuid]
def _port_bound(self, port_id,
net_uuid,
network_type,
physical_network,
segmentation_id):
LOG.debug("Binding port %s", port_id)
if net_uuid not in self._network_vswitch_map:
self._provision_network(
port_id, net_uuid, network_type,
physical_network, segmentation_id)
map = self._network_vswitch_map[net_uuid]
map['ports'].append(port_id)
self._utils.connect_vnic_to_vswitch(map['vswitch_name'], port_id)
if network_type == constants.TYPE_VLAN:
LOG.info(_LI('Binding VLAN ID %(segmentation_id)s '
'to switch port %(port_id)s'),
dict(segmentation_id=segmentation_id, port_id=port_id))
self._utils.set_vswitch_port_vlan_id(
segmentation_id,
port_id)
elif network_type == constants.TYPE_FLAT:
# Nothing to do
pass
elif network_type == constants.TYPE_LOCAL:
# Nothing to do
pass
else:
LOG.error(_LE('Unsupported network type %s'), network_type)
if self.enable_metrics_collection:
self._utils.enable_port_metrics_collection(port_id)
self._port_metric_retries[port_id] = self._metrics_max_retries
def _port_unbound(self, port_id, vnic_deleted=False):
(net_uuid, map) = self._get_network_vswitch_map_by_port_id(port_id)
if net_uuid not in self._network_vswitch_map:
            LOG.info(_LI('Network %s is not available on this agent'),
                     net_uuid)
return
LOG.debug("Unbinding port %s", port_id)
        self._utils.disconnect_switch_port(port_id, vnic_deleted, True)
        map['ports'].remove(port_id)
        if not map['ports']:
self._reclaim_local_network(net_uuid)
def _port_enable_control_metrics(self):
if not self.enable_metrics_collection:
return
        # iterate over a snapshot, since entries are deleted in the loop
        for port_id in list(self._port_metric_retries):
if self._utils.can_enable_control_metrics(port_id):
self._utils.enable_control_metrics(port_id)
LOG.info(_LI('Port metrics enabled for port: %s'), port_id)
del self._port_metric_retries[port_id]
elif self._port_metric_retries[port_id] < 1:
self._utils.enable_control_metrics(port_id)
LOG.error(_LE('Port metrics raw enabling for port: %s'),
port_id)
del self._port_metric_retries[port_id]
else:
self._port_metric_retries[port_id] -= 1
def _update_ports(self, registered_ports):
ports = self._utils.get_vnic_ids()
if ports == registered_ports:
return
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
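    # Sketch of the delta computed above (illustrative port IDs): plain set
    # arithmetic between the vNICs on the host and the registered ones.
    #
    # registered = {'port-a', 'port-b'}
    # current = {'port-b', 'port-c'}
    # current - registered == {'port-c'}   # added
    # registered - current == {'port-a'}   # removed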
def _treat_vif_port(self, port_id, network_id, network_type,
physical_network, segmentation_id,
admin_state_up):
if self._utils.vnic_port_exists(port_id):
if admin_state_up:
self._port_bound(port_id, network_id, network_type,
physical_network, segmentation_id)
else:
self._port_unbound(port_id)
else:
LOG.debug("No port %s defined on agent.", port_id)
def _treat_devices_added(self, devices):
try:
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context,
devices,
self.agent_id)
except Exception as e:
LOG.debug("Unable to get ports details for "
"devices %(devices)s: %(e)s",
{'devices': devices, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.info(_LI("Adding port %s"), device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: "
"%(device_details)s"),
{'device': device, 'device_details': device_details})
self._treat_vif_port(
device_details['port_id'],
device_details['network_id'],
device_details['network_type'],
device_details['physical_network'],
device_details['segmentation_id'],
device_details['admin_state_up'])
# check if security groups is enabled.
# if not, teardown the security group rules
if self.enable_security_groups:
self.sec_groups_agent.prepare_devices_filter([device])
else:
self._utils.remove_all_security_rules(
device_details['port_id'])
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
self._host)
return False
def _treat_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info(_LI("Removing port %s"), device)
try:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
self._host)
except Exception as e:
LOG.debug("Removing port failed for device %(device)s: %(e)s",
dict(device=device, e=e))
resync = True
continue
self._port_unbound(device, vnic_deleted=True)
return resync
def _process_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info:
resync_a = self._treat_devices_added(port_info['added'])
if 'removed' in port_info:
resync_b = self._treat_devices_removed(port_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def daemon_loop(self):
sync = True
ports = set()
while True:
try:
start = time.time()
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
ports.clear()
sync = False
port_info = self._update_ports(ports)
# notify plugin about port deltas
if port_info:
LOG.debug("Agent loop has new devices!")
# If treat devices fails - must resync with plugin
sync = self._process_network_ports(port_info)
ports = port_info['current']
self._port_enable_control_metrics()
except Exception:
LOG.exception(_LE("Error in agent event loop"))
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self._polling_interval):
time.sleep(self._polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)",
{'polling_interval': self._polling_interval,
'elapsed': elapsed})
|
javacruft/networking-hyperv-debian
|
hyperv/neutron/hyperv_neutron_agent.py
|
Python
|
apache-2.0
| 14,932
|
[
"VisIt"
] |
d1fdf7849476b22d3b29392311db5712b605b97cac9fb3c735dbacf5ad1ff357
|
import sublime
import sublime_plugin
import traceback
import codecs
import os
from datetime import datetime
from threading import Thread
import signal, subprocess
import json
import webbrowser
is_sublime_text_3 = int(sublime.version()) >= 3000
if is_sublime_text_3:
from .base_command import BaseCommand
from .progress_notifier import ProgressNotifier
from .cross_platform_codecs import CrossPlaformCodecs
from .hasher import Hasher
from .gulp_version import GulpVersion
from .dir_context import Dir
from .plugins import PluginList, PluginRegistryCall
else:
from base_command import BaseCommand
from progress_notifier import ProgressNotifier
from cross_platform_codecs import CrossPlaformCodecs
from hasher import Hasher
from gulp_version import GulpVersion
from dir_context import Dir
from plugins import PluginList, PluginRegistryCall
#
# Commands
#
class GulpCommand(BaseCommand):
cache_file_name = ".sublime-gulp.cache"
log_file_name = 'sublime-gulp.log'
allowed_extensions = [".babel.js", ".js"]
def work(self):
self.gulp_files = []
self.list_gulp_files()
def list_gulp_files(self):
self.append_paths()
if len(self.gulp_files) > 0:
self.choose_file()
else:
sufix = "on:\n- %s" % "\n- ".join(self.sercheable_folders) if len(self.sercheable_folders) > 0 else ""
self.error_message("gulpfile not found %s" % sufix)
def append_paths(self):
self.folders = []
for folder_path in self.sercheable_folders:
self.append_to_gulp_files(folder_path)
for inner_folder in self.settings.get("gulpfile_paths", []):
self.append_to_gulp_files(os.path.join(folder_path, inner_folder))
def append_to_gulp_files(self, folder_path):
gulpfile_path = self.get_gulpfile_path(folder_path)
self.folders.append(folder_path)
if os.path.exists(gulpfile_path):
self.gulp_files.append(gulpfile_path)
def choose_file(self):
if len(self.gulp_files) == 1:
self.show_tasks_from_gulp_file(0)
else:
self.show_quick_panel(self.gulp_files, self.show_tasks_from_gulp_file)
def show_tasks_from_gulp_file(self, file_index):
if file_index > -1:
self.working_dir = os.path.dirname(self.gulp_files[file_index])
if self.task_name is not None:
self.run_gulp_task()
else:
self.defer(self.show_tasks)
def show_tasks(self):
self.tasks = self.list_tasks()
if self.tasks is not None:
self.show_quick_panel(self.tasks, self.task_list_callback)
def list_tasks(self):
try:
self.callcount = 0
json_result = self.fetch_json()
except TypeError as e:
self.error_message("Could not read available tasks.\nMaybe the JSON cache (.sublime-gulp.cache) is malformed?")
except Exception as e:
print(traceback.format_exc())
self.error_message(str(e))
else:
tasks = [[name, self.dependencies_text(task)] for name, task in json_result.items()]
            return sorted(tasks)
def dependencies_text(self, task):
return "Dependencies: " + task['dependencies'] if task['dependencies'] else ""
def fetch_json(self):
jsonfilename = os.path.join(self.working_dir, GulpCommand.cache_file_name)
gulpfile = self.get_gulpfile_path(self.working_dir)
data = None
if os.path.exists(jsonfilename):
filesha1 = Hasher.sha1(gulpfile)
json_data = codecs.open(jsonfilename, "r", "utf-8", errors='replace')
try:
data = json.load(json_data)
if gulpfile in data and data[gulpfile]["sha1"] == filesha1:
return data[gulpfile]["tasks"]
finally:
json_data.close()
self.callcount += 1
if self.callcount == 1:
return self.write_to_cache()
if data is None:
raise Exception("Could not write to cache gulpfile.")
raise Exception("Sha1 from gulp cache ({0}) is not equal to calculated ({1}).\nTry erasing the cache and running Gulp again.".format(data[gulpfile]["sha1"], filesha1))
def write_to_cache(self):
package_path = os.path.join(sublime.packages_path(), self.package_name)
process = CrossPlatformProcess(self)
(stdout, stderr) = process.run_sync(r'node "%s/write_tasks_to_cache.js"' % package_path)
if process.failed:
try:
self.write_to_cache_without_js()
except:
if process.returncode() == 127:
raise Exception("\"node\" command not found.\nPlease be sure to have nodejs installed on your system and in your PATH (more info in the README).")
elif stderr:
self.log_errors(stderr)
raise Exception("There was an error running gulp, make sure gulp is running correctly in your project.\nFor more info check the sublime-gulp.log file")
return self.fetch_json()
def write_to_cache_without_js(self):
process = CrossPlatformProcess(self)
(stdout, stderr) = process.run_sync(r'gulp -v')
if process.failed or not GulpVersion(stdout).supports_tasks_simple():
raise Exception("Gulp: Could not get the current gulp version or your gulp CLI version is lower than 3.7.0")
(stdout, stderr) = process.run_sync(r'gulp --tasks-simple')
gulpfile = self.get_gulpfile_path(self.working_dir)
if not stdout:
raise Exception("Gulp: The result of `gulp --tasks-simple` was empty")
self.write_cache_file({
gulpfile: {
"sha1": Hasher.sha1(gulpfile),
"tasks": dict((task, { "name": task, "dependencies": "" }) for task in stdout.split("\n") if task)
}
})
def write_cache_file(self, cache):
cache_path = os.path.join(self.working_dir, GulpCommand.cache_file_name)
with codecs.open(cache_path, "w", "utf-8", errors='replace') as cache_file:
json_cache = json.dumps(cache, ensure_ascii=False)
cache_file.write(json_cache)
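    # Shape of the JSON cache written above (illustrative paths and tasks):
    #
    # {
    #     "/project/gulpfile.js": {
    #         "sha1": "<sha1 of the gulpfile>",
    #         "tasks": {"build": {"name": "build", "dependencies": ""}}
    #     }
    # }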
def get_gulpfile_path(self, base_path):
for extension in GulpCommand.allowed_extensions:
gulpfile_path = os.path.join(base_path, "gulpfile" + extension)
if os.path.exists(gulpfile_path):
return gulpfile_path
return gulpfile_path
def log_errors(self, text):
if not self.settings.get("log_errors", True):
return
log_path = os.path.join(self.working_dir, GulpCommand.log_file_name)
header = "Remember that you can report errors and get help in https://github.com/NicoSantangelo/sublime-gulp" if not os.path.isfile(log_path) else ""
timestamp = str(datetime.now().strftime("%m-%d-%Y %H:%M"))
with codecs.open(log_path, "a", "utf-8", errors='replace') as log_file:
log_file.write(header + "\n\n" + timestamp + ":\n" + text)
def task_list_callback(self, task_index):
if task_index > -1:
self.task_name = self.tasks[task_index][0]
self.task_flag = self.get_flag_from_task_name()
self.run_gulp_task()
def run_gulp_task(self):
task = self.construct_gulp_task()
Thread(target = self.run_process, args = (task, )).start()
def construct_gulp_task(self):
        with open('sublime-gulp-last.cache', 'w') as f:
            f.write(self.task_name + ':' + self.working_dir)
self.show_running_status_in_output_panel()
return r"gulp %s %s" % (self.task_name, self.task_flag)
def run_process(self, task):
process = CrossPlatformProcess(self)
process.run(task)
stdout, stderr = process.communicate(self.append_to_output_view_in_main_thread)
self.defer_sync(lambda: self.finish(stdout, stderr))
def finish(self, stdout, stderr):
finish_message = "gulp %s %s finished %s" % (self.task_name, self.task_flag, "with some errors." if stderr else "!")
self.status_message(finish_message)
if not self.silent:
self.set_output_close_on_timeout()
elif stderr and self.settings.get("show_silent_errors", False):
self.silent = False
self.show_running_status_in_output_panel()
self.append_to_output_view(stdout)
self.append_to_output_view(stderr)
self.silent = True
def show_running_status_in_output_panel(self):
with_flag_text = (' with %s' % self.task_flag) if self.task_flag else ''
self.show_output_panel("Running '%s'%s...\n" % (self.task_name, with_flag_text))
class GulpArbitraryCommand(GulpCommand):
def show_tasks_from_gulp_file(self, file_index):
if file_index > -1:
self.working_dir = os.path.dirname(self.gulp_files[file_index])
self.show_input_panel(caption="gulp", on_done=self.after_task_input)
def after_task_input(self, task_name=None):
if task_name:
self.task_name = task_name
self.task_flag = ''
self.run_gulp_task()
class GulpLastCommand(GulpCommand):
def work(self):
        with open('sublime-gulp-last.cache', 'r') as f:
            last = f.read()
        # split only on the first colon so working directories containing
        # ':' (e.g. Windows drive letters) survive the round trip
        last = last.split(':', 1)
if len(last) == 2:
self.task_name = last[0]
self.working_dir = last[1]
self.task_flag = ''
self.run_gulp_task()
else:
self.error_message('No task found.')
class GulpKillCommand(BaseCommand):
def work(self):
if ProcessCache.empty():
self.status_message("There are no running tasks")
else:
self.show_output_panel("\nFinishing the following running tasks:\n")
ProcessCache.each(lambda process: self.append_to_output_view("$ %s\n" % process.last_command.rstrip()))
ProcessCache.kill_all()
self.append_to_output_view("\nAll running tasks killed!\n")
class GulpShowPanelCommand(BaseCommand):
def work(self):
self.show_panel()
class GulpHidePanelCommand(BaseCommand):
def work(self):
self.close_panel()
class GulpPluginsCommand(BaseCommand):
def work(self):
self.plugins = None
self.request_plugin_list()
def request_plugin_list(self):
progress = ProgressNotifier("Gulp: Working")
thread = PluginRegistryCall()
thread.start()
self.handle_thread(thread, progress)
def handle_thread(self, thread, progress):
if thread.is_alive() and not thread.error:
sublime.set_timeout(lambda: self.handle_thread(thread, progress), 100)
else:
progress.stop()
if thread.result:
plugin_response = json.loads(thread.result.decode('utf-8'))
self.plugins = PluginList(plugin_response)
self.show_quick_panel(self.plugins.quick_panel_list(), self.open_in_browser, font = 0)
else:
self.error_message(self.error_text_for(thread))
def error_text_for(self, thread):
error_tuple = (
"The plugin repository seems to be down.",
"If http://gulpjs.com/plugins is working, please report this issue at the Sublime Gulp repo (https://github.com/NicoSantangelo/sublime-gulp).",
"Thanks!",
thread.error
)
return "\n\n%s\n\n%s\n\n%s\n\n%s" % error_tuple
def open_in_browser(self, index=-1):
if index >= 0 and index < self.plugins.length:
webbrowser.open_new(self.plugins.get(index).get('homepage'))
class GulpDeleteCacheCommand(GulpCommand):
def choose_file(self):
if len(self.gulp_files) == 1:
self.delete_cache(0)
else:
self.show_quick_panel(self.gulp_files, self.delete_cache)
def delete_cache(self, file_index):
if file_index > -1:
self.working_dir = os.path.dirname(self.gulp_files[file_index])
try:
jsonfilename = os.path.join(self.working_dir, GulpCommand.cache_file_name)
if os.path.exists(jsonfilename):
os.remove(jsonfilename)
self.status_message('Cache removed successfully')
except Exception as e:
self.status_message("Could not remove cache: %s" % str(e))
class GulpExitCommand(sublime_plugin.WindowCommand):
def run(self):
try:
self.window.run_command("gulp_kill")
finally:
self.window.run_command("exit")
#
# General purpose Classes.
#
class CrossPlatformProcess():
def __init__(self, sublime_command):
self.working_dir = sublime_command.working_dir
self.nonblocking = sublime_command.nonblocking
self.path = Env.get_path(sublime_command.exec_args)
self.last_command = ""
self.failed = False
def run(self, command):
with Dir.cd(self.working_dir):
self.process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.path, shell=True, preexec_fn=self._preexec_val())
self.last_command = command
ProcessCache.add(self)
return self
def run_sync(self, command):
command = CrossPlaformCodecs.encode_process_command(command)
with Dir.cd(self.working_dir):
self.process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.path, shell=True)
(stdout, stderr) = self.process.communicate()
self.failed = self.process.returncode == 127 or stderr
return (CrossPlaformCodecs.force_decode(stdout), CrossPlaformCodecs.force_decode(stderr))
def _preexec_val(self):
return os.setsid if sublime.platform() != "windows" else None
    def communicate(self, fn=lambda x: None):
stdout, stderr = self.pipe(fn)
self.process.communicate()
self.terminate()
return (stdout, stderr)
def pipe(self, fn):
streams = [self.process.stdout, self.process.stderr]
streams_text = []
if self.nonblocking:
threads = [ThreadWithResult(target=self._pipe_stream, args=(stream, fn)) for stream in streams]
[t.join() for t in threads]
streams_text = [t.result for t in threads]
else:
streams_text = [self._pipe_stream(stream, fn) for stream in streams]
return streams_text
def _pipe_stream(self, stream, fn):
output_text = ""
while True:
line = stream.readline()
if not line: break
output_line = CrossPlaformCodecs.decode_line(line)
output_text += output_line
fn(output_line)
return output_text
def terminate(self):
if self.is_alive():
self.process.terminate()
ProcessCache.remove(self)
def is_alive(self):
return self.process.poll() is None
def returncode(self):
return self.process.returncode
def kill(self):
pid = self.process.pid
if sublime.platform() == "windows":
kill_process = subprocess.Popen(['taskkill', '/F', '/T', '/PID', str(pid)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
kill_process.communicate()
else:
os.killpg(pid, signal.SIGTERM)
ProcessCache.remove(self)
class ProcessCache():
_procs = []
@classmethod
def add(cls, process):
cls._procs.append(process)
@classmethod
def remove(cls, process):
if process in cls._procs:
cls._procs.remove(process)
@classmethod
def kill_all(cls):
cls.each(lambda process: process.kill())
cls.clear()
@classmethod
def each(cls, fn):
for process in cls._procs:
fn(process)
@classmethod
def empty(cls):
return len(cls._procs) == 0
@classmethod
def clear(cls):
del cls._procs[:]
class Env():
@classmethod
def get_path(self, exec_args=False):
env = os.environ.copy()
if exec_args:
path = str(exec_args.get('path', ''))
if path:
env['PATH'] = path
return env
class ThreadWithResult(Thread):
def __init__(self, target, args):
self.result = None
self.target = target
self.args = args
Thread.__init__(self)
self.start()
def run(self):
self.result = self.target(*self.args)
|
nickgzzjr/sublime-gulp
|
gulp.py
|
Python
|
mit
| 16,770
|
[
"GULP"
] |
adf62676d903a4f3ca89f90147cbd9fb03507a293152eb2e0a1304fda44ae904
|
#!/usr/bin/env python
__author__ = 'Mike McCann,Duane Edgington,Reiko Michisaki,Danelle Cline'
__copyright__ = '2017'
__license__ = 'GPL v3'
__contact__ = 'duane at mbari.org'
__doc__ = '''
Master loader for all KISS/CANON April season activities in 2017
Mike McCann, Duane Edgington, Danelle Cline
MBARI 7 April 2017
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime # needed for glider data
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
import timing
cl = CANONLoader('stoqs_canon_april2017', 'KISS CANON Spring 2017',
description='KISS CANON Spring 2017 Experiment in Monterey Bay',
x3dTerrains={
'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'position': '-2822317.31255 -4438600.53640 3786150.85474',
'orientation': '0.89575 -0.31076 -0.31791 1.63772',
'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
'VerticalExaggeration': '10',
},
'https://stoqs.mbari.org/x3d/Monterey25_1x/Monterey25_1x_src_scene.x3d': {
'name': 'Monterey25_1x',
'position': '-2822317.31255 -4438600.53640 3786150.85474',
'orientation': '0.89575 -0.31076 -0.31791 1.63772',
'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
'VerticalExaggeration': '1',
},
},
grdTerrain=os.path.join(parentDir, 'Monterey25.grd')
)
# Set start and end dates for all loads from sources that contain data
# beyond the temporal bounds of the campaign
#
startdate = datetime.datetime(2017, 4, 7) # Fixed start. April 7, 2017
enddate = datetime.datetime(2017, 5, 15) # Fixed end. May 15, 2017
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
#####################################################################
# DORADO
#####################################################################
# special location for dorado data
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2017/netcdf/'
cl.dorado_files = [
'Dorado389_2017_108_01_108_01_decim.nc',
'Dorado389_2017_121_00_121_00_decim.nc',
'Dorado389_2017_124_00_124_00_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume',
'sepCountList', 'mepCountList',
'roll', 'pitch', 'yaw',
]
#####################################################################
# LRAUV
#####################################################################
# Load netCDF files produced (binned, etc.) by Danelle Cline
# These binned files are created with the makeLRAUVNetCDFs.sh script in the
# toNetCDF directory. You must first edit and run that script once to produce
# the binned files before this will work
# Use the default parameters provided by loadLRAUV() calls below
######################################################################
# GLIDERS
######################################################################
# Glider data files from CeNCOOS thredds server
# L_662
cl.l_662_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line67/'
cl.l_662_files = [
'OS_Glider_L_662_20170328_TS.nc' ]
cl.l_662_parms = ['TEMP', 'PSAL', 'FLU2']
cl.l_662_startDatetime = startdate
cl.l_662_endDatetime = enddate
# Glider data files from CeNCOOS thredds server
# L_662a updated parameter names in netCDF file
cl.l_662a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line67/'
cl.l_662a_files = [
'OS_Glider_L_662_20170328_TS.nc' ]
cl.l_662a_parms = ['temperature', 'salinity', 'fluorescence','oxygen']
cl.l_662a_startDatetime = startdate
cl.l_662a_endDatetime = enddate
# SG_539 ## KISS glider from Caltech/JPL
cl.sg539_base = cl.dodsBase + 'Activity/canon/2017_Apr/Platforms/Gliders/SG539/'
cl.sg539_files = ['p539{:04d}.nc'.format(i) for i in range(1, 291)]  ## range end is exclusive, so it is one more than the last file number
cl.sg539_parms = ['temperature', 'salinity']
cl.sg539_startDatetime = startdate
cl.sg539_endDatetime = enddate
# SG_621 ## KISS glider from Caltech/JPL
cl.sg621_base = cl.dodsBase + 'Activity/canon/2017_Apr/Platforms/Gliders/SG621/'
cl.sg621_files = ['p621{:04d}.nc'.format(i) for i in range(1, 291)]  ## range end is exclusive, so it is one more than the last file number
cl.sg621_parms = ['temperature', 'salinity'] # 'aanderaa4330_dissolved_oxygen' throws DAPloader KeyError
cl.sg621_startDatetime = startdate
cl.sg621_endDatetime = enddate
# NPS_34a updated parameter names in netCDF file
## The following loads decimated subset of data telemetered during deployment
cl.nps34a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps34a_files = [ 'OS_Glider_NPS_G34_20170405_TS.nc' ]
cl.nps34a_parms = ['temperature', 'salinity','fluorescence']
cl.nps34a_startDatetime = startdate
cl.nps34a_endDatetime = enddate
# Slocum Teledyne nemesis Glider
## from the IOOS site ## these files proved not to be compatible with the Python loader
## cl.slocum_nemesis_base = 'https://data.ioos.us/gliders/thredds/dodsC/deployments/mbari/Nemesis-20170412T0000/'
## cl.slocum_nemesis_files = [ 'Nemesis-20170412T0000.nc3.nc' ]
## from cencoos directory, single non-aggregated files
cl.slocum_nemesis_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/line66/nemesis_201704/'
cl.slocum_nemesis_files = [
'nemesis_20170426T233417_rt0.nc',
'nemesis_20170426T193433_rt0.nc',
'nemesis_20170426T175101_rt0.nc',
'nemesis_20170426T135031_rt0.nc',
'nemesis_20170426T101456_rt0.nc',
'nemesis_20170426T065328_rt0.nc',
'nemesis_20170426T025437_rt0.nc',
'nemesis_20170425T225257_rt0.nc',
'nemesis_20170425T181501_rt0.nc',
'nemesis_20170425T155145_rt0.nc',
'nemesis_20170425T112030_rt0.nc',
'nemesis_20170425T065720_rt0.nc',
'nemesis_20170425T023329_rt0.nc',
'nemesis_20170425T012718_rt0.nc',
'nemesis_20170424T183523_rt0.nc',
'nemesis_20170424T163853_rt0.nc',
'nemesis_20170424T101051_rt0.nc',
'nemesis_20170424T082924_rt0.nc',
'nemesis_20170424T024219_rt0.nc',
'nemesis_20170424T004146_rt0.nc',
'nemesis_20170423T183602_rt0.nc',
'nemesis_20170423T170338_rt0.nc',
'nemesis_20170423T110527_rt0.nc',
'nemesis_20170423T090902_rt0.nc',
'nemesis_20170423T022952_rt0.nc',
'nemesis_20170423T003332_rt0.nc',
'nemesis_20170422T174553_rt0.nc',
'nemesis_20170422T154625_rt0.nc',
'nemesis_20170422T100914_rt0.nc',
'nemesis_20170422T082446_rt0.nc',
'nemesis_20170422T023332_rt0.nc',
'nemesis_20170422T003714_rt0.nc',
'nemesis_20170421T191814_rt0.nc',
'nemesis_20170421T173951_rt0.nc',
'nemesis_20170421T104922_rt0.nc',
'nemesis_20170421T084951_rt0.nc',
'nemesis_20170421T020423_rt0.nc',
'nemesis_20170421T000452_rt0.nc',
'nemesis_20170420T175634_rt0.nc',
'nemesis_20170420T163615_rt0.nc',
'nemesis_20170420T125233_rt0.nc',
'nemesis_20170420T081202_rt0.nc',
'nemesis_20170420T033108_rt0.nc',
'nemesis_20170419T225941_rt0.nc',
'nemesis_20170419T183219_rt0.nc',
'nemesis_20170419T125701_rt0.nc',
'nemesis_20170419T085215_rt0.nc',
'nemesis_20170419T042720_rt0.nc',
'nemesis_20170418T234312_rt0.nc',
'nemesis_20170418T221752_rt0.nc',
'nemesis_20170418T212940_rt0.nc',
'nemesis_20170418T210333_rt0.nc',
'nemesis_20170418T194024_rt0.nc',
'nemesis_20170418T185432_rt0.nc',
'nemesis_20170418T183124_rt0.nc',
'nemesis_20170418T172154_rt0.nc',
'nemesis_20170418T164352_rt0.nc',
'nemesis_20170418T162547_rt0.nc',
'nemesis_20170418T132214_rt0.nc',
'nemesis_20170418T101901_rt0.nc',
'nemesis_20170418T054425_rt0.nc',
'nemesis_20170418T041209_rt0.nc',
'nemesis_20170417T233719_rt0.nc',
'nemesis_20170417T215856_rt0.nc',
'nemesis_20170417T184524_rt0.nc',
'nemesis_20170417T162824_rt0.nc',
'nemesis_20170417T101213_rt0.nc',
'nemesis_20170417T075255_rt0.nc',
'nemesis_20170417T042017_rt0.nc',
'nemesis_20170417T030853_rt0.nc',
'nemesis_20170417T003843_rt0.nc',
'nemesis_20170416T221424_rt0.nc',
'nemesis_20170416T193428_rt0.nc',
'nemesis_20170416T170011_rt0.nc',
'nemesis_20170416T142835_rt0.nc',
'nemesis_20170416T074059_rt0.nc',
'nemesis_20170416T062946_rt0.nc',
'nemesis_20170415T234216_rt0.nc',
'nemesis_20170415T223406_rt0.nc',
'nemesis_20170415T181901_rt0.nc',
'nemesis_20170415T142326_rt0.nc',
'nemesis_20170414T211726_rt0.nc',
'nemesis_20170414T204237_rt0.nc',
'nemesis_20170414T200204_rt0.nc',
'nemesis_20170414T191127_rt0.nc',
'nemesis_20170414T183517_rt0.nc',
'nemesis_20170414T175658_rt0.nc',
'nemesis_20170414T170838_rt0.nc',
'nemesis_20170414T163826_rt0.nc',
'nemesis_20170414T160550_rt0.nc',
'nemesis_20170414T153128_rt0.nc',
'nemesis_20170414T144546_rt0.nc',
'nemesis_20170414T141553_rt0.nc',
'nemesis_20170414T134419_rt0.nc',
'nemesis_20170414T125048_rt0.nc',
'nemesis_20170414T121126_rt0.nc',
'nemesis_20170414T113140_rt0.nc',
'nemesis_20170414T104022_rt0.nc',
'nemesis_20170414T100220_rt0.nc',
'nemesis_20170414T092320_rt0.nc',
'nemesis_20170414T083639_rt0.nc',
'nemesis_20170414T080001_rt0.nc',
'nemesis_20170414T072333_rt0.nc',
'nemesis_20170414T060450_rt0.nc',
'nemesis_20170414T052723_rt0.nc',
'nemesis_20170414T045256_rt0.nc',
'nemesis_20170414T001407_rt0.nc',
'nemesis_20170413T224113_rt0.nc',
'nemesis_20170413T175449_rt0.nc',
'nemesis_20170413T161622_rt0.nc',
'nemesis_20170413T143646_rt0.nc',
'nemesis_20170413T130648_rt0.nc',
'nemesis_20170413T112821_rt0.nc',
'nemesis_20170413T095841_rt0.nc',
'nemesis_20170413T074545_rt0.nc',
'nemesis_20170413T055613_rt0.nc',
'nemesis_20170413T040950_rt0.nc',
'nemesis_20170413T021706_rt0.nc',
'nemesis_20170413T004402_rt0.nc',
'nemesis_20170412T234033_rt0.nc',
'nemesis_20170412T223941_rt0.nc',
'nemesis_20170412T221251_rt0.nc',
'nemesis_20170412T214343_rt0.nc',
'nemesis_20170412T212116_rt0.nc',
'nemesis_20170412T205615_rt0.nc',
'nemesis_20170412T203242_rt0.nc',
'nemesis_20170412T195346_rt0.nc',
'nemesis_20170412T192201_rt0.nc',
'nemesis_20170412T182659_rt0.nc',
]
cl.slocum_nemesis_parms = [ 'temperature', 'salinity', 'u', 'v' ] #'oxygen', 'cdom', 'opbs', 'fluorescence' not populated
cl.slocum_nemesis_startDatetime = startdate
cl.slocum_nemesis_endDatetime = enddate
######################################################################
# Wavegliders
######################################################################
# WG Tex - All instruments combined into one file - one time coordinate
##cl.wg_tex_base = cl.dodsBase + 'CANON_september2013/Platforms/Gliders/WG_Tex/final/'
##cl.wg_tex_files = [ 'WG_Tex_all_final.nc' ]
##cl.wg_tex_parms = [ 'wind_dir', 'wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'density', 'bb_470', 'bb_650', 'chl' ]
##cl.wg_tex_startDatetime = startdate
##cl.wg_tex_endDatetime = enddate
# WG Tiny - All instruments combined into one file - one time coordinate
cl.wg_Tiny_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Tiny_files = [
'wgTiny/20170412/QC/20170412_QC.nc',
]
cl.wg_Tiny_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'bb_470', 'bb_650', 'chl',
'beta_470', 'beta_650', 'pCO2_water', 'pCO2_air', 'pH', 'O2_conc' ]
cl.wg_Tiny_depths = [ 0 ]
cl.wg_Tiny_startDatetime = startdate
cl.wg_Tiny_endDatetime = enddate
# WG OA - All instruments combined into one file - one time coordinate
##cl.wg_oa_base = cl.dodsBase + 'CANON/2015_OffSeason/Platforms/Waveglider/wgOA/'
##cl.wg_oa_files = [ 'Sept_2013_OAWaveglider_final.nc' ]
##cl.wg_oa_parms = [ 'distance', 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'O2_conc',
## 'O2_sat', 'beta_470', 'bb_470', 'beta_700', 'bb_700', 'chl', 'pCO2_water', 'pCO2_air', 'pH' ]
##cl.wg_oa_startDatetime = startdate
##cl.wg_oa_endDatetime = enddate
######################################################################
# MOORINGS
######################################################################
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/201608/'
cl.m1_files = [
'OS_M1_20160829hourly_CMSTV.nc'
]
cl.m1_parms = [
'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate
# Mooring OA1
#cl.oa1_base = 'http://dods.mbari.org/opendap/data/oa_moorings/deployment_data/OA1/201401/'
#cl.oa1_files = [
# 'OA1_201401.nc'
# ]
cl.oa1_base = 'http://dods.mbari.org/opendap/data/oa_moorings/deployment_data/OA1/201607/realTime/'
cl.oa1_files = [
'OA1_201607.nc' ## new deployment
]
cl.oa1_parms = [
'wind_dir', 'avg_wind_spd', 'atm_press', 'air_temp', 'water_temp',
'sal', 'O2_conc', 'chl', 'pCO2_water', 'pCO2_air', 'pH',
]
cl.oa1_startDatetime = startdate
cl.oa1_endDatetime = enddate
# Mooring OA2
cl.oa2_base = 'http://dods.mbari.org/opendap/data/oa_moorings/deployment_data/OA2/201609/'
cl.oa2_files = [
'realTime/OA2_201609.nc'
]
cl.oa2_parms = [
'wind_dir', 'avg_wind_spd', 'atm_press', 'air_temp', 'water_temp',
'sal', 'O2_conc', 'chl', 'pCO2_water', 'pCO2_air', 'pH',
]
cl.oa2_startDatetime = startdate
cl.oa2_endDatetime = enddate
######################################################################
# RACHEL CARSON: Jan 2017 --
######################################################################
# UCTD
cl.rcuctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/RachelCarson/uctd/'
cl.rcuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.rcuctd_files = [
# '00917plm01.nc',
# '03917plm01.nc',
]
# PCTD
cl.rcpctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/RachelCarson/pctd/'
cl.rcpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
cl.rcpctd_files = [
# '00917c01.nc', '00917c02.nc', '00917c03.nc',
# '03917c01.nc', '03917c02.nc', '03917c03.nc',
]
######################################################################
# WESTERN FLYER: Apr 2017 --
######################################################################
# UCTD
cl.wfuctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/WesternFlyer/uctd/'
cl.wfuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.wfuctd_files = [
'canon17sm01.nc',
'canon17sm02.nc',
'canon17sm03.nc',
'canon17sm04.nc',
'canon17sm05.nc',
'canon17sm06.nc',
'canon17sm07.nc',
'canon17sm08.nc',
'canon17sm09.nc',
'canon17sm10.nc',
'canon17sm11.nc',
'canon17sm12.nc',
'canon17sm13.nc',
'canon17sm14.nc',
'canon17sm1.nc',
'canon17sm15.nc', 'canon17sm16.nc', 'canon17sm17.nc',
'canon17sm18.nc',
'canon17sm19.nc', 'canon17sm20.nc', 'canon17sm21.nc',
'canon17sm22.nc',
'canon17sm23.nc', 'canon17sm24.nc', 'canon17sm25.nc',
]
# PCTD
cl.wfpctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/WesternFlyer/pctd/'
cl.wfpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
cl.wfpctd_files = [
'canon17sc01.nc',
'canon17sc03.nc',
'canon17sc04.nc',
'canon17sc05.nc',
'canon17sc06.nc',
'canon17sc07.nc',
'canon17sc08.nc',
'canon17sc09.nc',
'canon17sc10.nc',
'canon17sc11.nc',
'canon17sc12.nc',
'canon17sc13.nc',
'canon17sc14.nc',
'canon17sc15.nc', 'canon17sc16.nc', 'canon17sc17.nc',
'canon17sc18.nc', 'canon17sc19.nc', 'canon17sc20.nc',
'canon17sc21.nc', 'canon17sc22.nc', 'canon17sc23.nc',
'canon17sc24.nc', 'canon17sc25.nc', 'canon17sc26.nc',
'canon17sc27.nc', 'canon17sc28.nc',
'canon17sc29.nc', 'canon17sc30.nc', 'canon17sc31.nc', 'canon17sc32.nc',
'canon17sc33.nc', 'canon17sc34.nc', 'canon17sc35.nc', 'canon17sc36.nc',
'canon17sc37.nc', 'canon17sc38.nc',
]
###################################################################################################
# SubSample data files from /mbari/BOG_Archive/ReportsForSTOQS/
# copied to local BOG_Data/CANON_OS2017 dir
###################################################################################################
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'BOG_Data/CANON_OS2017/bctd/')
cl.subsample_csv_files = [
## 'STOQS_00917_OXY_PS.csv',
## 'STOQS_00917_CARBON_GFF.csv',
## 'STOQS_00917_CHL_1U.csv', 'STOQS_00917_FLUOR.csv',
## 'STOQS_00917_CHL_5U.csv', 'STOQS_00917_NH4.csv', 'STOQS_00917_PHAEO_1U.csv',
## 'STOQS_00917_CHLA.csv', 'STOQS_00917_O2.csv', 'STOQS_00917_PHAEO_5U.csv',
## 'STOQS_00917_CHL_GFF.csv',
## 'STOQS_00917_PHAEO_GFF.csv',
]
# Execute the load
cl.process_command_line()
if cl.args.test:
cl.stride = 1000
elif cl.args.optimal_stride:
cl.stride = 2
else:
cl.stride = cl.args.stride
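# A sketch of the stride semantics assumed here: stride=N loads every Nth
# record from each source, so --test (stride=1000) gives a fast smoke-test
# load while optimal_stride trades resolution for load time.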
cl.loadM1()
cl.loadLRAUV('tethys', startdate, enddate)
cl.loadLRAUV('aku', startdate, enddate)
cl.loadLRAUV('ahi', startdate, enddate)
cl.loadLRAUV('opah', startdate, enddate)
cl.loadLRAUV('daphne', startdate, enddate)
##cl.loadL_662() ## not in this campaign
cl.loadL_662a()
##cl.load_NPS34() ## not in this campaign
cl.load_NPS34a()
cl.load_slocum_nemesis()
cl.load_SG621(stride=2) ## KISS glider
cl.load_SG539(stride=2) ## KISS glider
cl.load_wg_Tiny()
cl.load_oa1()
cl.load_oa2()
cl.loadDorado()
##cl.loadRCuctd() ## not in this campaign
##cl.loadRCpctd() ## not in this campaign
cl.loadWFuctd()
cl.loadWFpctd()
#cl.loadSubSamples() ## no subSamples yet...
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print("All Done.")
|
stoqs/stoqs
|
stoqs/loaders/CANON/loadCANON_april2017.py
|
Python
|
gpl-3.0
| 20,155
|
[
"NetCDF"
] |
45ef867c2d9d66e853ed39c7e233cdbd6804041bb700a8dd512d077a71198cda
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from mdtraj.testing import get_fn, eq
from mdtraj import load
def test_0():
t1 = load(get_fn('native2.xml'), top=get_fn('native2.pdb'))
t2 = load(get_fn('native2.pdb'))
t1.center_coordinates()
t2.center_coordinates()
yield lambda: eq(t1.xyz, t2.xyz)
yield lambda: eq(t1.unitcell_vectors, t2.unitcell_vectors)
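# Note: the yield-per-assertion pattern above is a nose-style generator
# test; the (nose-era) runner collects each yielded lambda as its own case.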
|
casawa/mdtraj
|
mdtraj/tests/test_xml.py
|
Python
|
lgpl-2.1
| 1,353
|
[
"MDTraj"
] |
e3ce7a44b5bf40b3dc5caf71ee8154757a88d45b06f815f9db3df302027a4fc7
|
from ase import Atoms
from gpaw import GPAW, FermiDirac
k = 7
kT = 0
h = 0.18
a = 5.4
b = a/2
atoms = Atoms('Si2',
positions=([0, 0, 0],
[b/2, b/2, b/2]),
cell=([0, b, b],
[b, 0, b],
[b, b, 0]),
pbc=True)
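# Geometry note: the cell rows [0,b,b], [b,0,b], [b,b,0] with b = a/2 are the
# primitive fcc lattice vectors, and the second atom at (b/2, b/2, b/2) =
# (a/4, a/4, a/4) completes the two-atom diamond-structure basis of Si.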
calc = GPAW(kpts=(k, k, k),
setups='ah',
usesymm=None,
occupations=FermiDirac(kT),
h=h)
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Si_AH.gpw', mode='all')
|
qsnake/gpaw
|
gpaw/test/big/dfpt/Si_AH.py
|
Python
|
gpl-3.0
| 548
|
[
"ASE",
"GPAW"
] |
e02ceabe5129f96a98e78bbae6736fc4b404bb3ea980038acc1b024f08b5eada
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import unittest
import logging
import moosetree
import mooseutils
import MooseDocs
from MooseDocs.tree import app_syntax
logging.basicConfig()
class TestSyntaxTree(unittest.TestCase):
def testRemoveDisable(self):
location = os.path.join(MooseDocs.MOOSE_DIR, 'modules', 'combined')
exe = mooseutils.find_moose_executable(location)
root = app_syntax(exe, remove=[])
node = moosetree.find(root, lambda n: n.fullpath == '/Variables/InitialCondition/BoundingBoxIC')
self.assertEqual(node.name, 'BoundingBoxIC')
def testRemove(self):
location = os.path.join(MooseDocs.MOOSE_DIR, 'modules', 'combined')
exe = mooseutils.find_moose_executable(location)
root = app_syntax(exe, remove=['/Variables/InitialCondition'])
node = moosetree.find(root, lambda n: n.fullpath == '/Variables/InitialCondition/AddICAction')
self.assertEqual(node.name, 'AddICAction')
self.assertTrue(node.removed)
node = moosetree.find(root, lambda n: n.fullpath == '/Variables/InitialCondition/BoundingBoxIC')
self.assertTrue(node.removed)
def testRemoveTestApp(self):
location = os.path.join(MooseDocs.MOOSE_DIR, 'modules', 'combined')
exe = mooseutils.find_moose_executable(location)
root = app_syntax(exe)
node = moosetree.find(root, lambda n: n.fullpath == '/UserObjects/TestDistributionPostprocessor')
self.assertTrue(node.removed)
self.assertIn('MiscTestApp', root.groups)
def testAlias(self):
location = os.path.join(MooseDocs.MOOSE_DIR, 'test')
exe = mooseutils.find_moose_executable(location)
alias = dict()
alias['/VectorPostprocessors/VolumeHistogram'] = '/VPP/VolumeHistogram'
root = app_syntax(exe, alias=alias)
node = moosetree.find(root, lambda n: n.fullpath == '/VectorPostprocessors/VolumeHistogram')
self.assertEqual(node.fullpath, '/VectorPostprocessors/VolumeHistogram')
self.assertEqual(node.alias, '/VPP/VolumeHistogram')
def testADObject(self):
location = os.path.join(MooseDocs.MOOSE_DIR, 'test')
exe = mooseutils.find_moose_executable(location)
root = app_syntax(exe)
node = moosetree.find(root, lambda n: n.fullpath == '/Kernels/ADDiffusion')
self.assertEqual(node.fullpath, '/Kernels/ADDiffusion')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
nuclear-wizard/moose
|
python/MooseDocs/test/tree/test_syntax_tree.py
|
Python
|
lgpl-2.1
| 2,774
|
[
"MOOSE"
] |
09be17eaec1052cad968a858ddeab54f2b97cb7c013e7fbde7fd953d9244b92c
|
#-------------------------------------------------------------------------------
# Name: output
# Purpose: Encapsulation of parsing/handling of data from computation
# textual output.
#
# Author: Brian Skinn
# bskinn@alum.mit.edu
#
# Created: 16 Feb 2015
# Copyright: (c) Brian Skinn 2016
# License: The MIT License; see "license.txt" for full license terms
# and contributor agreement.
#
# This file is part of opan (Open Anharmonic), a system for automated
# computation of anharmonic properties of molecular systems via wrapper
# calls to computational/quantum chemical software packages.
#
# http://www.github.com/bskinn/opan
#
#-------------------------------------------------------------------------------
""" Module implementing parsing of output data from external computations.
.. warning::
This module **will** be refactored at some point, to introduce a
superclass in the same vein as :class:`~opan.grad.SuperOpanGrad` and
:class:`~opan.hess.SuperOpanHess`. This is necessary because
automated execution of external
computation software packages will require some unified mechanism for
indicating whether a particular computation completed successfully,
independent of the identity of the software package that was executed.
.. warning::
Further refactoring is also planned for :class:`OrcaOutput` generally.
Beware relying heavily on the behavior of this class & module.
**Superclass**
*To be implemented*
|
**Implemented Subclasses**
.. note::
Not yet actually a subclass of anything
:class:`OrcaOutput` -- Imports output files from |orca|
|
**Subclasses**
.. autoclass:: OrcaOutput
"""
# Debug constant
_DEBUG = False
class OrcaOutput(object):
""" Container for parsed textual output generated by |orca|.
All implemented results that are found in the indicated output are stored
in the :class:`OrcaOutput` instance. If a given quantity was
not detectable, it
is stored as |None| in the corresponding instance variable.
.. note::
In particular, thermochemistry from single atom/ion computations
**should work**, with |None| or zero/negligible values returned
for rotational and vibrational quantities.
The verbose contents of the output file are not generally retained within
the :class:`OrcaOutput` instance, since the full text can be an extremely
large string. Exceptions include, if present:
- THERMOCHEMISTRY section
|
**Contents**
* `Methods <OrcaOutput-Methods_>`_
* :meth:`~opan.output.OrcaOutput.__init__`
* :meth:`~opan.output.OrcaOutput.en_last`
* `Class Variables <OrcaOutput-ClassVars_>`_
* `Enumerations <OrcaOutput-ClassVars-Enums_>`_
* :class:`~opan.output.OrcaOutput.EN`
* :class:`~opan.output.OrcaOutput.SPINCONT`
* :class:`~opan.output.OrcaOutput.THERMO`
* `Regular Expression Patterns
<OrcaOutput-ClassVars-Regex_>`_
* :class:`~opan.output.OrcaOutput.p_en`
* :class:`~opan.output.OrcaOutput.p_spincont`
* :class:`~opan.output.OrcaOutput.p_thermo`
* `Instance Variables <OrcaOutput-InstVars_>`_
.. _OrcaOutput-Methods:
**Methods**
.. automethod:: __init__
.. automethod:: en_last
|
.. _OrcaOutput-ClassVars:
**Class Variables**
.. _OrcaOutput-ClassVars-Enums:
*Enumerations*
.. class:: EN
:class:`~opan.const.OpanEnum` for the energies reported
at the end of SCF cycles.
|
.. attribute:: D3
Grimme's D3BJ dispersion correction [Gri10]_. May or may not play
nicely with D3ZERO. Likely non-functional with DFT-NL
dispersion.
.. attribute:: GCP
Grimme's geometric counterpose (gCP) correction [Kru12]_
.. attribute:: OCC
COSMO outlying charge correction
.. todo:: Need COSMO reference
.. attribute:: SCFFINAL
SCF energy including gCP and D3 corrections
.. attribute:: SCFFINALOCC
:attr:`SCFFINAL` energy, but also with COSMO outlying
charge correction
.. attribute:: SCFOCC
SCF energy with only the COSMO outlying charge correction
(no dispersion or gCP corrections)
.. class:: OrcaOutput.SPINCONT
:class:`~opan.const.OpanEnum` for the spin contamination
values reported after each unrestricted SCF cycle.
|
.. attribute:: ACTUAL
Calculated :math:`\\left<S^2\\right>` expectation value
.. attribute:: DEV
Deviation of :math:`\\left<S^2\\right>` (calculated
minus ideal)
.. attribute:: IDEAL
Ideal :math:`\\left<S^2\\right>` expectation value
.. class:: OrcaOutput.THERMO
:class:`~opan.const.OpanEnum` for the quantities reported
in the THERMOCHEMISTRY block.
|
.. attribute:: BLOCK
Entire THERMOCHEMISTRY block (as |str|)
.. attribute:: E_EL
Electronic energy from the thermochemistry block, often
slightly different than the last
:attr:`EN.SCFFINAL <OrcaOutput.EN.SCFFINAL>` value
:math:`\\left(\\mathrm{E_h}\\right)`
.. attribute:: E_ROT
Thermal rotational internal energy correction
:math:`\\left(\\mathrm{E_h}\\right)`
.. attribute:: E_TRANS
Thermal translational internal energy correction
:math:`\\left(\\mathrm{E_h}\\right)`
.. attribute:: E_VIB
Thermal vibrational internal energy correction
:math:`\\left(\\mathrm{E_h}\\right)`
.. attribute:: E_ZPE
Zero-point energy correction
:math:`\\left(\\mathrm{E_h}\\right)`
.. attribute:: H_IG
Ideal-gas
:math:`\\left(k_\\mathrm{B} T\\right)`
enthalpy contribution
:math:`\\left(\\mathrm{E_h}\\right)`
.. attribute:: PRESS
Simulated pressure
:math:`\\left(\\mathrm{atm}\\right)`
.. attribute:: QROT
Rotational partition function (unitless)
.. attribute:: TEMP
Simulated temperature
:math:`\\left(\\mathrm K\\right)`
.. attribute:: TS_EL
Electronic :math:`TS` entropy contribution
:math:`\\left(\\mathrm{E_h}\\right)`
.. attribute:: TS_TRANS
Translational :math:`TS` entropy contribution
:math:`\\left(\\mathrm{E_h}\\right)`
.. attribute:: TS_VIB
Vibrational :math:`TS` entropy contribution
:math:`\\left(\\mathrm{E_h}\\right)`
.. _OrcaOutput-ClassVars-Regex:
*Regular Expression Patterns*
.. attribute:: OrcaOutput.p_en
|dict| of |re.compile| for the energies reported
at the end of SCF cycles. Keys are in :attr:`~OrcaOutput.EN`.
.. attribute:: OrcaOutput.p_spincont
|dict| of |re.compile| for the spin contamination block
values. Keys are in :attr:`~OrcaOutput.SPINCONT`.
.. attribute:: OrcaOutput.p_thermo
|dict| of |re.compile| for the quantities extracted
from the THERMOCHEMISTRY block. Keys are in
:attr:`~OrcaOutput.THERMO`.
|
.. _OrcaOutput-InstVars:
**Instance Variables**
.. attribute:: OrcaOutput.completed
|bool| --
|True| if |orca| output reports normal termination, |False| otherwise.
.. attribute:: OrcaOutput.converged
|bool| --
|True| if SCF converged ANYWHERE in run.
.. todo:: Update oo.converged with any robustifications
.. attribute:: OrcaOutput.en
|dict| of |list| of |npfloat_|--
Lists of the various energy values from the parsed output. Dict
keys are those of :attr:`EN`, above. Any energy type not found in the
output is assigned as an empty list.
.. attribute:: OrcaOutput.optimized
|bool| --
|True| if any OPT converged ANYWHERE in run. Fine for OPT,
but ambiguous for scans.
.. todo:: Update oo.optimized with any robustifications
.. attribute:: OrcaOutput.spincont
|dict| of |list| of |npfloat_|--
Lists of the various values from the spin contamination calculations
in the output, if present. Empty lists if absent. Dict keys are those
of :attr:`SPINCONT`, above.
.. attribute:: OrcaOutput.src_path
|str| --
Full path to the associated output file
.. attribute:: OrcaOutput.thermo
|dict| of |npfloat_|--
Values from the thermochemistry block of the parsed output. Dict keys
are those of :attr:`THERMO`, above.
.. attribute:: OrcaOutput.thermo_block
|str| --
Full text of the thermochemistry block, if found.
"""
# Imports
import re as _re
from .const import OpanEnum as _OpEnum
# Various class-level RegEx patterns, collected into dictionaries to
# facilitate later iterable data retrieval.
#
# IN ALL PATTERNS the group name is the same -- this is to simplify the
# parsing process when these patterns are used -- no need to dynamically
# fiddle with substituting in custom group names each time! The .replace()
# call in each pattern definition saves work if P_GROUP ever needs to be
# changed.
P_GROUP = "val"
# Patterns for SCF energies, reported at the end of single-point and each
# step of geometry optimizations, etc., if not suppressed by %output
# settings.
# String constants for retrieving energy quantities.
# Prefix is the uppercase of the Regex dictionary name
class EN(_OpEnum):
SCFFINAL = "SCFFINAL"
GCP = "GCP"
D3 = "D3"
SCFOCC = "SCFOCC"
OCC = "OCC"
SCFFINALOCC = "SCFFINALOCC"
## end class EN
# Initialize dictionary
p_en = dict()
# Final SCF energy, with gCP, D3, corrections included... but NOT COSMO
# outlying charge correction.
p_en.update({ EN.SCFFINAL :
_re.compile("""
-\\n # Hyphen on preceding line
FINAL\\ SINGLE\\ # Key text 1
POINT\\ ENERGY\\ # Key text 2
[\\ ]+(?P<>[0-9.-]+) # Energy on same line as key text
.*\\n- # Hyphen starting following line
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
# gCP corrections entering into reported FINAL ENERGY values.
p_en.update({ EN.GCP :
_re.compile("""
-\\n # Hyphen on preceding line
gCP\\ correction # Key text
[\\ ]+(?P<>[0-9.-]+) # Energy on same line as key text
.*\\n- # Hyphen starting following line.
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
# D3 corrections entering into reported FINAL ENERGY values.
p_en.update({ EN.D3 :
_re.compile("""
-\\n # Hyphen on preceding line
Dispersion\\ correction # Key text
[\\ ]+(?P<>[0-9.-]+) # Energy on same line as key text
.*\\n- # Hyphen starting following line.
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
# COSMO SCF energies after the COSMO outlying charge correction BUT BEFORE
# any other augmentations to the energy (no D3, gCP, etc.)
p_en.update({ EN.SCFOCC :
_re.compile("""
Total\\ Energy\\ after\\ # Key text 1
outlying\\ charge\\ # Key text 2
correction[\\ ]*= # Key text 3
[\\ ]+(?P<>[0-9.-]+) # Energy following key text
[\\ ]+Eh.*\\n # 'Eh' units, then newline
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
# Patterns for the entire thermochemistry block, as well as the individual
# data elements therein.
# String constants for retrieving energy quantities.
# Prefix is the uppercase of the Regex dictionary name
class THERMO(_OpEnum):
BLOCK = "BLOCK"
TEMP = "TEMP"
PRESS = "PRESS"
E_EL = "E_EL"
E_ZPE = "E_ZPE"
E_VIB = "E_VIB"
E_ROT = "E_ROT"
E_TRANS = "E_TRANS"
H_IG = "H_IG"
TS_EL = "TS_EL"
TS_VIB = "TS_VIB"
TS_TRANS = "TS_TRANS"
QROT = "QROT"
## end class THERMO
# Initialize dictionary
p_thermo = dict()
# Whole thermo block just in case; probably not needed for automated
# computation, but potentially handy for manual fiddling.
p_thermo.update({ THERMO.BLOCK :
_re.compile("""
(?P<>-+\\n # Hyphen line
THERMOCHEMISTRY\\ # Header text
AT\\ [0-9.]+\\ *K\\n # Temperature
-+\\n # Hyphen line
(.|\\n)*) # Everything until the end
Timings\\ for\\ # Closing blip 1
individual\\ modules # Closing blip 2
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
# Individual quantities. Descriptions in pattern definition comments.
p_thermo.update({ THERMO.TEMP :
_re.compile("""
temperature[\\ .]+ # Key text
(?P<>[0-9.]+) # Temperature value
[\\ ]+K # in Kelvin
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.PRESS :
_re.compile("""
pressure[\\ .]+ # Key text
(?P<>[0-9.]+) # Pressure value
[\\ ]+atm # in atm
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.E_EL :
_re.compile("""
electronic\\ energy # Key text 1
[\\ .]+ # Key text 2
(?P<>[0-9.-]+) # Electronic energy value
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.E_ZPE :
_re.compile("""
zero\\ point\\ energy # Key text 1
[\\ .]+ # Key text 2
(?P<>[0-9.-]+) # ZPE energy value
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.E_VIB :
_re.compile("""
thermal\\ vibrational\\ # Key text 1
correction[\\ .]+ # Key text 2
(?P<>[0-9.-]+) # Vibration energy correction value
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.E_ROT :
_re.compile("""
thermal\\ rotational\\ # Key text 1
correction[\\ .]+ # Key text 2
(?P<>[0-9.-]+) # Rotation energy correction value
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.E_TRANS :
_re.compile("""
thermal\\ # Key text 1
translational\\ # Key text 2
correction[\\ .]+ # Key text 3
(?P<>[0-9.-]+) # Translation energy correction value
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.H_IG :
_re.compile("""
thermal\\ enthalpy\\ # Key text 1
correction[\\ .]+ # Key text 2
(?P<>[0-9.-]+) # Ideal gas enthalpy correction
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.TS_EL :
_re.compile("""
electronic\\ entropy\\ # Key text
[\\ .]+ # Spacer
(?P<>[0-9.-]+) # Electronic entropy contribution
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.TS_VIB :
_re.compile("""
vibrational\\ entropy\\ # Key text
[\\ .]+ # Spacer
(?P<>[0-9.-]+) # Vibrational entropy contribution
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.TS_TRANS :
_re.compile("""
translational\\ # Key text 1
entropy\\ # Key text 2
[\\ .]+ # Spacer
(?P<>[0-9.-]+) # Translational entropy contribution
[\\ ]+Eh # in Hartrees
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_thermo.update({ THERMO.QROT :
_re.compile("""
qrot\\ +=\\ + # Key text
(?P<>[0-9.]+) # Rotational partition function
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
# Dipole moment pattern
p_dipmom = _re.compile("""
-+\\n # Hyphen line
dipole\\ moment.*\\n # Block label
-+\\n # Hyphen line
(.*\\n)+? # Lazy grab of any lines
Magnitude\\ \\(debye\\)\\ +:\\ + # Line leader
(?P<>[0-9.]+).*\\n # Grab the dipole moment
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
# Patterns for the spin contamination information
# String constants for retrieving energy quantities.
# Prefix is the uppercase of the Regex dictionary name
class SPINCONT(_OpEnum):
ACTUAL = "ACTUAL"
IDEAL = "IDEAL"
DEV = "DEV"
## end class SPINCONT
# Initialize dictionary
p_spincont = dict()
# Patterns for the spin contamination information
p_spincont.update({ SPINCONT.ACTUAL :
_re.compile("""
expectation[ ]value[ ]of[ ]<S\*\*2> # Key text
[ ]+:[ ]+ # Space and separator
(?P<>[0-9.]+) # Grab the value
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_spincont.update({ SPINCONT.IDEAL :
_re.compile("""
ideal[ ]value[ ]s\\*\\(s\\+1\\)[ ] # Key text 1
for[ ]s=[0-9.]+ # Key text 2
[ ]+:[ ]+ # Space and separator
(?P<>[0-9.]+) # Grab the value
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
p_spincont.update({ SPINCONT.DEV :
_re.compile("""
deviation # Key text
[ ]+:[ ]+ # Space and separator
(?P<>[0-9.]+) # Grab the value
""".replace("P<", "P<" + P_GROUP), _re.I | _re.X)
})
#RESUME: Make patterns and constants for the virial block; update docstrings
## end class variables
def __init__(self, file_path):
""" Initialize :class:`OrcaOutput` object.
Imports the data found in the output file found at `file_path`.
.. warning::
THIS CLASS PRESENTLY ONLY WORKS ON A **VERY SMALL** SUBSET OF
COMPUTATION TYPES, currently HF, LDA-DFT, GGA-DFT, and mGGA-DFT.
*MAY* work on some double-hybrid or range-separated DFT.
Available data includes:
* SCF energies (incl D3BJ, gCP, COSMO outlying charge corrections)
* Thermochemistry
* Spin expectation values (actual, ideal, and deviation)
Success indicators include:
* `completed`
Checks for the 'ORCA TERMINATED NORMALLY' report at the
end of the file
* `converged`
Checks for any occurrence of successful SCF convergence
in the file (questionable for anything but single-point
calculations)
* `optimized`
Checks for any occurrence of "OPTIMIZATION HAS
CONVERGED" in the file (questionable for anything but
a standalone OPT -- i.e., not useful for a mode or
internal coordinate scan)
Parameters
----------
file_path
|str| --
Full path to the output file to be parsed.
Raises
------
~opan.error.OutputError
(various typecodes) If indicated output is un-parseably
malformed in some fashion
"""
#TODO: (?) OrcaOutput: Add initialization parameter to indicate which
# type of run should be expected?
# Imports
from .utils import pack_tups
from .utils import safe_cast as scast
from .error import OutputError
import numpy as np
# Get the output data
with open(file_path) as in_f:
datastr = in_f.read()
##end with
# Check for normal termination (weird values in dicts, etc. would be
# diagnostic also, but might as well define this since it's easy).
self.completed = datastr.find("ORCA TERMINATED NORMALLY") > -1
# Simple check for single-point SCF convergence
# TODO: Probably robustify convergence check to opt, and MDCI/MRCI/CAS
self.converged = datastr.find("SCF CONVERGED AFTER") > -1
# Simple check for optimization convergence
# TODO: Probably robustify optimization convergence check for
# scans as well as single optimizations. Multiple job runs promise
# to be thoroughly annoying.
self.optimized = datastr.find("OPTIMIZATION HAS CONVERGED") > -1
# Store the source information
self.src_path = file_path
# Initialize the energies dict as empty
self.en = dict()
# Populate with the Regex-retrieved values.
# If any are not found, this will store as an empty list.
for (k,p) in self.p_en.items():
self.en.update({ k :
[scast(m.group(self.P_GROUP), np.float_) for m in
p.finditer(datastr)] })
##next (k,p)
# Calculate just the outlying charge correction, if COSMO enabled,
# and then calculate the SCFFINAL result including the OCC.
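# In effect (a sketch of the arithmetic below):
#   OCC = E_SCFOCC - (E_SCFFINAL - E_D3 - E_gCP)
#   SCFFINALOCC = E_SCFFINAL + OCC
# i.e., strip the D3/gCP terms from the final SCF energy to recover the
# bare SCF value, difference that against the OCC-corrected energy, and
# add the resulting correction back onto the final energy.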
if not self.en[self.EN.SCFOCC] == []:
self.en.update({ self.EN.OCC :
[t[0] - (t[1] - t[2] - t[3]) for t in
pack_tups(
self.en[self.EN.SCFOCC],
self.en[self.EN.SCFFINAL],
self.en[self.EN.D3] if self.en[self.EN.D3] != []
else 0,
self.en[self.EN.GCP] if self.en[self.EN.GCP] != []
else 0
)
] })
self.en.update({ self.EN.SCFFINALOCC :
[t[0] + t[1] for t in
pack_tups( # Could use zip() here, probably
self.en[self.EN.SCFFINAL],
self.en[self.EN.OCC]
)
] })
##end if
# Now collect the thermo quantities
# Just store the whole thermo block
try:
self.thermo_block = \
self.p_thermo[self.THERMO.BLOCK].search(datastr).group()
except AttributeError:
# Block not found; store as None
self.thermo_block = None
else:
# Only store the block details if the block is actually found!
# Initialize the empty dictionary for the numericals
self.thermo = dict()
# Iterate to pull the individual values
for (k,p) in self.p_thermo.items():
if k != self.THERMO.BLOCK:
try:
self.thermo.update({ k :
scast(p.search(datastr)
.group(self.P_GROUP), np.float_) })
except AttributeError:
# Value not found -- probably a monoatomic freq calc (run,
# e.g., to auto-generate an enthalpy), where some
# quantities are not reported.
# Store as just None
self.thermo.update({ k: None })
## end try
## end if
## next (k,p)
## end try
#TODO: (?) OrcaOutput: Pull the final geometry and atom masses. Would
# be nice not to require a Hessian calculation in order to have this
# info available.
# Masses and/or geometries may not always be in the output file,
# depending on the %output settings. Also have to address possible
# multiples of the coordinates in, e.g., scans.
# Pull all dipole moments
self.dipmoms = []
for m in OrcaOutput.p_dipmom.finditer(datastr):
self.dipmoms.append(scast(m.group(OrcaOutput.P_GROUP), np.float_))
## next m
# Initialize the spin contamination dict as empty
self.spincont = dict()
# Populate with the Regex-retrieved values.
# If any are not found, this will store as an empty list.
for (k,p) in self.p_spincont.items():
self.spincont.update({ k :
[scast(m.group(self.P_GROUP), np.float_) for m in
p.finditer(datastr)] })
##next (k,p)
#RESUME: Pull the virial block info (may be absent)
## end def __init__
def en_last(self):
""" Report the energies from the last SCF present in the output.
Returns a |dict| providing the various energy values from the
last SCF cycle performed in the output. Keys are those of
:attr:`~opan.output.OrcaOutput.p_en`.
Any energy value not relevant to the parsed
output is assigned as |None|.
Returns
-------
last_ens
|dict| of |npfloat_|--
Energies from the last SCF present in the output.
"""
# Initialize the return dict
last_ens = dict()
# Iterate and store
for (k,l) in self.en.items():
last_ens.update({ k : l[-1] if l != [] else None })
##next (k,l)
# Should be ready to return?
return last_ens
## end def en_last
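# Hypothetical usage sketch (the file name is illustrative, not from the
# source):
#   oo = OrcaOutput('run.out')
#   finals = oo.en_last()
#   e_final = finals[OrcaOutput.EN.SCFFINAL]  # None if never parsed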
if __name__ == '__main__':
print("Module not executable")
|
bskinn/opan
|
opan/output.py
|
Python
|
mit
| 26,931
|
[
"Brian",
"ORCA"
] |
bf5f13cc9545d8cdc7e0f88c6a08620eb19b5b3a684ef84fb03894242630c0cf
|
""" This is a test of the chain
SiteStatus -> ResourceStatusClient -> ResourceStatusDB
It assumes that the DB is present and that the service is running.
"""
# pylint: disable=invalid-name,wrong-import-position
from datetime import datetime
import unittest
import sys
import DIRAC
DIRAC.initialize() # Initialize configuration
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
Datetime = datetime.now()
testSite = "test1234.test.test"
class TestClientSiteStatusTestCase(unittest.TestCase):
def setUp(self):
self.rsClient = ResourceStatusClient()
self.stClient = SiteStatus()
self.stClient.rssFlag = True
def tearDown(self):
pass
class ClientChain(TestClientSiteStatusTestCase):
def test_addAndRemove(self):
# make sure that the test sites are not present in the db
self.rsClient.deleteStatusElement("Site", "Status", testSite)
self.rsClient.deleteStatusElement("Site", "Status", "testActive1.test.test")
self.rsClient.deleteStatusElement("Site", "Status", "testActive.test.test")
self.rsClient.deleteStatusElement("Site", "Status", "testBanned.test.test")
# add test site
res = self.rsClient.insertStatusElement(
"Site",
"Status",
testSite,
"all",
"Active",
"Site",
"Synchronized",
Datetime,
Datetime,
"tokenOwner",
Datetime,
)
self.assertTrue(res["OK"])
self.stClient.rssCache.refreshCache()
# TEST getSites
# ...............................................................................
result = self.stClient.getSites()
self.assertTrue(result["OK"])
self.assertTrue(testSite in result["Value"])
# TEST getSiteStatuses
# ...............................................................................
result = self.stClient.getSiteStatuses([testSite])
self.assertTrue(result["OK"])
self.assertEqual(result["Value"][testSite], "Active")
# TEST getUsableSites
# ...............................................................................
result = self.stClient.getUsableSites([testSite])
self.assertTrue(result["OK"])
self.assertEqual(result["Value"][0], testSite)
# finally delete the test site
res = self.rsClient.deleteStatusElement("Site", "Status", testSite)
self.assertTrue(res["OK"])
# ...............................................................................
# adding some more test sites and more complex tests
# ...............................................................................
res = self.rsClient.insertStatusElement(
"Site",
"Status",
"testActive.test.test",
"all",
"Active",
"Site",
"Synchronized",
Datetime,
Datetime,
"tokenOwner",
Datetime,
)
self.assertTrue(res["OK"])
res = self.rsClient.insertStatusElement(
"Site",
"Status",
"testActive1.test.test",
"all",
"Active",
"Site",
"Synchronized",
Datetime,
Datetime,
"tokenOwner",
Datetime,
)
self.assertTrue(res["OK"])
res = self.rsClient.insertStatusElement(
"Site",
"Status",
"testBanned.test.test",
"all",
"Banned",
"Site",
"Synchronized",
Datetime,
Datetime,
"tokenOwner",
Datetime,
)
self.assertTrue(res["OK"])
self.stClient.rssCache.refreshCache()
# TEST getSites
# ...............................................................................
result = self.stClient.getSites()
self.assertTrue(result["OK"])
self.assertTrue("testActive1.test.test" in result["Value"])
self.assertTrue("testActive.test.test" in result["Value"])
self.assertFalse("testBanned.test.test" in result["Value"])
# TEST getSites
# ...............................................................................
result = self.stClient.getSites("All")
self.assertTrue(result["OK"])
self.assertTrue("testActive1.test.test" in result["Value"])
self.assertTrue("testActive.test.test" in result["Value"])
self.assertTrue("testBanned.test.test" in result["Value"])
# TEST getUsableSites
# ...............................................................................
result = self.stClient.getUsableSites()
self.assertTrue(result["OK"])
self.assertTrue("testActive1.test.test" in result["Value"])
self.assertTrue("testActive.test.test" in result["Value"])
# setting a status
result = self.stClient.setSiteStatus("testBanned.test.test", "Probing")
self.assertTrue(result["OK"])
self.stClient.rssCache.refreshCache()
result = self.stClient.getSites("Probing")
self.assertTrue(result["OK"])
self.assertTrue("testBanned.test.test" in result["Value"])
self.assertFalse("testActive.test.test" in result["Value"])
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestClientSiteStatusTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ClientChain))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
|
DIRACGrid/DIRAC
|
tests/Integration/ResourceStatusSystem/Test_SiteStatus.py
|
Python
|
gpl-3.0
| 5,867
|
[
"DIRAC"
] |
dde91a631335164e313f8353e96fb27e7b5630948a53987ef44f1c5a45ff8f7c
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
state = orm.State.objects.create(name='Nasarawa')
for clinic in orm.Clinic.objects.all():
clinic.lga_temp = clinic.lga
clinic.save()
lga, _ = orm.LGA.objects.get_or_create(name=clinic.lga, state=state)
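# A reading of the intent (not stated in the source): lga_temp preserves the
# old free-text LGA value on each clinic, while get_or_create -- keyed on
# (name, state) -- collapses repeated LGA names into single LGA rows.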
def backwards(self, orm):
"Write your backwards methods here."
orm.LGA.objects.all().delete()
orm.State.objects.all().delete()
orm.Clinic.objects.all().update(lga_temp='')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'lga_temp': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'clinics.clinicscore': {
'Meta': {'object_name': 'ClinicScore'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quality': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.genericfeedback': {
'Meta': {'object_name': 'GenericFeedback'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'display_on_dashboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'clinics.lga': {
'Meta': {'object_name': 'LGA'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.State']"})
},
u'clinics.patient': {
'Meta': {'unique_together': "[('clinic', 'serial')]", 'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.region': {
'Meta': {'unique_together': "(('external_id', 'type'),)", 'object_name': 'Region'},
'alternate_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'external_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'lga'", 'max_length': '16'})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.state': {
'Meta': {'object_name': 'State'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'satisfied': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'survey_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'survey_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'welcome_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'clinics.visitregistrationerror': {
'Meta': {'object_name': 'VisitRegistrationError'},
'error_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'clinics.visitregistrationerrorlog': {
'Meta': {'object_name': 'VisitRegistrationErrorLog'},
'error_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['clinics']
symmetrical = True
|
myvoice-nigeria/myvoice
|
myvoice/clinics/migrations/0039_initial_data_state_lga_lga2_lgatemp.py
|
Python
|
bsd-2-clause
| 13,520
|
[
"VisIt"
] |
a2e087460f89ae2127d1af3b33a921134b67c18730dd03a3225fe13b66c3a6e5
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the save_trajs function of the coordinates API by comparing
the direct, sequential retrieval of frames via mdtraj.load_frame() vs
the retrieval via save_trajs
@author: gph82, clonker
"""
from __future__ import absolute_import
import unittest
import os
import shutil
import tempfile
import numpy as np
import pyemma.coordinates as coor
from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, \
compare_coords_md_trajectory_objects
from pyemma.coordinates.api import save_trajs
from six.moves import range
import pkg_resources
class TestSaveTrajs(unittest.TestCase):
def setUp(self):
self.eps = 1e-10
path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
self.pdbfile = os.path.join(path, 'bpti_ca.pdb')
self.trajfiles = [os.path.join(path, 'bpti_001-033.xtc'),
os.path.join(path, 'bpti_034-066.xtc'),
os.path.join(path, 'bpti_067-100.xtc')
]
# Create random sets of files and frames to be retrieved from trajfiles
n_members_set1 = 10
n_members_set2 = 20
set_1 = np.vstack((np.random.permutation([0, 2] * n_members_set1)[:n_members_set1],
np.random.randint(32, size=n_members_set1))).T
set_2 = np.vstack((np.random.permutation([0, 2] * n_members_set2)[:n_members_set2],
np.random.randint(32, size=n_members_set2))).T
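# Each row of a set is a (trajectory_index, frame_index) pair: e.g. a row
# [2, 17] requests frame 17 of self.trajfiles[2]. Only trajectory indices
# 0 and 2 are drawn here.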
self.sets = [set_1, set_2]
self.subdir = tempfile.mkdtemp(suffix='save_trajs_test/')
# Instantiate the reader
self.reader = coor.source(self.trajfiles, top=self.pdbfile)
self.reader.chunksize = 30
self.n_pass_files = [self.subdir + 'n_pass.set_%06u.xtc' % ii for ii in range(len(self.sets))]
self.one_pass_files = [self.subdir + '1_pass.set_%06u.xtc' % ii for ii in range(len(self.sets))]
self.traj_ref = save_traj_w_md_load_frame(self.reader, self.sets)
self.strides = [2, 3, 5]
def tearDown(self):
shutil.rmtree(self.subdir, ignore_errors=True)
def test_save_SaveTrajs_IO(self):
# Test that we're saving to disk alright
flist = save_trajs(self.reader, self.sets, prefix=self.subdir)
exist = True
for f in flist:
exist = exist and os.stat(f)
self.assertTrue(exist, "Could not write to disk")
def test_save_SaveTrajs_multipass(self):
# Without the "inmemory" option, i.e. multipass
__ = save_trajs(self.reader, self.sets,
outfiles=self.n_pass_files)
# Reload the object to memory
traj_n_pass = single_traj_from_n_files(self.n_pass_files, top=self.pdbfile)
# Check for diffs
(found_diff, errmsg) = compare_coords_md_trajectory_objects(traj_n_pass, self.traj_ref, atom=0)
self.assertFalse(found_diff, errmsg)
def test_save_SaveTrajs_onepass(self):
# With the inmemory option = True
__ = save_trajs(self.reader, self.sets,
outfiles=self.one_pass_files, inmemory=True)
traj_1_pass = single_traj_from_n_files(self.one_pass_files, top=self.pdbfile)
# Check for diffs
(found_diff, errmsg) = compare_coords_md_trajectory_objects(traj_1_pass, self.traj_ref, atom=0)
self.assertFalse(found_diff, errmsg)
def test_save_SaveTrajs_onepass_with_stride(self):
# With the inmemory option = True
for stride in self.strides[:]:
# Since none of the trajfiles have more than 30 frames, the frames have to be re-drawn for every stride
sets = np.copy(self.sets)
sets[0][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[0])[0])
sets[1][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[1])[0])
__ = save_trajs(self.reader, sets,
outfiles=self.one_pass_files, inmemory=True, stride=stride)
traj_1_pass = single_traj_from_n_files(self.one_pass_files, top=self.pdbfile)
# Also the reference has to be re-drawn using the stride. For this, we re-scale the strided
# frame indexes to their unstrided values
sets[0][:, 1] *= stride
sets[1][:, 1] *= stride
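# e.g. with stride=3, strided frame index 7 maps back to original frame 21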
traj_ref = save_traj_w_md_load_frame(self.reader, sets)
# Check for diffs
(found_diff, errmsg) = compare_coords_md_trajectory_objects(traj_1_pass, traj_ref, atom=0)
self.assertFalse(found_diff, errmsg)
def test_save_SaveTrajs_multipass_with_stride(self):
# With the inmemory option = True
for stride in self.strides[:]:
# Since none of the trajfiles have more than 30 frames, the frames have to be re-drawn for every stride
sets = np.copy(self.sets)
sets[0][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[0])[0])
sets[1][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[1])[0])
__ = save_trajs(self.reader, sets,
outfiles=self.one_pass_files, inmemory=False, stride=stride)
traj_1_pass = single_traj_from_n_files(self.one_pass_files, top=self.pdbfile)
# Also the reference has to be re-drawn using the stride. For this, we re-scale the strided
# frame indexes to their unstrided values
sets[0][:, 1] *= stride
sets[1][:, 1] *= stride
traj_ref = save_traj_w_md_load_frame(self.reader, sets)
# Check for diffs
(found_diff, errmsg) = compare_coords_md_trajectory_objects(traj_1_pass, traj_ref, atom=0)
self.assertFalse(found_diff, errmsg)
def test_out_of_bound_indexes(self):
# assert ValueError with index info is raised for faulty input
self.sets[0][:,1] *= 100000
with self.assertRaises(ValueError) as raised:
save_trajs(self.reader, self.sets, outfiles=self.one_pass_files)
if __name__ == "__main__":
unittest.main()
|
gph82/PyEMMA
|
pyemma/coordinates/tests/test_save_trajs.py
|
Python
|
lgpl-3.0
| 6,916
|
[
"MDTraj"
] |
d822eb1f6a9190b57c4450841199b3e89244914d740f64e8e67a613829c864c9
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 David Doukhan <doukhan@limsi.fr>
# This file is part of TimeSide.
# TimeSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# TimeSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
# Author: David Doukhan <doukhan@limsi.fr>
from timeside.core import implements, interfacedoc, _WITH_YAAFE
from timeside.core.analyzer import Analyzer, IAnalyzer
if not _WITH_YAAFE:
raise ImportError('yaafelib must be missing')
from timeside.plugins.diadems.limsi_sad import LimsiSad
from timeside.plugins.analyzer.externals.yaafe import Yaafe
import numpy as np
from pyannote.features.audio.yaafe import YaafeFrame
from pyannote.core.feature import SlidingWindowFeature
from pyannote.core import Annotation
from pyannote.algorithms.clustering.bic import BICClustering
from timeside.core.tools.parameters import HasTraits, Float, Enum
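# Sketch of the change-point criterion implemented in gauss_div below: for
# adjacent windows w1, w2 of winsize frames each,
#   d = sum_k (mean(w1)_k - mean(w2)_k)**2 / (std(w1)_k * std(w2)_k)
# summed over feature dimensions k; a large d suggests a speaker change
# between the two windows.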
def gauss_div(data, winsize):
ret = []
for i in xrange(winsize, len(data) - winsize + 1):
w1 = data[(i - winsize):i, :]
w2 = data[i:(i + winsize), :]
meandiff = np.mean(w1, axis=0) - np.mean(w2, axis=0)
invstdprod = 1. / (np.std(w1, axis=0) * np.std(w2, axis=0))
ret.append(np.sum(meandiff * meandiff * invstdprod))
return ret
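# Sketch of what gauss_div computes (illustrative shapes, not from the original
# source): for each split point i it scores the winsize frames before i against
# the winsize frames after i with a diagonal-covariance divergence,
#     sum_d (mu1_d - mu2_d)**2 / (sigma1_d * sigma2_d),
# so a (100 x 13) MFCC matrix with winsize=10 yields 100 - 2*10 + 1 = 81 scores.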
def segment(data, minsize):
if len(data) == 0:
return []
am = np.argmax(data)
if am <= minsize:
ret1 = ([0] * am)
else:
ret1 = segment(data[:(am-minsize)], minsize) + ([0] * minsize)
if (am + minsize - 1) >= len(data):
ret2 = ([0] * (len(data) - am - 1))
else:
ret2 = ([0] * minsize) + segment(data[(am+minsize+1):], minsize)
return (ret1 + [1] + ret2)
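# Illustration (toy input, not from the original source): segment() marks the
# strongest divergence peak with a 1 and forces at least minsize zeros around
# it before recursing on the remainder, e.g.
#     segment([0, 1, 9], minsize=2) -> [0, 0, 1]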
class LimsiDiarization(Analyzer):
implements(IAnalyzer)
# Define Parameters
class _Param(HasTraits):
sad_model = Enum('etape', 'maya')
gdiff_win_size_sec = Float
min_seg_size_sec = Float
bic_penalty_coeff = Float
def __init__(self, sad_model='etape', gdiff_win_size_sec=5.,
min_seg_size_sec=2.5, bic_penalty_coeff=0.5):
super(LimsiDiarization, self).__init__()
self.gdiff_win_size_sec = gdiff_win_size_sec
self.min_seg_size_sec = min_seg_size_sec
self.bic_penalty_coeff = bic_penalty_coeff
        sad_analyzer = LimsiSad(sad_model=sad_model)
self.sad_analyzer = sad_analyzer
self.parents['sad_analyzer'] = sad_analyzer
        # feature extraction definition
feature_plan = ['mfccchop: MFCC CepsIgnoreFirstCoeff=0 blockSize=1024 stepSize=256']
self.parents['yaafe'] = Yaafe(feature_plan=feature_plan,
input_samplerate=16000)
# informative parameters
        # these are not really taken into account by the system;
        # they are bypassed by the yaafe feature plan
self.input_blocksize = 1024
self.input_stepsize = 256
@staticmethod
@interfacedoc
def id():
return "limsi_diarization"
@staticmethod
@interfacedoc
def name():
return "Limsi diarization system"
@staticmethod
@interfacedoc
def unit():
        # return the unit of the data (dB, St, ...)
return "Speaker Id"
def process(self, frames, eod=False):
if self.input_samplerate != 16000:
            raise Exception('%s requires a 16000 Hz input sample rate; %d provided' % (self.__class__.__name__, self.input_samplerate))
return frames, eod
def post_process(self):
# extract mfcc with yaafe and store them to be used with pyannote
res_yaafe = self.parents['yaafe'].results['yaafe.mfccchop']
mfcc = res_yaafe.data_object.value
sw = YaafeFrame(self.input_blocksize, self.input_stepsize,
self.input_samplerate)
pyannotefeat = SlidingWindowFeature(mfcc, sw)
# gaussian divergence window size
timestepsize = self.input_stepsize / float(self.input_samplerate)
gdiff_win_size_frame = int(self.gdiff_win_size_sec / timestepsize)
min_seg_size_frame = int(self.min_seg_size_sec / timestepsize)
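        # e.g. with the 16 kHz / 256-sample step enforced above, timestepsize is
        # 0.016 s, so the default 5 s window -> 312 frames and 2.5 s -> 156 frames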
# speech activity detection
sad_analyzer = self.parents['sad_analyzer']
res_sad = sad_analyzer.results['limsi_sad.sad_lhh_diff']
sadval = res_sad.data_object.value[:]
# indices of frames detected as speech
speech_threshold = 0.
frameids = [i for i, val in enumerate(sadval)
if val > speech_threshold]
# compute gaussian divergence of speech frames only
gdiff = gauss_div(mfcc[frameids, :], gdiff_win_size_frame)
# initial segmentation based on gaussian divergence criterion
seg = segment(gdiff, min_seg_size_frame)
# Convert initial segmentation to pyannote annotation
chunks = Annotation()
fbegin = None
lastframe = None
ichunk = 0
for segval, iframe in zip(seg, frameids):
if segval == 1:
if lastframe is not None:
chunks[pyannotefeat.sliding_window.rangeToSegment(fbegin, iframe-fbegin)] = str(ichunk)
ichunk += 1
                fbegin = iframe
            elif iframe - 1 != lastframe:
                if lastframe is not None:
                    chunks[pyannotefeat.sliding_window.rangeToSegment(fbegin, lastframe-fbegin+1)] = str(ichunk)
                fbegin = iframe
lastframe = iframe
if lastframe != fbegin:
chunks[pyannotefeat.sliding_window.rangeToSegment(fbegin, lastframe-fbegin+1)] = str(ichunk)
# performs BIC clustering
bicClustering = BICClustering(covariance_type='full', penalty_coef=self.bic_penalty_coeff)
hypothesis = bicClustering(chunks, features=pyannotefeat)
# get diarisation results
tmplabel = [int(h[2]) for h in hypothesis.itertracks(True)]
tmptime = [h[0].start for h in hypothesis.itertracks()]
tmpduration = [h[0].duration for h in hypothesis.itertracks()]
        # merge adjacent clusters having the same label
label = []
time = []
duration = []
lastlabel = None
for l, t, d in zip(tmplabel, tmptime, tmpduration):
if l != lastlabel:
label.append(l)
duration.append(d)
time.append(t)
else:
duration[-1] = t + d - time[-1]
lastlabel = l
# store diarisation result
diar_res = self.new_result(data_mode='label', time_mode='segment')
diar_res.id_metadata.id += '.' + 'speakers' # + name + 'diarisation'
diar_res.id_metadata.name += ' ' + 'speaker identifiers' # name + 'diarisation'
diar_res.data_object.label = label
diar_res.data_object.time = time
diar_res.data_object.duration = duration
diar_res.data_object.label_metadata.label = dict()
for lab in diar_res.data_object.label:
diar_res.data_object.label_metadata.label[lab] = str(lab)
self.add_result(diar_res)
# Generate Graphers for the LimsiDiarization analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayLimsiDiarization = DisplayAnalyzer.create(
analyzer=LimsiDiarization,
analyzer_parameters={'sad_model': 'etape'},
result_id='limsi_diarization.speakers',
grapher_id='grapher_limsi_diarization_speakers',
grapher_name='Speaker diarization (ETAPE)',
background='waveform',
staging=True)
DisplayLimsiDiarizationMaya = DisplayAnalyzer.create(
analyzer=LimsiDiarization,
analyzer_parameters={'sad_model': 'maya'},
result_id='limsi_diarization.speakers',
grapher_id='grapher_limsi_diarization_speakers_maya',
grapher_name='Speaker diarization (Mayan)',
background='waveform',
staging=True)
|
ANR-DIADEMS/timeside-diadems
|
timeside/plugins/diadems/limsi_diarization.py
|
Python
|
gpl-2.0
| 8,304
|
[
"Gaussian"
] |
268f0e8b4e7743a7f7d2900c4238d9e7d7851be56eadd96e22b10844a27a9cd3
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This tests the scafacos p2nfft dipolar calculations by matching against
# reference data from direct summation. In 2d, reference data from the mdlc
# test case is used
import espressomd
import espressomd.magnetostatics as magnetostatics
import espressomd.magnetostatic_extensions as magnetostatic_extensions
import numpy as np
import unittest as ut
import unittest_decorators as utx
from tests_common import abspath
@utx.skipIfMissingFeatures(["DIPOLES", "FFTW"])
class Dipolar_p3m_mdlc_p2nfft(ut.TestCase):
"""Tests mdlc (2d) as well as dipolar p3m and dipolar p2nfft (3d) against
stored data. Validity of the stored data:
    2d: as long as this test AND the scafacos_dipolar_1d_2d test pass, we are safe.
3d: as long as the independently written p3m and p2nfft agree, we are safe.
"""
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.seed = s.cell_system.get_state()['n_nodes'] * [1234]
s.time_step = 0.01
s.cell_system.skin = .4
s.periodicity = [1, 1, 1]
s.thermostat.turn_off()
def test_mdlc(self):
s = self.s
s.part.clear()
rho = 0.3
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 100
particle_radius = 0.5
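        # box_l follows from treating rho as the sphere volume fraction:
        # rho = n_particle * (4/3) * pi * particle_radius**3 / box_l**3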
box_l = pow(((4 * n_particle * np.pi) / (3 * rho)),
1.0 / 3.0) * particle_radius
s.box_l = 3 * [box_l]
        with open(abspath("data/mdlc_reference_data_energy.dat")) as f:
            ref_E = float(f.readline())
# Particles
data = np.genfromtxt(abspath("data/mdlc_reference_data_forces_torques.dat"))
        for p in data[:, :]:
s.part.add(id=int(p[0]), pos=p[1:4], dip=p[4:7])
s.part[:].rotation = (1, 1, 1)
p3m = magnetostatics.DipolarP3M(prefactor=1, mesh=32, accuracy=1E-4)
dlc = magnetostatic_extensions.DLC(maxPWerror=1E-5, gap_size=2.)
s.actors.add(p3m)
s.actors.add(dlc)
s.thermostat.turn_off()
s.integrator.run(0)
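        # Deviation from the reference data: summed per-particle error norms,
        # scaled by 1 / sqrt(n_particles)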
err_f = np.sum(np.linalg.norm(s.part[:].f - data[:, 7:10], axis=1)) / np.sqrt(data.shape[0])
err_t = np.sum(np.linalg.norm(s.part[:].torque_lab - data[:, 10:13], axis=1)) / np.sqrt(data.shape[0])
err_e = s.analysis.energy()["dipolar"] - ref_E
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
del s.actors[0]
def test_p3m(self):
s = self.s
s.part.clear()
rho = 0.09
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 1000
particle_radius = 1
box_l = pow(((4 * n_particle * np.pi) / (3 * rho)),
1.0 / 3.0) * particle_radius
s.box_l = 3 * [box_l]
# Particles
data = np.genfromtxt(abspath("data/p3m_magnetostatics_system.data"))
        for p in data[:, :]:
s.part.add(id=int(p[0]), pos=p[1:4], dip=p[4:7])
s.part[:].rotation = (1, 1, 1)
p3m = magnetostatics.DipolarP3M(
prefactor=1, mesh=32, accuracy=1E-6, epsilon="metallic")
s.actors.add(p3m)
s.integrator.run(0)
expected = np.genfromtxt(abspath("data/p3m_magnetostatics_expected.data"))[:, 1:]
err_f = np.sum(np.linalg.norm(s.part[:].f - expected[:, 0:3], axis=1)) / np.sqrt(data.shape[0])
err_t = np.sum(np.linalg.norm(s.part[:].torque_lab - expected[:, 3:6], axis=1)) / np.sqrt(data.shape[0])
ref_E = 5.570
err_e = s.analysis.energy()["dipolar"] - ref_E
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
@utx.skipIfMissingFeatures("SCAFACOS_DIPOLES")
def test_scafacos_dipoles(self):
s = self.s
s.part.clear()
rho = 0.09
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 1000
particle_radius = 1
box_l = pow(((4 * n_particle * np.pi) / (3 * rho)),
1.0 / 3.0) * particle_radius
s.box_l = 3 * [box_l]
# Particles
data = np.genfromtxt(abspath("data/p3m_magnetostatics_system.data"))
        for p in data[:, :]:
s.part.add(id=int(p[0]), pos=p[1:4], dip=p[4:7], rotation=(1, 1, 1))
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 0,
"pnfft_N": "32,32,32",
"pnfft_n": "32,32,32",
"pnfft_window_name": "bspline",
"pnfft_m": "4",
"p2nfft_ignore_tolerance": "1",
"pnfft_diff_ik": "0",
"p2nfft_r_cut": "11",
"p2nfft_alpha": "0.31"})
s.actors.add(scafacos)
s.integrator.run(0)
expected = np.genfromtxt(abspath("data/p3m_magnetostatics_expected.data"))[:, 1:]
err_f = np.sum(np.linalg.norm(s.part[:].f - expected[:, 0:3], axis=1)) / np.sqrt(data.shape[0])
err_t = np.sum(np.linalg.norm(s.part[:].torque_lab - expected[:, 3:6], axis=1)) / np.sqrt(data.shape[0])
ref_E = 5.570
err_e = s.analysis.energy()["dipolar"] - ref_E
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
if __name__ == "__main__":
ut.main()
|
mkuron/espresso
|
testsuite/python/dipolar_mdlc_p3m_scafacos_p2nfft.py
|
Python
|
gpl-3.0
| 7,409
|
[
"ESPResSo"
] |
8f34595fd119ccd0553beaf54cd00cc1784c851fac1c2132a37bda4097db0042
|
import re
import click
import six
from httpie.context import Environment
from httpie.core import main as httpie_main
from parsimonious.exceptions import ParseError, VisitationError
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
from six import BytesIO
from six.moves.urllib.parse import urljoin
from .completion import ROOT_COMMANDS, ACTIONS, OPTION_NAMES, HEADER_NAMES
from .context import Context
from .utils import unescape
grammar = Grammar(r"""
command = mutation / immutation
mutation = concat_mut+ / nonconcat_mut
immutation = preview / action / help / exit / _
concat_mut = option_mut / full_quoted_mut / value_quoted_mut / unquoted_mut
nonconcat_mut = cd / rm
preview = _ tool _ (method _)? (urlpath _)? concat_mut*
action = _ method _ (urlpath _)? concat_mut*
urlpath = (~r"https?://" unquoted_string) / (!concat_mut string)
help = _ "help" _
exit = _ "exit" _
unquoted_mut = _ unquoted_mutkey mutop unquoted_mutval _
full_quoted_mut = full_squoted_mut / full_dquoted_mut
value_quoted_mut = value_squoted_mut / value_dquoted_mut
full_squoted_mut = _ "'" squoted_mutkey mutop squoted_mutval "'" _
full_dquoted_mut = _ '"' dquoted_mutkey mutop dquoted_mutval '"' _
value_squoted_mut = _ unquoted_mutkey mutop "'" squoted_mutval "'" _
value_dquoted_mut = _ unquoted_mutkey mutop '"' dquoted_mutval '"' _
mutop = ":" / "==" / "="
unquoted_mutkey = unquoted_mutkey_item+
unquoted_mutval = unquoted_stringitem*
unquoted_mutkey_item = unquoted_mutkey_char / escapeseq
unquoted_mutkey_char = ~r"[^\s'\"\\=:]"
squoted_mutkey = squoted_mutkey_item+
squoted_mutval = squoted_stringitem*
squoted_mutkey_item = squoted_mutkey_char / escapeseq
squoted_mutkey_char = ~r"[^\r\n'\\=:]"
dquoted_mutkey = dquoted_mutkey_item+
dquoted_mutval = dquoted_stringitem*
dquoted_mutkey_item = dquoted_mutkey_char / escapeseq
dquoted_mutkey_char = ~r'[^\r\n"\\=:]'
option_mut = flag_option_mut / value_option_mut
flag_option_mut = _ flag_optname _
flag_optname = "--json" / "-j" / "--form" / "-f" / "--verbose" / "-v" /
"--headers" / "-h" / "--body" / "-b" / "--stream" / "-S" /
"--download" / "-d" / "--continue" / "-c" / "--follow" /
"--check-status" / "--ignore-stdin" / "--help" /
"--version" / "--traceback" / "--debug"
value_option_mut = _ value_optname ~r"(\s+|=)" string _
value_optname = "--pretty" / "--style" / "-s" / "--print" / "-p" /
"--output" / "-o" / "--session" / "--session-read-only" /
"--auth" / "-a" / "--auth-type" / "--proxy" / "--verify" /
"--cert" / "--cert-key" / "--timeout"
cd = _ "cd" _ string _
rm = (_ "rm" _ "*" _) / (_ "rm" _ ~r"\-(h|q|b|o)" _ mutkey _)
tool = "httpie" / "curl"
method = ~r"get"i / ~r"head"i / ~r"post"i / ~r"put"i / ~r"delete"i /
~r"patch"i
mutkey = unquoted_mutkey / ("'" squoted_mutkey "'") /
('"' dquoted_mutkey '"') / flag_optname / value_optname
string = quoted_string / unquoted_string
quoted_string = ('"' dquoted_stringitem* '"') /
("'" squoted_stringitem* "'")
unquoted_string = unquoted_stringitem+
dquoted_stringitem = dquoted_stringchar / escapeseq
squoted_stringitem = squoted_stringchar / escapeseq
unquoted_stringitem = unquoted_stringchar / escapeseq
dquoted_stringchar = ~r'[^\r\n"\\]'
squoted_stringchar = ~r"[^\r\n'\\]"
unquoted_stringchar = ~r"[^\s'\"\\]"
escapeseq = ~r"\\."
_ = ~r"\s*"
""")
def urljoin2(base, path, **kwargs):
if not base.endswith('/'):
base += '/'
url = urljoin(base, path, **kwargs)
if url.endswith('/') and not path.endswith('/'):
url = url[:-1]
return url
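# Examples of the trailing-slash handling above (hypothetical URLs):
#   urljoin2('http://example.com/api', 'users')     -> 'http://example.com/api/users'
#   urljoin2('http://example.com/api', 'users/')    -> 'http://example.com/api/users/'
#   urljoin2('http://example.com/api/v2/', '../v1') -> 'http://example.com/api/v1'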
def generate_help_text():
"""Return a formatted string listing commands, HTTPie options, and HTTP
actions.
"""
def generate_cmds_with_explanations(summary, cmds):
text = '{0}:\n'.format(summary)
for cmd, explanation in cmds:
text += '\t{0:<10}\t{1:<20}\n'.format(cmd, explanation)
return text + '\n'
text = generate_cmds_with_explanations('Commands', ROOT_COMMANDS.items())
text += generate_cmds_with_explanations('Options', OPTION_NAMES.items())
text += generate_cmds_with_explanations('Actions', ACTIONS.items())
text += generate_cmds_with_explanations('Headers', HEADER_NAMES.items())
return text
class ExecutionVisitor(NodeVisitor):
def __init__(self, context):
super(ExecutionVisitor, self).__init__()
self.context = context
self.context_override = Context(context.url)
self.method = None
self.tool = None
def visit_method(self, node, children):
self.method = node.text
return node
def visit_urlpath(self, node, children):
path = node.text
self.context_override.url = urljoin2(self.context_override.url, path)
return node
def visit_cd(self, node, children):
_, _, _, path, _ = children
self.context_override.url = urljoin2(self.context_override.url, path)
return node
def visit_rm(self, node, children):
children = children[0]
kind = children[3].text
if kind == '*':
# Clear context
for target in [self.context.headers,
self.context.querystring_params,
self.context.body_params,
self.context.options]:
target.clear()
return node
name = children[5]
if kind == '-h':
target = self.context.headers
elif kind == '-q':
target = self.context.querystring_params
elif kind == '-b':
target = self.context.body_params
else:
assert kind == '-o'
target = self.context.options
del target[name]
return node
def visit_help(self, node, children):
click.echo_via_pager(generate_help_text())
return node
def visit_exit(self, node, children):
self.context.should_exit = True
return node
def visit_mutkey(self, node, children):
if isinstance(children[0], list):
return children[0][1]
return children[0]
def _mutate(self, node, key, op, val):
if op == ':':
target = self.context_override.headers
elif op == '==':
target = self.context_override.querystring_params
elif op == '=':
target = self.context_override.body_params
target[key] = val
return node
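    # Illustration of the operator -> target mapping above (hypothetical
    # values): 'Accept:application/json' goes to headers, 'page==2' to
    # querystring_params, and 'name=Jane' to body_params.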
def visit_unquoted_mut(self, node, children):
_, key, op, val, _ = children
return self._mutate(node, key, op, val)
def visit_full_squoted_mut(self, node, children):
_, _, key, op, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_full_dquoted_mut(self, node, children):
_, _, key, op, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_value_squoted_mut(self, node, children):
_, key, op, _, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_value_dquoted_mut(self, node, children):
_, key, op, _, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_unquoted_mutkey(self, node, children):
return unescape(node.text)
def visit_squoted_mutkey(self, node, children):
return node.text
def visit_dquoted_mutkey(self, node, children):
return node.text
def visit_mutop(self, node, children):
return node.text
def visit_unquoted_mutval(self, node, children):
return unescape(node.text)
def visit_squoted_mutval(self, node, children):
return node.text
def visit_dquoted_mutval(self, node, children):
return node.text
def visit_flag_option_mut(self, node, children):
_, key, _ = children
self.context_override.options[key] = None
return node
def visit_flag_optname(self, node, children):
return node.text
def visit_value_option_mut(self, node, children):
_, key, _, val, _ = children
self.context_override.options[key] = val
return node
def visit_value_optname(self, node, children):
return node.text
def visit_string(self, node, children):
return children[0]
def visit_unquoted_string(self, node, children):
return unescape(node.text)
def visit_quoted_string(self, node, children):
return node.text[1:-1]
def visit_tool(self, node, children):
self.tool = node.text
return node
def visit_mutation(self, node, children):
self.context.update(self.context_override)
return node
def _final_context(self):
context = self.context.copy()
context.update(self.context_override)
return context
def visit_immutation(self, node, children):
context = self._final_context()
child_type = children[0].expr_name
if child_type == 'preview':
if self.tool == 'httpie':
command = ['http'] + context.httpie_args(self.method,
quote=True)
else:
assert self.tool == 'curl'
command = ['curl'] + context.curl_args(self.method, quote=True)
click.echo(' '.join(command))
elif child_type == 'action':
output = BytesIO()
try:
env = Environment(stdout=output, is_windows=False)
httpie_main(context.httpie_args(self.method), env=env)
content = output.getvalue()
finally:
output.close()
# XXX: Work around a bug of click.echo_via_pager(). When you pass
# a bytestring to echo_via_pager(), it converts the bytestring with
# str(b'abc'), which makes it "b'abc'".
if six.PY2:
content = unicode(content, 'utf-8') # noqa
else:
content = str(content, 'utf-8')
click.echo_via_pager(content)
return node
def generic_visit(self, node, children):
if not node.expr_name and node.children:
if len(children) == 1:
return children[0]
return children
return node
def execute(command, context):
try:
root = grammar.parse(command)
except ParseError as err:
# TODO: Better error message
part = command[err.pos:err.pos + 10]
click.secho('Syntax error near "%s"' % part, err=True, fg='red')
else:
visitor = ExecutionVisitor(context)
try:
visitor.visit(root)
except VisitationError as err:
exc_class = err.original_class
if exc_class is KeyError:
# XXX: Need to parse VisitationError error message to get the
# original error message as VisitationError doesn't hold the
# original exception object
key = re.search(r"KeyError: u?'(.*)'", str(err)).group(1)
click.secho("Key '%s' not found" % key, err=True,
fg='red')
else:
# TODO: Better error message
click.secho(str(err), err=True, fg='red')
|
Yegorov/http-prompt
|
http_prompt/execution.py
|
Python
|
mit
| 11,589
|
[
"VisIt"
] |
e48a4baf3c549dfab8ff9fc1c62c9cdb1c40a00322e1edacebf35edce59c95e2
|
"""
Test LMS Notes
"""
from unittest import skip
import random
from uuid import uuid4
from datetime import datetime
from nose.plugins.attrib import attr
from common.test.acceptance.tests.helpers import UniqueCourseTest, EventsTestMixin
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.course_nav import CourseNavPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.edxnotes import EdxNotesUnitPage, EdxNotesPage, EdxNotesPageNoContent
from common.test.acceptance.fixtures.edxnotes import EdxNotesFixture, Note, Range
from flaky import flaky
class EdxNotesTestMixin(UniqueCourseTest):
"""
Creates a course with initial data and contains useful helper methods.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(EdxNotesTestMixin, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.note_unit_page = EdxNotesUnitPage(self.browser, self.course_id)
self.notes_page = EdxNotesPage(self.browser, self.course_id)
self.username = str(uuid4().hex)[:5]
self.email = "{}@email.com".format(self.username)
self.selector = "annotate-id"
self.edxnotes_fixture = EdxNotesFixture()
self.course_fixture = CourseFixture(
self.course_info["org"], self.course_info["number"],
self.course_info["run"], self.course_info["display_name"]
)
self.course_fixture.add_advanced_settings({
u"edxnotes": {u"value": True}
})
self.course_fixture.add_children(
XBlockFixtureDesc("chapter", "Test Section 1").add_children(
XBlockFixtureDesc("sequential", "Test Subsection 1").add_children(
XBlockFixtureDesc("vertical", "Test Unit 1").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 1",
data="""
<p><span class="{}">Annotate this!</span></p>
<p>Annotate this</p>
""".format(self.selector)
),
XBlockFixtureDesc(
"html",
"Test HTML 2",
data="""<p><span class="{}">Annotate this!</span></p>""".format(self.selector)
),
),
XBlockFixtureDesc("vertical", "Test Unit 2").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 3",
data="""<p><span class="{}">Annotate this!</span></p>""".format(self.selector)
),
),
),
XBlockFixtureDesc("sequential", "Test Subsection 2").add_children(
XBlockFixtureDesc("vertical", "Test Unit 3").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 4",
data="""
<p><span class="{}">Annotate this!</span></p>
""".format(self.selector)
),
),
),
),
XBlockFixtureDesc("chapter", "Test Section 2").add_children(
XBlockFixtureDesc("sequential", "Test Subsection 3").add_children(
XBlockFixtureDesc("vertical", "Test Unit 4").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 5",
data="""
<p><span class="{}">Annotate this!</span></p>
""".format(self.selector)
),
XBlockFixtureDesc(
"html",
"Test HTML 6",
data="""<p><span class="{}">Annotate this!</span></p>""".format(self.selector)
),
),
),
)).install()
self.addCleanup(self.edxnotes_fixture.cleanup)
AutoAuthPage(self.browser, username=self.username, email=self.email, course_id=self.course_id).visit()
def _add_notes(self):
xblocks = self.course_fixture.get_nested_xblocks(category="html")
notes_list = []
for index, xblock in enumerate(xblocks):
notes_list.append(
Note(
user=self.username,
usage_id=xblock.locator,
course_id=self.course_fixture._course_key,
ranges=[Range(startOffset=index, endOffset=index + 5)]
)
)
self.edxnotes_fixture.create_notes(notes_list)
self.edxnotes_fixture.install()
@attr(shard=4)
class EdxNotesDefaultInteractionsTest(EdxNotesTestMixin):
"""
Tests for creation, editing, deleting annotations inside annotatable components in LMS.
"""
def create_notes(self, components, offset=0):
self.assertGreater(len(components), 0)
index = offset
for component in components:
for note in component.create_note(".{}".format(self.selector)):
note.text = "TEST TEXT {}".format(index)
index += 1
def edit_notes(self, components, offset=0):
self.assertGreater(len(components), 0)
index = offset
for component in components:
self.assertGreater(len(component.notes), 0)
for note in component.edit_note():
note.text = "TEST TEXT {}".format(index)
index += 1
def edit_tags_in_notes(self, components, tags):
self.assertGreater(len(components), 0)
index = 0
for component in components:
self.assertGreater(len(component.notes), 0)
for note in component.edit_note():
note.tags = tags[index]
index += 1
self.assertEqual(index, len(tags), "Number of supplied tags did not match components")
def remove_notes(self, components):
self.assertGreater(len(components), 0)
for component in components:
self.assertGreater(len(component.notes), 0)
component.remove_note()
def assert_notes_are_removed(self, components):
for component in components:
self.assertEqual(0, len(component.notes))
def assert_text_in_notes(self, notes):
actual = [note.text for note in notes]
expected = ["TEST TEXT {}".format(i) for i in xrange(len(notes))]
self.assertEqual(expected, actual)
def assert_tags_in_notes(self, notes, expected_tags):
actual = [note.tags for note in notes]
expected = [expected_tags[i] for i in xrange(len(notes))]
self.assertEqual(expected, actual)
def test_can_create_notes(self):
"""
Scenario: User can create notes.
Given I have a course with 3 annotatable components
And I open the unit with 2 annotatable components
When I add 2 notes for the first component and 1 note for the second
Then I see that notes were correctly created
When I change sequential position to "2"
And I add note for the annotatable component on the page
Then I see that note was correctly created
When I refresh the page
Then I see that note was correctly stored
When I change sequential position to "1"
Then I see that notes were correctly stored on the page
"""
self.note_unit_page.visit()
components = self.note_unit_page.components
self.create_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
self.courseware_page.go_to_sequential_position(2)
components = self.note_unit_page.components
self.create_notes(components)
components = self.note_unit_page.refresh()
self.assert_text_in_notes(self.note_unit_page.notes)
self.courseware_page.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_text_in_notes(self.note_unit_page.notes)
def test_can_edit_notes(self):
"""
Scenario: User can edit notes.
Given I have a course with 3 components with notes
And I open the unit with 2 annotatable components
When I change text in the notes
Then I see that notes were correctly changed
When I change sequential position to "2"
And I change the note on the page
Then I see that note was correctly changed
When I refresh the page
Then I see that edited note was correctly stored
When I change sequential position to "1"
Then I see that edited notes were correctly stored on the page
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.edit_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
self.courseware_page.go_to_sequential_position(2)
components = self.note_unit_page.components
self.edit_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
components = self.note_unit_page.refresh()
self.assert_text_in_notes(self.note_unit_page.notes)
self.courseware_page.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_text_in_notes(self.note_unit_page.notes)
def test_can_delete_notes(self):
"""
Scenario: User can delete notes.
Given I have a course with 3 components with notes
And I open the unit with 2 annotatable components
When I remove all notes on the page
Then I do not see any notes on the page
When I change sequential position to "2"
And I remove all notes on the page
Then I do not see any notes on the page
When I refresh the page
Then I do not see any notes on the page
When I change sequential position to "1"
Then I do not see any notes on the page
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.remove_notes(components)
self.assert_notes_are_removed(components)
self.courseware_page.go_to_sequential_position(2)
components = self.note_unit_page.components
self.remove_notes(components)
self.assert_notes_are_removed(components)
components = self.note_unit_page.refresh()
self.assert_notes_are_removed(components)
self.courseware_page.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_notes_are_removed(components)
def test_can_create_note_with_tags(self):
"""
Scenario: a user of notes can define one with tags
Given I have a course with 3 annotatable components
And I open the unit with 2 annotatable components
When I add a note with tags for the first component
And I refresh the page
Then I see that note was correctly stored with its tags
"""
self.note_unit_page.visit()
components = self.note_unit_page.components
for note in components[0].create_note(".{}".format(self.selector)):
note.tags = ["fruit", "tasty"]
self.note_unit_page.refresh()
self.assertEqual(["fruit", "tasty"], self.note_unit_page.notes[0].tags)
def test_can_change_tags(self):
"""
Scenario: a user of notes can edit tags on notes
Given I have a course with 3 components with notes
When I open the unit with 2 annotatable components
And I edit tags on the notes for the 2 annotatable components
Then I see that the tags were correctly changed
And I again edit tags on the notes for the 2 annotatable components
And I refresh the page
Then I see that the tags were correctly changed
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.edit_tags_in_notes(components, [["hard"], ["apple", "pear"]])
self.assert_tags_in_notes(self.note_unit_page.notes, [["hard"], ["apple", "pear"]])
self.edit_tags_in_notes(components, [[], ["avocado"]])
self.assert_tags_in_notes(self.note_unit_page.notes, [[], ["avocado"]])
self.note_unit_page.refresh()
self.assert_tags_in_notes(self.note_unit_page.notes, [[], ["avocado"]])
def test_sr_labels(self):
"""
Scenario: screen reader labels exist for text and tags fields
Given I have a course with 3 components with notes
When I open the unit with 2 annotatable components
And I open the editor for each note
Then the text and tags fields both have screen reader labels
"""
self._add_notes()
self.note_unit_page.visit()
# First note is in the first annotatable component, will have field indexes 0 and 1.
for note in self.note_unit_page.components[0].edit_note():
self.assertTrue(note.has_sr_label(0, 0, "Note"))
self.assertTrue(note.has_sr_label(1, 1, "Tags (space-separated)"))
# Second note is in the second annotatable component, will have field indexes 2 and 3.
for note in self.note_unit_page.components[1].edit_note():
self.assertTrue(note.has_sr_label(0, 2, "Note"))
self.assertTrue(note.has_sr_label(1, 3, "Tags (space-separated)"))
@attr(shard=4)
class EdxNotesPageTest(EventsTestMixin, EdxNotesTestMixin):
"""
Tests for Notes page.
"""
def _add_notes(self, notes_list):
self.edxnotes_fixture.create_notes(notes_list)
self.edxnotes_fixture.install()
def _add_default_notes(self, tags=None, extra_notes=0):
"""
        Creates 5 test notes by default; extra_notes additional notes are created if specified.
If tags are not specified, will populate the notes with some test tag data.
If tags are specified, they will be used for each of the 3 notes that have tags.
"""
xblocks = self.course_fixture.get_nested_xblocks(category="html")
# pylint: disable=attribute-defined-outside-init
self.raw_note_list = [
Note(
usage_id=xblocks[4].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="First note",
quote="Annotate this",
updated=datetime(2011, 1, 1, 1, 1, 1, 1).isoformat(),
),
Note(
usage_id=xblocks[2].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="",
quote=u"Annotate this",
updated=datetime(2012, 1, 1, 1, 1, 1, 1).isoformat(),
tags=["Review", "cool"] if tags is None else tags
),
Note(
usage_id=xblocks[0].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Third note",
quote="Annotate this",
updated=datetime(2013, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=18)],
tags=["Cool", "TODO"] if tags is None else tags
),
Note(
usage_id=xblocks[3].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Fourth note",
quote="",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
tags=["review"] if tags is None else tags
),
Note(
usage_id=xblocks[1].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Fifth note",
quote="Annotate this",
updated=datetime(2015, 1, 1, 1, 1, 1, 1).isoformat()
),
]
if extra_notes > 0:
for __ in range(extra_notes):
self.raw_note_list.append(
Note(
usage_id=xblocks[random.choice([0, 1, 2, 3, 4, 5])].locator,
user=self.username,
course_id=self.course_fixture._course_key, # pylint: disable=protected-access
text="Fourth note",
quote="",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
tags=["review"] if tags is None else tags
)
)
self._add_notes(self.raw_note_list)
def assertNoteContent(self, item, text=None, quote=None, unit_name=None, time_updated=None, tags=None):
""" Verifies the expected properties of the note. """
self.assertEqual(text, item.text)
if item.quote is not None:
self.assertIn(quote, item.quote)
else:
self.assertIsNone(quote)
self.assertEqual(unit_name, item.unit_name)
self.assertEqual(time_updated, item.time_updated)
self.assertEqual(tags, item.tags)
def assertChapterContent(self, item, title=None, subtitles=None):
"""
Verifies the expected title and subsection titles (subtitles) for the given chapter.
"""
self.assertEqual(item.title, title)
self.assertEqual(item.subtitles, subtitles)
def assertGroupContent(self, item, title=None, notes=None):
"""
Verifies the expected title and child notes for the given group.
"""
self.assertEqual(item.title, title)
self.assertEqual(item.notes, notes)
def assert_viewed_event(self, view=None):
"""
Verifies that the correct view event was captured for the Notes page.
"""
# There will always be an initial event for "Recent Activity" because that is the default view.
# If view is something besides "Recent Activity", expect 2 events, with the second one being
# the view name passed in.
if view == 'Recent Activity':
view = None
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.notes_page_viewed'},
number_of_matches=1 if view is None else 2
)
expected_events = [{'event': {'view': 'Recent Activity'}}]
if view:
expected_events.append({'event': {'view': view}})
self.assert_events_match(expected_events, actual_events)
def assert_unit_link_event(self, usage_id, view):
"""
Verifies that the correct used_unit_link event was captured for the Notes page.
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.used_unit_link'},
number_of_matches=1
)
expected_events = [
{'event': {'component_usage_id': usage_id, 'view': view}}
]
self.assert_events_match(expected_events, actual_events)
def assert_search_event(self, search_string, number_of_results):
"""
Verifies that the correct searched event was captured for the Notes page.
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.searched'},
number_of_matches=1
)
expected_events = [
{'event': {'search_string': search_string, 'number_of_results': number_of_results}}
]
self.assert_events_match(expected_events, actual_events)
def _verify_pagination_info(
self,
notes_count_on_current_page,
header_text,
previous_button_enabled,
next_button_enabled,
current_page_number,
total_pages
):
"""
Verify pagination info
"""
self.assertEqual(self.notes_page.count(), notes_count_on_current_page)
self.assertEqual(self.notes_page.get_pagination_header_text(), header_text)
if total_pages > 1:
self.assertEqual(self.notes_page.footer_visible, True)
self.assertEqual(self.notes_page.is_previous_page_button_enabled(), previous_button_enabled)
self.assertEqual(self.notes_page.is_next_page_button_enabled(), next_button_enabled)
self.assertEqual(self.notes_page.get_current_page_number(), current_page_number)
self.assertEqual(self.notes_page.get_total_pages, total_pages)
else:
self.assertEqual(self.notes_page.footer_visible, False)
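    # For orientation: the pagination tests below call this as, e.g.,
    # _verify_pagination_info(25, 'Showing 1-25 out of 26 total',
    #                         False, True, 1, 2) for a 26-note course.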
def search_and_verify(self):
"""
Add, search and verify notes.
"""
self._add_default_notes(extra_notes=22)
self.notes_page.visit()
# Run the search
self.notes_page.search("note")
# No error message appears
self.assertFalse(self.notes_page.is_error_visible)
self.assertIn(u"Search Results", self.notes_page.tabs)
self.assertEqual(self.notes_page.get_total_pages, 2)
def test_no_content(self):
"""
Scenario: User can see `No content` message.
Given I have a course without notes
When I open Notes page
Then I see only "You do not have any notes within the course." message
"""
notes_page_empty = EdxNotesPageNoContent(self.browser, self.course_id)
notes_page_empty.visit()
self.assertIn(
"You have not made any notes in this course yet. Other students in this course are using notes to:",
notes_page_empty.no_content_text)
def test_notes_works_correctly_with_xss(self):
"""
Scenario: Note text & tags should be HTML and JS escaped
Given I am enrolled in a course with notes enabled
When I visit the Notes page, with a Notes text and tag containing HTML characters like < and >
Then the text and tags appear as expected due to having been properly escaped
"""
xblocks = self.course_fixture.get_nested_xblocks(category="html")
self._add_notes([
Note(
usage_id=xblocks[0].locator,
user=self.username,
course_id=self.course_fixture._course_key, # pylint: disable=protected-access
text='<script>alert("XSS")</script>',
quote="quote",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
tags=['<script>alert("XSS")</script>']
),
Note(
usage_id=xblocks[1].locator,
user=self.username,
course_id=self.course_fixture._course_key, # pylint: disable=protected-access
text='<b>bold</b>',
quote="quote",
updated=datetime(2014, 2, 1, 1, 1, 1, 1).isoformat(),
tags=['<i>bold</i>']
)
])
self.notes_page.visit()
notes = self.notes_page.notes
self.assertEqual(len(notes), 2)
self.assertNoteContent(
notes[0],
quote=u"quote",
text='<b>bold</b>',
unit_name="Test Unit 1",
time_updated="Feb 01, 2014 at 01:01 UTC",
tags=['<i>bold</i>']
)
self.assertNoteContent(
notes[1],
quote=u"quote",
text='<script>alert("XSS")</script>',
unit_name="Test Unit 1",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=['<script>alert("XSS")</script>']
)
def test_recent_activity_view(self):
"""
Scenario: User can view all notes by recent activity.
Given I have a course with 5 notes
When I open Notes page
Then I see 5 notes sorted by the updated date
And I see correct content in the notes
And an event has fired indicating that the Recent Activity view was selected
"""
self._add_default_notes()
self.notes_page.visit()
notes = self.notes_page.notes
self.assertEqual(len(notes), 5)
self.assertNoteContent(
notes[0],
quote=u"Annotate this",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[2],
quote="Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event()
def test_course_structure_view(self):
"""
Scenario: User can view all notes by location in Course.
Given I have a course with 5 notes
When I open Notes page
And I switch to "Location in Course" view
Then I see 2 groups, 3 sections and 5 notes
And I see correct content in the notes and groups
And an event has fired indicating that the Location in Course view was selected
"""
self._add_default_notes()
self.notes_page.visit().switch_to_tab("structure")
notes = self.notes_page.notes
groups = self.notes_page.chapter_groups
sections = self.notes_page.subsection_groups
self.assertEqual(len(notes), 5)
self.assertEqual(len(groups), 2)
self.assertEqual(len(sections), 3)
self.assertChapterContent(
groups[0],
title=u"Test Section 1",
subtitles=[u"Test Subsection 1", u"Test Subsection 2"]
)
self.assertGroupContent(
sections[0],
title=u"Test Subsection 1",
notes=[u"Fifth note", u"Third note", None]
)
self.assertNoteContent(
notes[0],
quote=u"Annotate this",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
quote=u"Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[2],
quote=u"Annotate this",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
self.assertGroupContent(
sections[1],
title=u"Test Subsection 2",
notes=[u"Fourth note"]
)
self.assertNoteContent(
notes[3],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertChapterContent(
groups[1],
title=u"Test Section 2",
subtitles=[u"Test Subsection 3"],
)
self.assertGroupContent(
sections[2],
title=u"Test Subsection 3",
notes=[u"First note"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Location in Course')
def test_tags_view(self):
"""
Scenario: User can view all notes by associated tags.
Given I have a course with 5 notes and I am viewing the Notes page
When I switch to the "Tags" view
Then I see 4 tag groups
And I see correct content in the notes and groups
And an event has fired indicating that the Tags view was selected
"""
self._add_default_notes()
self.notes_page.visit().switch_to_tab("tags")
notes = self.notes_page.notes
groups = self.notes_page.tag_groups
self.assertEqual(len(notes), 7)
self.assertEqual(len(groups), 4)
# Tag group "cool"
self.assertGroupContent(
groups[0],
title=u"cool (2)",
notes=[u"Third note", None]
)
self.assertNoteContent(
notes[0],
quote=u"Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[1],
quote=u"Annotate this",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
# Tag group "review"
self.assertGroupContent(
groups[1],
title=u"review (2)",
notes=[u"Fourth note", None]
)
self.assertNoteContent(
notes[2],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
# Tag group "todo"
self.assertGroupContent(
groups[2],
title=u"todo (1)",
notes=["Third note"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
# Notes with no tags
self.assertGroupContent(
groups[3],
title=u"[no tags] (2)",
notes=["Fifth note", "First note"]
)
self.assertNoteContent(
notes[5],
quote=u"Annotate this",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[6],
quote=u"Annotate this",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Tags')
@flaky # TNL-4590
def test_easy_access_from_notes_page(self):
"""
Scenario: Ensure that the link to the Unit works correctly.
Given I have a course with 5 notes
When I open Notes page
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I switch to "Location in Course" view
And I click on the second unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I switch to "Tags" view
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I run the search with "Fifth" query
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
"""
def assert_page(note, usage_id, view):
""" Verify that clicking on the unit link works properly. """
quote = note.quote
note.go_to_unit()
self.courseware_page.wait_for_page()
self.assertIn(quote, self.courseware_page.xblock_component_html_content())
self.assert_unit_link_event(usage_id, view)
self.reset_event_tracking()
self._add_default_notes()
self.notes_page.visit()
# visiting the page results in an ajax request to fetch the notes
self.notes_page.wait_for_ajax()
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[4]['usage_id'], "Recent Activity")
self.notes_page.visit().switch_to_tab("structure")
# visiting the page results in an ajax request to fetch the notes
self.notes_page.wait_for_ajax()
note = self.notes_page.notes[1]
assert_page(note, self.raw_note_list[2]['usage_id'], "Location in Course")
self.notes_page.visit().switch_to_tab("tags")
# visiting the page results in an ajax request to fetch the notes
self.notes_page.wait_for_ajax()
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[2]['usage_id'], "Tags")
self.notes_page.visit().search("Fifth")
# visiting the page results in an ajax request to fetch the notes
self.notes_page.wait_for_ajax()
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[4]['usage_id'], "Search Results")
def test_search_behaves_correctly(self):
"""
Scenario: Searching behaves correctly.
Given I have a course with 5 notes
When I open Notes page
When I run the search with " " query
Then I see the following error message "Please enter a term in the search field."
And I do not see "Search Results" tab
When I run the search with "note" query
Then I see that error message disappears
And I see that "Search Results" tab appears with 4 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
"""
self._add_default_notes()
self.notes_page.visit()
# Run the search with whitespaces only
self.notes_page.search(" ")
# Displays error message
self.assertTrue(self.notes_page.is_error_visible)
self.assertEqual(self.notes_page.error_text, u"Please enter a term in the search field.")
# Search results tab does not appear
self.assertNotIn(u"Search Results", self.notes_page.tabs)
# Run the search with correct query
self.notes_page.search("note")
# Error message disappears
self.assertFalse(self.notes_page.is_error_visible)
self.assertIn(u"Search Results", self.notes_page.tabs)
notes = self.notes_page.notes
self.assertEqual(len(notes), 4)
self.assertNoteContent(
notes[0],
quote=u"Annotate this",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[2],
quote="Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Search Results')
self.assert_search_event('note', 4)
@skip("scroll to tag functionality is disabled")
def test_scroll_to_tag_recent_activity(self):
"""
Scenario: Can scroll to a tag group from the Recent Activity view (default view)
Given I have a course with 5 notes and I open the Notes page
When I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit()
self._scroll_to_tag_and_verify("pear", 3)
@skip("scroll to tag functionality is disabled")
def test_scroll_to_tag_course_structure(self):
"""
Scenario: Can scroll to a tag group from the Course Structure view
Given I have a course with 5 notes and I open the Notes page and select the Course Structure view
When I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().switch_to_tab("structure")
self._scroll_to_tag_and_verify("squash", 5)
@skip("scroll to tag functionality is disabled")
def test_scroll_to_tag_search(self):
"""
Scenario: Can scroll to a tag group from the Search Results view
Given I have a course with 5 notes and I open the Notes page and perform a search
Then the Search view tab opens and gets focus
And when I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().search("note")
self._scroll_to_tag_and_verify("pumpkin", 4)
@skip("scroll to tag functionality is disabled")
def test_scroll_to_tag_from_tag_view(self):
"""
Scenario: Can scroll to a tag group from the Tags view
Given I have a course with 5 notes and I open the Notes page and select the Tag view
When I click on a tag associated with a note
Then I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().switch_to_tab("tags")
self._scroll_to_tag_and_verify("kiwi", 2)
def _scroll_to_tag_and_verify(self, tag_name, group_index):
""" Helper method for all scroll to tag tests """
self.notes_page.notes[1].go_to_tag(tag_name)
# Because all the notes (with tags) have the same tags, they will end up ordered alphabetically.
        tag_group = self.notes_page.tag_groups[group_index]
        self.assertEqual(tag_name + " (3)", tag_group.title)
        self.assertTrue(tag_group.scrolled_to_top(group_index))
def test_tabs_behaves_correctly(self):
"""
Scenario: Tabs behaves correctly.
Given I have a course with 5 notes
When I open Notes page
Then I see only "Recent Activity", "Location in Course", and "Tags" tabs
When I run the search with "note" query
And I see that "Search Results" tab appears with 4 notes found
Then I switch to "Recent Activity" tab
And I see all 5 notes
Then I switch to "Location in Course" tab
And I see all 2 groups and 5 notes
When I switch back to "Search Results" tab
Then I can still see 4 notes found
When I close "Search Results" tab
Then I see that "Recent Activity" tab becomes active
And "Search Results" tab disappears
And I see all 5 notes
"""
self._add_default_notes()
self.notes_page.visit()
# We're on Recent Activity tab.
self.assertEqual(len(self.notes_page.tabs), 3)
self.assertEqual([u"Recent Activity", u"Location in Course", u"Tags"], self.notes_page.tabs)
self.notes_page.search("note")
# We're on Search Results tab
self.assertEqual(len(self.notes_page.tabs), 4)
self.assertIn(u"Search Results", self.notes_page.tabs)
self.assertEqual(len(self.notes_page.notes), 4)
        # We can switch to the Recent Activity tab and back.
self.notes_page.switch_to_tab("recent")
self.assertEqual(len(self.notes_page.notes), 5)
self.notes_page.switch_to_tab("structure")
self.assertEqual(len(self.notes_page.chapter_groups), 2)
self.assertEqual(len(self.notes_page.notes), 5)
self.notes_page.switch_to_tab("search")
self.assertEqual(len(self.notes_page.notes), 4)
# Can close search results page
self.notes_page.close_tab()
self.assertEqual(len(self.notes_page.tabs), 3)
self.assertNotIn(u"Search Results", self.notes_page.tabs)
self.assertEqual(len(self.notes_page.notes), 5)
def test_open_note_when_accessed_from_notes_page(self):
"""
Scenario: Ensure that the link to the Unit opens a note only once.
Given I have a course with 2 sequentials that contain respectively one note and two notes
When I open Notes page
And I click on the first unit link
Then I see the note opened on the unit page
When I switch to the second sequential
I do not see any note opened
When I switch back to first sequential
I do not see any note opened
"""
xblocks = self.course_fixture.get_nested_xblocks(category="html")
self._add_notes([
Note(
usage_id=xblocks[1].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Third note",
quote="Annotate this",
updated=datetime(2012, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=14)],
),
Note(
usage_id=xblocks[2].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Second note",
quote="Annotate this",
updated=datetime(2013, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=14)],
),
Note(
usage_id=xblocks[0].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="First note",
quote="Annotate this",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=14)],
),
])
self.notes_page.visit()
item = self.notes_page.notes[0]
item.go_to_unit()
self.courseware_page.wait_for_page()
note = self.note_unit_page.notes[0]
self.assertTrue(note.is_visible)
note = self.note_unit_page.notes[1]
self.assertFalse(note.is_visible)
self.courseware_page.go_to_sequential_position(2)
note = self.note_unit_page.notes[0]
self.assertFalse(note.is_visible)
self.courseware_page.go_to_sequential_position(1)
note = self.note_unit_page.notes[0]
self.assertFalse(note.is_visible)
def test_page_size_limit(self):
"""
        Scenario: Verify that we can't get more notes than the default page size.
        Given that I am a registered user
        And I have a course with 26 notes
        When I open Notes page
        Then I can see notes list contains 25 items
And I should see paging header and footer with correct data
And I should see disabled previous button
And I should also see enabled next button
"""
self._add_default_notes(extra_notes=21)
self.notes_page.visit()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_pagination_with_single_page(self):
"""
Scenario: Notes list pagination works as expected for single page
Given that I am a registered user
And I have a course with 5 notes
When I open Notes page
Then I can see notes list contains 5 items
And I should see paging header and footer with correct data
And I should see disabled previous and next buttons
"""
self._add_default_notes()
self.notes_page.visit()
self._verify_pagination_info(
notes_count_on_current_page=5,
header_text='Showing 1-5 out of 5 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
def test_next_and_previous_page_button(self):
"""
Scenario: Next & Previous buttons are working as expected for notes list pagination
Given that I am a registered user
And I have a course with 26 notes
When I open Notes page
Then I can see notes list contains 25 items
And I should see paging header and footer with correct data
And I should see disabled previous button
And I should see enabled next button
When I click on next page button in footer
Then I should be navigated to second page
And I should see a list with 1 item
And I should see paging header and footer with correct info
And I should see enabled previous button
And I should also see disabled next button
When I click on previous page button in footer
Then I should be navigated to first page
And I should see a list with 25 items
And I should see paging header and footer with correct info
And I should see disabled previous button
And I should also see enabled next button
"""
self._add_default_notes(extra_notes=21)
self.notes_page.visit()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
self.notes_page.press_next_page_button()
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
self.notes_page.press_previous_page_button()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_pagination_with_valid_and_invalid_page_number(self):
"""
Scenario: Notes list pagination works as expected for valid & invalid page number
Given that I am a registered user
And I have a course with 26 notes
When I open Notes page
Then I can see notes list contains 25 items
And I should see paging header and footer with correct data
And I should see total page value is 2
When I enter 2 in the page number input
Then I should be navigated to page 2
When I enter 3 in the page number input
Then I should not be navigated away from page 2
"""
self._add_default_notes(extra_notes=21)
self.notes_page.visit()
self.assertEqual(self.notes_page.get_total_pages, 2)
# test pagination with valid page number
self.notes_page.go_to_page(2)
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
# test pagination with invalid page number
self.notes_page.go_to_page(3)
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
def test_search_behaves_correctly_with_pagination(self):
"""
Scenario: Searching behaves correctly with pagination.
Given that I am a registered user
And I have a course with 27 notes
When I open Notes page
Then I can see notes list with 25 items
And I should see paging header and footer with correct data
And previous button is disabled
And next button is enabled
When I run the search with "note" query
Then I see no error message
And I see that "Search Results" tab appears with 26 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
"""
self.search_and_verify()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
self.assert_viewed_event('Search Results')
self.assert_search_event('note', 26)
def test_search_with_next_and_prev_page_button(self):
"""
Scenario: Next & Previous buttons are working as expected for search
Given that I am a registered user
And I have a course with 27 notes
When I open Notes page
Then I can see notes list with 25 items
And I should see paging header and footer with correct data
And previous button is disabled
And next button is enabled
When I run the search with "note" query
Then I see that "Search Results" tab appears with 26 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
When I click on next page button in footer
Then I should be navigated to second page
And I should see a list with 1 item
And I should see paging header and footer with correct info
And I should see enabled previous button
And I should also see disabled next button
When I click on previous page button in footer
Then I should be navigated to first page
And I should see a list with 25 items
And I should see paging header and footer with correct info
And I should see disabled previous button
And I should also see enabled next button
"""
self.search_and_verify()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
self.assert_viewed_event('Search Results')
self.assert_search_event('note', 26)
self.notes_page.press_next_page_button()
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
self.notes_page.press_previous_page_button()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_search_with_valid_and_invalid_page_number(self):
"""
        Scenario: Search results pagination works as expected for valid & invalid page number
Given that I am a registered user
And I have a course with 27 notes
When I open Notes page
Then I can see notes list contains 25 items
And I should see paging header and footer with correct data
And I should see total page value is 2
When I run the search with "note" query
Then I see that "Search Results" tab appears with 26 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
When I enter 2 in the page number input
Then I should be navigated to page 2
When I enter 3 in the page number input
Then I should not be navigated away from page 2
"""
self.search_and_verify()
# test pagination with valid page number
self.notes_page.go_to_page(2)
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
# test pagination with invalid page number
self.notes_page.go_to_page(3)
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
@attr(shard=4)
class EdxNotesToggleSingleNoteTest(EdxNotesTestMixin):
"""
    Tests for toggling a single annotation.
"""
def setUp(self):
super(EdxNotesToggleSingleNoteTest, self).setUp()
self._add_notes()
self.note_unit_page.visit()
def test_can_toggle_by_clicking_on_highlighted_text(self):
"""
Scenario: User can toggle a single note by clicking on highlighted text.
Given I have a course with components with notes
When I click on highlighted text
And I move mouse out of the note
Then I see that the note is still shown
When I click outside the note
        Then I see that the note is closed
"""
note = self.note_unit_page.notes[0]
note.click_on_highlight()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note.is_visible)
self.note_unit_page.click("body")
self.assertFalse(note.is_visible)
def test_can_toggle_by_clicking_on_the_note(self):
"""
Scenario: User can toggle a single note by clicking on the note.
Given I have a course with components with notes
When I click on the note
And I move mouse out of the note
Then I see that the note is still shown
When I click outside the note
        Then I see that the note is closed
"""
note = self.note_unit_page.notes[0]
note.show().click_on_viewer()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note.is_visible)
self.note_unit_page.click("body")
self.assertFalse(note.is_visible)
def test_interaction_between_notes(self):
"""
Scenario: Interactions between notes works well.
Given I have a course with components with notes
When I click on highlighted text in the first component
And I move mouse out of the note
Then I see that the note is still shown
When I click on highlighted text in the second component
Then I see that the new note is shown
"""
note_1 = self.note_unit_page.notes[0]
note_2 = self.note_unit_page.notes[1]
note_1.click_on_highlight()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note_1.is_visible)
note_2.click_on_highlight()
self.assertFalse(note_1.is_visible)
self.assertTrue(note_2.is_visible)
@attr(shard=4)
class EdxNotesToggleNotesTest(EdxNotesTestMixin):
"""
Tests for toggling visibility of all notes.
"""
def setUp(self):
super(EdxNotesToggleNotesTest, self).setUp()
self._add_notes()
self.note_unit_page.visit()
def test_can_disable_all_notes(self):
"""
Scenario: User can disable all notes.
Given I have a course with components with notes
And I open the unit with annotatable components
When I click on "Show notes" checkbox
Then I do not see any notes on the sequential position
When I change sequential position to "2"
Then I still do not see any notes on the sequential position
When I go to "Test Subsection 2" subsection
Then I do not see any notes on the subsection
"""
# Disable all notes
self.note_unit_page.toggle_visibility()
self.assertEqual(len(self.note_unit_page.notes), 0)
self.courseware_page.go_to_sequential_position(2)
self.assertEqual(len(self.note_unit_page.notes), 0)
self.course_nav.go_to_section(u"Test Section 1", u"Test Subsection 2")
self.assertEqual(len(self.note_unit_page.notes), 0)
def test_can_reenable_all_notes(self):
"""
Scenario: User can toggle notes visibility.
Given I have a course with components with notes
And I open the unit with annotatable components
When I click on "Show notes" checkbox
Then I do not see any notes on the sequential position
When I click on "Show notes" checkbox again
Then I see that all notes appear
When I change sequential position to "2"
Then I still can see all notes on the sequential position
When I go to "Test Subsection 2" subsection
Then I can see all notes on the subsection
"""
# Disable notes
self.note_unit_page.toggle_visibility()
self.assertEqual(len(self.note_unit_page.notes), 0)
# Enable notes to make sure that I can enable notes without refreshing
# the page.
self.note_unit_page.toggle_visibility()
self.assertGreater(len(self.note_unit_page.notes), 0)
self.courseware_page.go_to_sequential_position(2)
self.assertGreater(len(self.note_unit_page.notes), 0)
self.course_nav.go_to_section(u"Test Section 1", u"Test Subsection 2")
self.assertGreater(len(self.note_unit_page.notes), 0)
|
louyihua/edx-platform
|
common/test/acceptance/tests/lms/test_lms_edxnotes.py
|
Python
|
agpl-3.0
| 60,053
|
[
"VisIt"
] |
b952c56f874a107c3cbb0fd80ed18c9e628d6e930e564a7804d08e4fae77d50e
|
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
kPluginNodeName = "MitsubaRoughCoatingShader"
kPluginNodeClassify = "/shader/surface"
kPluginNodeId = OpenMaya.MTypeId(0x8700F)
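# A minimal load-and-create sketch for the Maya script editor (illustrative
# only; it assumes this file is named "roughcoating.py" and sits on Maya's
# plug-in path):
#
#     cmds.loadPlugin("roughcoating.py")
#     shader = cmds.shadingNode("MitsubaRoughCoatingShader", asShader=True)
#     cmds.setAttr(shader + ".alpha", 0.2)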
class roughcoating(OpenMayaMPx.MPxNode):
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
mIntIOR = OpenMaya.MObject()
mExtIOR = OpenMaya.MObject()
mInteriorMaterial = OpenMaya.MObject()
mExteriorMaterial = OpenMaya.MObject()
mThickness = OpenMaya.MObject()
mSigmaA = OpenMaya.MObject()
mBSDF = OpenMaya.MObject()
mAlpha = OpenMaya.MObject()
mDistribution = OpenMaya.MObject()
mReflectance = OpenMaya.MObject()
mOutColor = OpenMaya.MObject()
def compute(self, plug, block):
if plug == roughcoating.mOutColor or plug.parent() == roughcoating.mOutColor:
resultColor = OpenMaya.MFloatVector(0.0,0.0,0.0)
outColorHandle = block.outputValue( roughcoating.mOutColor )
outColorHandle.setMFloatVector(resultColor)
outColorHandle.setClean()
else:
return OpenMaya.kUnknownParameter
def nodeCreator():
return roughcoating()
def nodeInitializer():
nAttr = OpenMaya.MFnNumericAttribute()
eAttr = OpenMaya.MFnEnumAttribute()
try:
roughcoating.mDistribution = eAttr.create("distribution", "dist")
eAttr.setKeyable(1)
eAttr.setStorable(1)
eAttr.setReadable(1)
eAttr.setWritable(1)
eAttr.addField("Beckmann", 0)
eAttr.addField("GGX", 1)
eAttr.addField("Phong", 2)
roughcoating.mAlpha = nAttr.create("alpha","a", OpenMaya.MFnNumericData.kFloat, 0.1)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
roughcoating.mInteriorMaterial = eAttr.create("interiorMaterial", "intmat")
eAttr.setKeyable(1)
eAttr.setStorable(1)
eAttr.setReadable(1)
eAttr.setWritable(1)
eAttr.addField("Use Value", 0)
eAttr.addField("Vacuum - 1.0", 1)
eAttr.addField("Helum - 1.00004", 2)
eAttr.addField("Hydrogen - 1.00013", 3)
eAttr.addField("Air - 1.00028", 4)
eAttr.addField("Carbon Dioxide - 1.00045", 5)
eAttr.addField("Water - 1.3330", 6)
eAttr.addField("Acetone - 1.36", 7)
eAttr.addField("Ethanol - 1.361", 8)
eAttr.addField("Carbon Tetrachloride - 1.461", 9)
eAttr.addField("Glycerol - 1.4729", 10)
eAttr.addField("Benzene - 1.501", 11)
eAttr.addField("Silicone Oil - 1.52045", 12)
eAttr.addField("Bromine - 1.661", 13)
eAttr.addField("Water Ice - 1.31", 14)
eAttr.addField("Fused Quartz - 1.458", 15)
eAttr.addField("Pyrex - 1.470", 16)
eAttr.addField("Acrylic Glass - 1.49", 17)
eAttr.addField("Polypropylene - 1.49", 18)
eAttr.addField("BK7 - 1.5046", 19)
eAttr.addField("Sodium Chloride - 1.544", 20)
eAttr.addField("Amber - 1.55", 21)
eAttr.addField("Pet - 1.575", 22)
eAttr.addField("Diamond - 2.419", 23)
        # Default to BK7
eAttr.setDefault(19)
roughcoating.mIntIOR = nAttr.create("interiorIOR","intior", OpenMaya.MFnNumericData.kFloat, 1.5046)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
roughcoating.mExteriorMaterial = eAttr.create("exteriorMaterial", "extmat")
eAttr.setKeyable(1)
eAttr.setStorable(1)
eAttr.setReadable(1)
eAttr.setWritable(1)
eAttr.addField("Use Value", 0)
eAttr.addField("Vacuum - 1.0", 1)
eAttr.addField("Helum - 1.00004", 2)
eAttr.addField("Hydrogen - 1.00013", 3)
eAttr.addField("Air - 1.00028", 4)
eAttr.addField("Carbon Dioxide - 1.00045", 5)
eAttr.addField("Water - 1.3330", 6)
eAttr.addField("Acetone - 1.36", 7)
eAttr.addField("Ethanol - 1.361", 8)
eAttr.addField("Carbon Tetrachloride - 1.461", 9)
eAttr.addField("Glycerol - 1.4729", 10)
eAttr.addField("Benzene - 1.501", 11)
eAttr.addField("Silicone Oil - 1.52045", 12)
eAttr.addField("Bromine - 1.661", 13)
eAttr.addField("Water Ice - 1.31", 14)
eAttr.addField("Fused Quartz - 1.458", 15)
eAttr.addField("Pyrex - 1.470", 16)
eAttr.addField("Acrylic Glass - 1.49", 17)
eAttr.addField("Polypropylene - 1.49", 18)
eAttr.addField("BK7 - 1.5046", 19)
eAttr.addField("Sodium Chloride - 1.544", 20)
eAttr.addField("Amber - 1.55", 21)
eAttr.addField("Pet - 1.575", 22)
eAttr.addField("Diamond - 2.419", 23)
# Default to Air
eAttr.setDefault(4)
roughcoating.mExtIOR = nAttr.create("exteriorIOR","extior", OpenMaya.MFnNumericData.kFloat, 1.000277)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
roughcoating.mThickness = nAttr.create("thickness","th", OpenMaya.MFnNumericData.kFloat, 1.0)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
roughcoating.mSigmaA = nAttr.createColor("sigmaA", "sa")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
nAttr.setDefault(0.0,0.0,0.0)
roughcoating.mReflectance = nAttr.createColor("specularReflectance", "sr")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
nAttr.setDefault(1.0,1.0,1.0)
roughcoating.mBSDF = nAttr.createColor("bsdf", "bsdf")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
nAttr.setDefault(0.0,0.0,0.0)
roughcoating.mOutColor = nAttr.createColor("outColor", "oc")
nAttr.setStorable(0)
nAttr.setHidden(0)
nAttr.setReadable(1)
nAttr.setWritable(0)
except:
sys.stderr.write("Failed to create attributes\n")
raise
try:
roughcoating.addAttribute(roughcoating.mDistribution)
roughcoating.addAttribute(roughcoating.mAlpha)
roughcoating.addAttribute(roughcoating.mThickness)
roughcoating.addAttribute(roughcoating.mSigmaA)
roughcoating.addAttribute(roughcoating.mReflectance)
roughcoating.addAttribute(roughcoating.mInteriorMaterial)
roughcoating.addAttribute(roughcoating.mIntIOR)
roughcoating.addAttribute(roughcoating.mExteriorMaterial)
roughcoating.addAttribute(roughcoating.mExtIOR)
roughcoating.addAttribute(roughcoating.mBSDF)
roughcoating.addAttribute(roughcoating.mOutColor)
except:
sys.stderr.write("Failed to add attributes\n")
raise
try:
roughcoating.attributeAffects(roughcoating.mBSDF, roughcoating.mOutColor)
except:
sys.stderr.write("Failed in setting attributeAffects\n")
raise
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.registerNode( kPluginNodeName, kPluginNodeId, nodeCreator,
nodeInitializer, OpenMayaMPx.MPxNode.kDependNode, kPluginNodeClassify )
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeName )
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode( kPluginNodeId )
except:
sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName )
raise
|
hpd/MitsubaForMaya
|
plug-ins/mitsuba/materials/roughcoating.py
|
Python
|
mit
| 7,843
|
[
"Amber"
] |
3593436990da693a49b9ebc6ace50467664f09d254b1b7929b63f1dc27d421ed
|
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.2"
__license__ = """
Copyright (c) 2010-2012 Kurt McKee <contactme@kurtmckee.org>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>"]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
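# For example, an embedding application might override it like this
# (hypothetical application name and URL):
#
#     import feedparser
#     feedparser.USER_AGENT = "MyAggregator/1.0 +http://aggregator.example/"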
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# If you want feedparser to automatically parse microformat content embedded
# in entry contents, set this to 1
PARSE_MICROFORMATS = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
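# For instance, _s2bytes('abc') and _l2bytes([97, 98, 99]) both yield the
# native str 'abc' on Python 2 and the bytes object b'abc' on Python 3.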
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing, content sanitizing, and
# microformat support (at least while feedparser depends on BeautifulSoup).
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
            # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# BeautifulSoup is used to extract microformat content from HTML
# feedparser is tested using BeautifulSoup 3.2.0
# http://www.crummy.com/software/BeautifulSoup/
try:
import BeautifulSoup
except ImportError:
BeautifulSoup = None
PARSE_MICROFORMATS = False
try:
# the utf_32 codec was introduced in Python 2.6; it's necessary to
# check this as long as feedparser supports Python 2.4 and 2.5
codecs.lookup('utf_32')
except LookupError:
_UTF32_AVAILABLE = False
else:
_UTF32_AVAILABLE = True
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
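    # Illustrative alias behavior (see __getitem__/__setitem__ below):
    # reading d['modified'] returns the value stored under 'updated', and
    # writing d['channel'] = value stores the value under the 'feed' key.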
def __getitem__(self, key):
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError, "object doesn't have key 'category'"
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
#warnings.warn("To avoid breaking existing software while "
# "fixing issue 310, a temporary mapping has been created "
# "from `updated` to `published` if `updated` doesn't "
# "exist. This fallback will be removed in a future version "
# "of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
#warnings.warn("To avoid breaking existing software while "
# "fixing issue 310, a temporary mapping has been created "
# "from `updated_parsed` to `published_parsed` if "
# "`updated_parsed` doesn't exist. This fallback will be "
# "removed in a future version of feedparser.",
# DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError, "object has no attribute '%s'" % key
def __hash__(self):
return id(self)
_cp1252 = {
128: unichr(8364), # euro sign
130: unichr(8218), # single low-9 quotation mark
131: unichr( 402), # latin small letter f with hook
132: unichr(8222), # double low-9 quotation mark
133: unichr(8230), # horizontal ellipsis
134: unichr(8224), # dagger
135: unichr(8225), # double dagger
136: unichr( 710), # modifier letter circumflex accent
137: unichr(8240), # per mille sign
138: unichr( 352), # latin capital letter s with caron
139: unichr(8249), # single left-pointing angle quotation mark
140: unichr( 338), # latin capital ligature oe
142: unichr( 381), # latin capital letter z with caron
145: unichr(8216), # left single quotation mark
146: unichr(8217), # right single quotation mark
147: unichr(8220), # left double quotation mark
148: unichr(8221), # right double quotation mark
149: unichr(8226), # bullet
150: unichr(8211), # en dash
151: unichr(8212), # em dash
152: unichr( 732), # small tilde
153: unichr(8482), # trade mark sign
154: unichr( 353), # latin small letter s with caron
155: unichr(8250), # single right-pointing angle quotation mark
156: unichr( 339), # latin small ligature oe
158: unichr( 382), # latin small letter z with caron
159: unichr( 376), # latin capital letter y with diaeresis
}
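# Illustrative round trip (mirrors the translate() call in pop() below):
#     u'smart \x93quotes\x94'.translate(_cp1252) == u'smart \u201cquotes\u201d'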
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
#try:
if not isinstance(uri, unicode):
uri = uri.decode('utf-8', 'ignore')
uri = urlparse.urljoin(base, uri)
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
#except:
# uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
# return urlparse.urljoin(base, uri)
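# For example (illustrative):
#     _urljoin(u'http://a.example/dir/', u'../img.png') == u'http://a.example/img.png'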
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
            v = v.replace('&amp;', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete CDATA block.
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
if loweruri.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
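        # e.g. the utf-8 bytes of u'caf\xe9' mis-decoded as iso-8859-1 show up
        # as u'caf\xc3\xa9'; the round trip above recovers the intended text.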
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
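    # Illustrative results (assuming the default sanitizer element/entity sets):
    #     lookslikehtml(u'<p>one &amp; two</p>')   -> 1
    #     lookslikehtml(u'plain text, no markup')  -> None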
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
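    # Precedence implemented above (summary): <source> data wins, then a
    # feed-level <image>, then <textinput>, then the current entry, and
    # finally the top-level feed dictionary itself.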
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
                author = author.replace(u'&lt;&gt;', u'')
                author = author.replace(u'<>', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
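    # e.g. (illustrative): an RSS author of u'joe@example.org (Joe)' is split so
    # author_detail ends up as {'email': u'joe@example.org', 'name': u'Joe'};
    # in the other direction, a detail with both name and email is rebuilt into
    # u'Joe (joe@example.org)'.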
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', u'alternate')
if attrsD['rel'] == u'self':
attrsD.setdefault('type', u'application/atom+xml')
else:
attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
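    # e.g. (illustrative): <link rel="alternate" type="text/html" href="..."/>
    # is appended to context['links'] and, being an alternate HTML link, also
    # promoted to context['link']; a rel="self" link defaults its type to
    # application/atom+xml and stays in the links list only.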
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
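    # e.g. (illustrative): a text/html <content:encoded> value is also saved as
    # the entry's summary (via _save, so an explicit <description> seen earlier
    # is not overwritten).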
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' -> False, and any other value to None.
        # False and None both evaluate as False, so the difference can be ignored
        # by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
        if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
            if lowernamespace.find(u'backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
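    # e.g. (illustrative): '<br/>' is normalized to '<br />' (void element),
    # while '<p/>' becomes '<p></p>' because 'p' requires an end tag.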
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
pass
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, unicode):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
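    # e.g. (illustrative): '&#150;' is a Windows-1252 en dash, so it is emitted
    # as the Unicode reference '&#x2013;'; an ordinary '&#65;' passes through
    # unchanged.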
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
    def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
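    # e.g. (illustrative): numeric forms such as '&#60;' are first normalized to
    # their named equivalents ('&lt;'), and only for non-XML content types are
    # the named references then decoded to literal characters ('<') for the
    # downstream HTML sanitizer to handle.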
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
    known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','jar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if isinstance(data, unicode):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if isinstance(s, basestring):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
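    # e.g. (illustrative): a 200-character 'NOTE:...' line folds into a 75-char
    # first line plus space-prefixed 74-char continuation lines, matching the
    # vCard 3.0 line-folding rules (RFC 2426).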
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple:
return []
elif iPropertyType == self.STRING:
return ''
elif iPropertyType == self.DATE:
return None
elif iPropertyType == self.URI:
return ''
elif iPropertyType == self.NODE:
return None
else:
return None
arValues = []
for elmResult in arResults:
sValue = None
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a':
sValue = elmResult.get('href')
elif sNodeName == 'img':
sValue = elmResult.get('src')
elif sNodeName == 'object':
sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
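    # e.g. (illustrative): getPropertyValue(card, 'email', self.EMAIL, 1, 1)
    # collects every class="email" node under card, preferring the mailto:
    # target of <a> tags, then @title on <abbr>, then the rendered text.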
def findVCards(self, elmRoot, bAgentParsing=0):
sVCards = ''
if not bAgentParsing:
arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
else:
arCards = [elmRoot]
for elmCard in arCards:
arLines = []
def processSingleString(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
return sValue or u''
def processSingleURI(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
if sValue:
sContentType = ''
sEncoding = ''
sValueKey = ''
if sValue.startswith('data:'):
sEncoding = ';ENCODING=b'
sContentType = sValue.split(';')[0].split('/').pop()
sValue = sValue.split(',', 1).pop()
else:
elmValue = self.getPropertyValue(elmCard, sProperty)
if elmValue:
if sProperty != 'url':
sValueKey = ';VALUE=uri'
sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
sContentType = sContentType.upper()
if sContentType == 'OCTET-STREAM':
sContentType = ''
if sContentType:
sContentType = ';TYPE=' + sContentType.upper()
arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
def processTypeValue(sProperty, arDefaultType, arForceType=None):
arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
for elmResult in arResults:
arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
if arForceType:
arType = self.unique(arForceType + arType)
if not arType:
arType = arDefaultType
sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
# AGENT
# must do this before all other properties because it is destructive
# (removes nested class="vcard" nodes so they don't interfere with
# this vcard's other properties)
arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
for elmAgent in arAgent:
if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
sAgentValue = self.findVCards(elmAgent, 1) + '\n'
sAgentValue = sAgentValue.replace('\n', '\\n')
sAgentValue = sAgentValue.replace(';', '\\;')
if sAgentValue:
arLines.append(self.vcardFold('AGENT:' + sAgentValue))
# Completely remove the agent element from the parse tree
elmAgent.extract()
else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
if sAgentValue:
arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
# FN (full name)
sFN = processSingleString('fn')
# N (name)
elmName = self.getPropertyValue(elmCard, 'n')
if elmName:
sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
sGivenName + ';' +
','.join(arAdditionalNames) + ';' +
','.join(arHonorificPrefixes) + ';' +
','.join(arHonorificSuffixes)))
elif sFN:
# implied "N" optimization
# http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
arNames = self.normalize(sFN).split()
if len(arNames) == 2:
bFamilyNameFirst = (arNames[0].endswith(',') or
len(arNames[1]) == 1 or
((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
if bFamilyNameFirst:
arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
else:
arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
# SORT-STRING
sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
if sSortString:
arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
# NICKNAME
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
if arNickname:
arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
# PHOTO
processSingleURI('photo')
# BDAY
dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
if dtBday:
arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
# ADR (address)
arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
for elmAdr in arAdr:
arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
if not arType:
arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
sPostOfficeBox + ';' +
sExtendedAddress + ';' +
sStreetAddress + ';' +
sLocality + ';' +
sRegion + ';' +
sPostalCode + ';' +
sCountryName))
# LABEL
processTypeValue('label', ['intl','postal','parcel','work'])
# TEL (phone number)
processTypeValue('tel', ['voice'])
# EMAIL
processTypeValue('email', ['internet'], ['internet'])
# MAILER
processSingleString('mailer')
# TZ (timezone)
processSingleString('tz')
# GEO (geographical information)
elmGeo = self.getPropertyValue(elmCard, 'geo')
if elmGeo:
sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
# TITLE
processSingleString('title')
# ROLE
processSingleString('role')
# LOGO
processSingleURI('logo')
# ORG (organization)
elmOrg = self.getPropertyValue(elmCard, 'org')
if elmOrg:
sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
if not sOrganizationName:
# implied "organization-name" optimization
# http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
if sOrganizationName:
arLines.append(self.vcardFold('ORG:' + sOrganizationName))
else:
arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
# CATEGORY
arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
if arCategory:
arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
# NOTE
processSingleString('note')
# REV
processSingleString('rev')
# SOUND
processSingleURI('sound')
# UID
processSingleString('uid')
# URL
processSingleURI('url')
# CLASS
processSingleString('class')
# KEY
processSingleURI('key')
if arLines:
arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
# XXX - this is super ugly; properly fix this with issue 148
for i, s in enumerate(arLines):
if not isinstance(s, unicode):
arLines[i] = s.decode('utf-8', 'ignore')
sVCards += u'\n'.join(arLines) + u'\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if 'href' not in attrsD:
return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1:
return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
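    # e.g. (illustrative): type='audio/mpeg' or an href path ending in '.mp3'
    # counts as downloadable; 'application/atom+xml' and extensionless paths
    # do not.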
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href:
continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
if segments:
tag = segments.pop()
else:
# there are no tags
continue
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
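    # e.g. (illustrative): <a rel="tag" href="http://example.com/tags/python">
    # yields term u'python' with scheme u'http://example.com/tags/'.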
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', u'').split()
xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
if not BeautifulSoup:
return
try:
p = _MicroformatsParser(htmlSource, baseURI, encoding)
except UnicodeEncodeError:
# sgmllib throws this exception when performing lookups of tags
# with non-ASCII characters in them.
return
p.vcard = p.findVCards(p.document)
p.findTags()
p.findEnclosures()
p.findXFN()
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
try:
return _urljoin(base, rel or u'')
except ValueError:
return u''
if not base:
return rel or u''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
try:
uri = _urljoin(base, rel)
except ValueError:
return u''
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
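# e.g. (illustrative): _makeSafeAbsoluteURI(u'http://a/', u'b') returns
# u'http://a/b', while a rel of u'javascript:alert(1)' returns u'' because
# 'javascript' is not an acceptable URI scheme.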
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics'])
    mathml_attributes = set(['actiontype', 'align', 'close', 'columnalign',
      'columnlines', 'columnspacing', 'columnspan', 'depth', 'display',
      'displaystyle', 'encoding', 'equalcolumns', 'equalrows', 'fence',
      'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
      'mathbackground', 'mathcolor', 'mathvariant', 'maxsize', 'minsize',
      'open', 'other', 'rowalign', 'rowlines', 'rowspacing', 'rowspan',
      'rspace', 'scriptlevel', 'selection', 'separator', 'separators',
      'stretchy', 'width', 'xlink:href', 'xlink:show', 'xlink:type', 'xmlns',
      'xmlns:xlink'])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
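    # e.g. (illustrative): 'color: red; background: url(evil)' sanitizes to
    # 'color: red;' -- the url() payload is blanked first and the emptied
    # 'background' declaration is then dropped.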
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = isinstance(data, unicode)
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - the server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
        # If both conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
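# Illustrative flow for http_error_401 above (a sketch; values hypothetical):
# if the first request carried
#     Authorization: Basic dXNlcjpwYXNz          (base64 of 'user:pass')
# and the 401 response carried
#     WWW-Authenticate: Digest realm="feeds", ...
# then the handler recovers ('user', 'pass') and the realm 'feeds', registers
# them via add_password(), and retries the request with digest auth headers.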
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
    If request_headers is supplied, it is a dictionary of HTTP request headers
that will override the values generated by FeedParser.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, basestring) \
and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, unicode):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
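# Illustrative usage of _open_resource (a sketch; the URL and filename are
# hypothetical):
#     _open_resource(u'http://example.org/feed.xml', None, None, USER_AGENT,
#                    None, [], {})                                # remote URL
#     _open_resource('/tmp/feed.xml', None, None, None, None, [], {})  # file
#     _open_resource('<rss version="2.0"/>', None, None, None, None, [], {})
# Each call returns a file-like object supporting read() and close().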
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = u''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
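# Illustrative behavior of _convert_to_idn (a sketch, using the standard IDN
# example host; the port, if any, is preserved):
#     _convert_to_idn(u'http://\u043f\u0440\u0438\u043c\u0435\u0440.example:8080/feed')
#     # -> u'http://xn--e1afmkfd.example:8080/feed'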
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
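# Illustrative usage of _build_urllib2_request (a sketch; values hypothetical):
#     req = _build_urllib2_request(u'http://example.org/feed.xml', USER_AGENT,
#                                  '"abc123"', (2004, 1, 5, 21, 23, 0, 0, 5, 0),
#                                  None, None, {})
#     # sends:  If-None-Match: "abc123"
#     #         If-Modified-Since: Mon, 05 Jan 2004 21:23:00 GMT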
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
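# Illustrative sketch (added for clarity; not part of the original API): a
# handler only needs to accept a string and return a 9-tuple in GMT, or None
# on mismatch. The dotted date format below is hypothetical.
def _example_dotted_date_handler(dateString):
    '''Parse dates like '2004.01.05 21:23:00' (hypothetical format).'''
    m = re.match(r'(\d{4})\.(\d{2})\.(\d{2}) (\d{2}):(\d{2}):(\d{2})$', dateString)
    if not m:
        return None
    year, month, day, hour, minute, second = [int(g) for g in m.groups()]
    return (year, month, day, hour, minute, second, 0, 0, 0)
# registerDateHandler(_example_dotted_date_handler)  # would run before built-ins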
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
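# Illustrative inputs accepted by _parse_date_iso8601 (a sketch):
#     _parse_date_iso8601('20040105')              # basic format, no separators
#     _parse_date_iso8601('2003-12-31T10:14:55Z')  # extended format, UTC
#     _parse_date_iso8601('2003-335')              # day 335 of 2003 (ordinal)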
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
    u'febru\u00e1r': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
    u'm\u00e1jus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
# Modified to also support MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (which basically means allowing a space as a date/time/timezone separator)
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
'|(?P<julian>\d\d\d)))?')
__tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
+ __tzd_re)
__datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString):
return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0:
return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
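# Illustrative inputs accepted by _parse_date_w3dtf (a sketch):
#     _parse_date_w3dtf('2003-12-31T10:14:55Z')            # W3DTF profile
#     _parse_date_w3dtf('2003-12-31 10:14:55.001 -08:00')  # MSSQL-style separators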
# Define the strings used by the RFC822 datetime parser
_rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
_rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# Only the first three letters of the month name matter
_rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
# The year may be 2 or 4 digits; capture the century if it exists
_rfc822_year = "(?P<year>(?:\d{2})?\d{2})"
_rfc822_day = "(?P<day> *\d{1,2})"
_rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)
_rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?"
_rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
_rfc822_tznames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# The timezone may be prefixed by 'Etc/'
_rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)
_rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames))
_rfc822_match = re.compile(
"(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
).match
def _parse_date_rfc822(dt):
"""Parse RFC 822 dates and times, with one minor
difference: years may be 4DIGIT or 2DIGIT.
http://tools.ietf.org/html/rfc822#section-5"""
try:
m = _rfc822_match(dt.lower()).groupdict(0)
except AttributeError:
return None
# Calculate a date and timestamp
for k in ('year', 'day', 'hour', 'minute', 'second'):
m[k] = int(m[k])
m['month'] = _rfc822_months.index(m['month']) + 1
# If the year is 2 digits, assume everything in the 90's is the 1990's
if m['year'] < 100:
m['year'] += (1900, 2000)[m['year'] < 90]
stamp = datetime.datetime(*[m[i] for i in
('year', 'month', 'day', 'hour', 'minute', 'second')])
# Use the timezone information to calculate the difference between
# the given date and timestamp and Universal Coordinated Time
tzhour = 0
tzmin = 0
if m['tz'] and m['tz'].startswith('gmt'):
# Handle GMT and GMT+hh:mm timezone syntax (the trailing
# timezone info will be handled by the next `if` block)
m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
if not m['tz']:
pass
elif m['tz'].startswith('+'):
tzhour = int(m['tz'][1:3])
tzmin = int(m['tz'][3:])
elif m['tz'].startswith('-'):
tzhour = int(m['tz'][1:3]) * -1
tzmin = int(m['tz'][3:]) * -1
else:
tzhour = _rfc822_tznames[m['tz']]
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in UTC
return (stamp - delta).utctimetuple()
registerDateHandler(_parse_date_rfc822)
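# Illustrative inputs accepted by _parse_date_rfc822 (a sketch):
#     _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 EST')  # named US timezone
#     _parse_date_rfc822('01 Jan 04 19:48 UT')             # 2-digit year, no seconds
# Both are converted to a UTC 9-tuple via utctimetuple().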
def _parse_date_asctime(dt):
"""Parse asctime-style dates"""
dayname, month, day, remainder = dt.split(None, 3)
# Convert month and day into zero-padded integers
month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
day = '%02i ' % (int(day),)
dt = month + day + remainder
return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
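# Illustrative dispatch through _parse_date (a sketch): the registered
# handlers are tried most-recently-registered first.
#     _parse_date('Thu, 01 Jan 2004 19:48:21 GMT')  # handled by the RFC 822 parser
#     _parse_date('2004-01-05T21:23:00Z')           # handled by W3DTF/ISO 8601
#     _parse_date('not a date')                     # -> None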
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to 'utf-8' if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). iconv_codec can help a lot;
you should definitely install it if you can.
http://cjkpython.i18n.org/
'''
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
charset = params.get('charset', '').replace("'", "")
if not isinstance(charset, unicode):
charset = charset.decode('utf-8', 'ignore')
return content_type, charset
sniffed_xml_encoding = u''
xml_encoding = u''
true_encoding = u''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]):
# In all forms of EBCDIC, these four bytes correspond
# to the string '<?xm'; try decoding using CP037
sniffed_xml_encoding = u'cp037'
xml_data = xml_data.decode('cp037').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]):
# UTF-16BE
sniffed_xml_encoding = u'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
# UTF-16BE with BOM
sniffed_xml_encoding = u'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]):
# UTF-16LE
sniffed_xml_encoding = u'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
# UTF-16LE with BOM
sniffed_xml_encoding = u'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]):
# UTF-32BE
sniffed_xml_encoding = u'utf-32be'
if _UTF32_AVAILABLE:
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]):
# UTF-32LE
sniffed_xml_encoding = u'utf-32le'
if _UTF32_AVAILABLE:
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
# UTF-32BE with BOM
sniffed_xml_encoding = u'utf-32be'
if _UTF32_AVAILABLE:
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
# UTF-32LE with BOM
sniffed_xml_encoding = u'utf-32le'
if _UTF32_AVAILABLE:
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
# UTF-8 with BOM
sniffed_xml_encoding = u'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data)
except UnicodeDecodeError:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
if sniffed_xml_encoding and (xml_encoding in (u'iso-10646-ucs-2', u'ucs-2', u'csunicode', u'iso-10646-ucs-4', u'ucs-4', u'csucs4', u'utf-16', u'utf-32', u'utf_16', u'utf_32', u'utf16', u'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = (u'application/xml', u'application/xml-dtd', u'application/xml-external-parsed-entity')
text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith(u'application/') and http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith(u'text/')) and http_content_type.endswith(u'+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or u'us-ascii'
elif http_content_type.startswith(u'text/'):
true_encoding = http_encoding or u'us-ascii'
elif http_headers and 'content-type' not in http_headers:
true_encoding = xml_encoding or u'iso-8859-1'
else:
true_encoding = xml_encoding or u'utf-8'
# some feeds claim to be gb2312 but are actually gb18030.
# apparently MSIE and Firefox both do the following switch:
if true_encoding.lower() == u'gb2312':
true_encoding = u'gb18030'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
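# Illustrative outcomes of _getCharacterEncoding (a sketch of the RFC 3023
# rules implemented above):
#     Content-Type: application/atom+xml   -> charset param, else XML
#                                             declaration, else utf-8
#     Content-Type: text/xml (no charset)  -> us-ascii; XML declaration ignored
#     headers without any Content-Type     -> XML declaration, else iso-8859-1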
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
encoding = 'utf-8'
data = data[3:]
elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
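# Illustrative behavior of _toUTF8 (a sketch):
#     _toUTF8(_s2bytes("<?xml version='1.0' encoding='iso-8859-1'?><a/>"),
#             'iso-8859-1')
#     # -> UTF-8 bytes with the declaration rewritten to encoding='utf-8'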
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head,data = data[:start+1], data[start+1:]
entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
entity_results=entity_pattern.findall(head)
head = entity_pattern.sub(_s2bytes(''), head)
doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
doctype_results = doctype_pattern.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if doctype.lower().count(_s2bytes('netscape')):
version = u'rss091n'
else:
version = None
# only allow in 'safe' inline entity definitions
replacement=_s2bytes('')
if len(doctype_results)==1 and entity_results:
safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
if safe_entities:
replacement=_s2bytes('<!DOCTYPE feed [\n <!ENTITY') + _s2bytes('>\n <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
data = doctype_pattern.sub(replacement, head) + data
return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)])
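# Illustrative behavior of _stripDoctype (a sketch):
#     _stripDoctype(_s2bytes('<!DOCTYPE rss SYSTEM '
#         '"http://my.netscape.com/publish/formats/rss-0.91.dtd"><rss/>'))
#     # -> (u'rss091n', data without the DOCTYPE, {})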
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error), e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error, e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error, e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', u'')
if not isinstance(etag, unicode):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', u'')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, unicode):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
# ensure that baseuri is an absolute uri using an acceptable URI scheme
contentloc = http_headers.get('content-location', u'')
href = result.get('href', u'')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, unicode) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
# if server sent 304, we're done
if getattr(f, 'code', 0) == 304:
result['version'] = u''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if data is None:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = use_strict_parser = 1
break
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
proposed_encoding = unicode(chardet.detect(data)['encoding'], 'ascii', 'ignore')
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = use_strict_parser = 1
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and (u'utf-8' not in tried_encodings):
proposed_encoding = u'utf-8'
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except UnicodeDecodeError:
pass
else:
known_encoding = use_strict_parser = 1
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and (u'windows-1252' not in tried_encodings):
proposed_encoding = u'windows-1252'
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except UnicodeDecodeError:
pass
else:
known_encoding = use_strict_parser = 1
# if still no luck and we haven't tried iso-8859-2 yet, try that.
if (not known_encoding) and (u'iso-8859-2' not in tried_encodings):
proposed_encoding = u'iso-8859-2'
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except UnicodeDecodeError:
pass
else:
known_encoding = use_strict_parser = 1
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = u''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
result['version'], data, entities = _stripDoctype(data)
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXParseException, e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
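# Illustrative usage of parse() (a sketch; the URL is hypothetical):
#     d = parse('http://example.org/feed.xml', etag='"abc123"',
#               agent='MyAgent/1.0')
#     d['bozo'], d.get('status'), d['feed'].get('title'), len(d['entries'])
#     # a raw string works too:
#     parse('<rss version="2.0"><channel><title>t</title></channel></rss>')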
|
boompieman/iim_project
|
project_python2/lib/python2.7/site-packages/pattern/web/feed/feedparser.py
|
Python
|
gpl-3.0
| 167,911
|
[
"NetCDF",
"VisIt"
] |
1c369a00338df4c6fbfd5b9c0aafb638b857c4f355cdac46a0f17a44256f6549
|
import hashlib
import numpy as np
from cryspy import numbers as nb
from cryspy import geo as geo
from cryspy import blockprint as bp
from cryspy import tables
class Drawable():
def __init__(self, name, pos):
assert isinstance(name, str), \
"First argument of crystal.Drawable.__init__(...) must be " \
"of type str."
assert isinstance(pos, geo.Pos), \
"Second argument of crystal.Drawable.__init__(...) must be " \
"of type geo.Pos."
self.name = name
self.pos = pos
self.has_color = False
self.color = None
def set_color(self, color):
        assert isinstance(color, tuple), \
            "Argument of crystal.Drawable.set_color(color) must be of type " \
            "tuple."
        assert (len(color) == 3), \
            "Argument of crystal.Drawable.set_color(color) must be of type " \
            "tuple and must have three items."
        for item in color:
            assert isinstance(item, float) or isinstance(item, int) \
                or isinstance(item, nb.Mixed), \
                "Argument of crystal.Drawable.set_color(color) must be of " \
                "type tuple with three numbers in it."
self.has_color = True
self.color = (float(color[0]), float(color[1]), float(color[2]))
class Atom(Drawable):
def __init__(self, name, typ, pos):
assert isinstance(name, str), \
"First argument must be of type str."
assert isinstance(typ, str), \
"Second argument must be of type str."
assert isinstance(pos, geo.Pos), \
"Third argument must be of type Pos."
Drawable.__init__(self, name, pos)
self.typ = typ
self.pos = pos
def __str__(self):
return bp.block([["Atom", " " + self.name,
" " + self.typ, " " + self.pos.__str__()], ])
def __eq__(self, right):
return hash(self) == hash(right)
def __add__(self, right):
if isinstance(right, geo.Dif):
return Atom(self.name, self.typ, self.pos + right)
elif isinstance(right, str):
return Atom(self.name + right, self.typ, self.pos)
else:
return NotImplemented
def __rpow__(self, left):
assert isinstance(left, geo.Operator) \
or isinstance(left, geo.Coset), \
"I cannot apply an object of type %s " \
"to an object of type Atom." % (type(left))
return Atom(self.name, self.typ, left ** self.pos)
def __mod__(self, right):
assert isinstance(right, geo.Transgen), \
"I cannot take an object of type Atom " \
"modulo an object of type %s" % (type(right))
return Atom(self.name, self.typ, self.pos % right)
def __hash__(self):
string = "atom%s%i" % (
self.typ,
hash(self.pos))
return int(hashlib.sha1(string.encode()).hexdigest(), 16)
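# Illustrative behavior (a sketch; some_pos stands for any geo.Pos instance):
#     a = Atom("Fe1", "Fe", some_pos)
#     b = Atom("renamed", "Fe", some_pos)
#     a == b   # -> True: __eq__ compares hashes, and __hash__ ignores the name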
class Momentum(Drawable):
def __init__(self, name, pos, direction):
        assert isinstance(pos, geo.Pos), \
            "Second argument of crystal.Momentum(name, pos, direction) " \
            "must be of type geo.Pos."
        assert isinstance(direction, geo.Dif), \
            "Third argument of crystal.Momentum(name, pos, direction) " \
            "must be of type geo.Dif."
Drawable.__init__(self, name, pos)
self.direction = direction
self.has_plotlength = False
self.plotlength = None
def set_plotlength(self, plotlength):
assert isinstance(plotlength, float) or isinstance(plotlength, int) \
or isinstance(plotlength, nb.Mixed), \
"Argument of crystal.Momentum.set_plotlength(plotlength) must " \
"be of type float or int or numbers.Mixed."
self.has_plotlength = True
self.plotlength = float(plotlength)
def __str__(self):
return "Momentum"
def __eq__(self, right):
if isinstance(right, Momentum):
if (self.pos == right.pos) and (self.direction == right.direction):
return True
else:
return False
else:
return False
def __add__(self, right):
if isinstance(right, geo.Dif):
result = Momentum(self.name, self.pos + right, self.direction)
if self.has_color:
result.set_color(self.color)
if self.has_plotlength:
result.set_plotlength(self.plotlength)
return result
elif isinstance(right, str):
return Momentum(self.name + right, self.pos, self.direction)
else:
return NotImplemented
def __rpow__(self, left):
if isinstance(left, geo.Operator) \
or isinstance(left, geo.Coset):
result = Momentum(self.name, left ** self.pos, self.direction)
if self.has_color:
result.set_color(self.color)
if self.has_plotlength:
result.set_plotlength(self.plotlength)
return result
else:
return NotImplemented
def __mod__(self, right):
        assert isinstance(right, geo.Transgen), \
            "I cannot take an object of type Momentum " \
            "modulo an object of type %s" % (type(right))
return Momentum(self.name, self.pos % right, self.direction)
def __hash__(self):
string = "momentum%i,%i" \
% (hash(self.pos), hash(self.direction))
return int(hashlib.sha1(string.encode()).hexdigest(), 16)
class Bond(Drawable):
def __init__(self, name, start, target):
assert isinstance(start, geo.Pos), \
"The start position of an object of type Bond " \
"must be of type geo.Position."
assert isinstance(target, geo.Pos), \
"The target position of an object of type Bond " \
"must be of type geo.Position."
Drawable.__init__(self, name, geo.centre_of_gravity([start, target]))
self.start = start
self.target = target
self.has_thickness = False
self.thickness = None
def set_color(self, color):
assert isinstance(color, tuple), \
"Argument of crystal.Bond.set_color(color) must be of type " \
" tuple."
assert (len(color) == 3), \
"Argument of crystal.Bond.set_color(color) must be of type "\
"tuple and must have three items."
for item in color:
assert isinstance(item, float) or isinstance(item, int) \
or isinstance(item, nb.Mixed), \
"Argument of crystal.Bond.set_color(color) must be of " \
"type tuple with three numbers in it."
self.has_color = True
self.color = (float(color[0]), float(color[1]), float(color[2]))
def set_thickness(self, thickness):
assert isinstance(thickness, float) or isinstance(thickness, int) \
or isinstance(thickness, nb.Mixed), \
"Argument of crystal.Bond.set_thickness(...) must be of type " \
"float, int or Mixed."
self.thickness = thickness
self.has_thickness = True
def __str__(self):
return "Bond"
def __eq__(self, right):
if isinstance(right, Bond):
if (self.start == right.start) and (self.target == right.target) \
or (self.start == right.target) and (self.target == right.start):
return True
else:
return False
else:
return False
def __add__(self, right):
if isinstance(right, geo.Dif):
result = Bond(self.name, self.start + right, self.target + right)
if self.has_color:
result.set_color(self.color)
if self.has_thickness:
result.set_thickness(self.thickness)
return result
elif isinstance(right, str):
result = Bond(self.name + right, self.start, self.target)
if self.has_color:
result.set_color(self.color)
if self.has_thickness:
result.set_thickness(self.thickness)
return result
else:
return NotImplemented
def __rpow__(self, left):
if isinstance(left, geo.Operator) \
or isinstance(left, geo.Coset):
if isinstance(left, geo.Coset):
correct_centre = left ** self.pos
wrong_centre = left.symmetry ** self.pos
correction = correct_centre - wrong_centre
result = Bond(
self.name,
left.symmetry ** self.start + correction,
left.symmetry ** self.target + correction
)
else:
result = Bond(self.name, left ** self.start, left ** self.target)
if self.has_color:
result.set_color(self.color)
if self.has_thickness:
result.set_thickness(self.thickness)
return result
else:
return NotImplemented
def __mod__(self, right):
        assert isinstance(right, geo.Transgen), \
            "I cannot take an object of type Bond " \
            "modulo an object of type %s" % (str(type(right)))
if isinstance(right, geo.Transgen):
pos_new = self.pos % right
correct = (pos_new - self.pos).to_Symmetry()
new_bond = Bond(
self.name, correct ** self.start, correct ** self.target
)
if self.has_color:
new_bond.set_color(self.color)
if self.has_thickness:
new_bond.set_thickness(self.thickness)
return new_bond
def __hash__(self):
# The order of start and target does not matter.
# This is why I hash the sum and the product of the hashes.
string = "bond%i,%i" \
% (hash(self.start)+hash(self.target),
hash(self.start) * hash(self.target))
return int(hashlib.sha1(string.encode()).hexdigest(), 16)
class Face(Drawable):
def __init__(self, name, corners):
assert isinstance(corners, list), \
"Face must be created by a list of objects of type geo.Pos."
for corner in corners:
assert isinstance(corner, geo.Pos), \
"Face must be created by a list of objects of type geo.Pos."
Drawable.__init__(self, name, geo.centre_of_gravity(corners))
self.corners = corners
self.has_opacity = False
self.opacity = None
def set_color(self, color):
        assert isinstance(color, tuple), \
            "Argument of crystal.Face.set_color(color) must be of type " \
            "tuple."
        assert (len(color) == 3), \
            "Argument of crystal.Face.set_color(color) must be of type " \
            "tuple and must have three items."
        for item in color:
            assert isinstance(item, float) or isinstance(item, int) \
                or isinstance(item, nb.Mixed), \
                "Argument of crystal.Face.set_color(color) must be of " \
                "type tuple with three numbers in it."
self.has_color = True
self.color = (float(color[0]), float(color[1]), float(color[2]))
def set_opacity(self, opacity):
assert isinstance(opacity, nb.Mixed) or isinstance(opacity, float) \
or isinstance(opacity, int), \
"Opacity must be a number."
assert 0 <= float(opacity) <= 1, \
"Opacity must be between 0 and 1."
self.has_opacity = True
self.opacity = opacity
def flip(self):
# Flips the orientation, i.e. reverses the order of the corners.
liste = []
for corner in self.corners:
liste.append(corner)
n = len(liste)
liste = [liste[n-i-1] for i in range(n)]
result = Face(self.name, liste)
if self.has_color:
result.set_color(self.color)
if self.has_opacity:
result.set_opacity(self.opacity)
return result
def __str__(self):
return "Face"
def __eq__(self, right):
return hash(self) == hash(right)
def __add__(self, right):
if isinstance(right, geo.Dif):
liste = []
for corner in self.corners:
liste.append(corner + right)
result = Face(self.name, liste)
if self.has_color:
result.set_color(self.color)
if self.has_opacity:
result.set_opacity(self.opacity)
return result
elif isinstance(right, str):
return Face(self.name + right, self.corners)
else:
return NotImplemented
def __rpow__(self, left):
if isinstance(left, geo.Operator) \
or isinstance(left, geo.Coset):
must_flip = False
if isinstance(left, geo.Operator):
if float(left.value.det()) < 0:
must_flip = True
result = Face(self.name, [left ** corner for corner in self.corners])
elif isinstance(left, geo.Coset):
if float(left.symmetry.value.det()) < 0:
must_flip = True
correct = (left**self.pos - left.symmetry**self.pos).to_Symmetry()
result = Face(
self.name,
[correct ** (left.symmetry ** corner)
for corner in self.corners]
)
if self.has_color:
result.set_color(self.color)
if self.has_opacity:
result.set_opacity(self.opacity)
if must_flip:
result = result.flip()
return result
else:
return NotImplemented
def __hash__(self):
summe = 0
sum_of_products = 0
for i in range(len(self.corners) - 1):
summe += hash(self.corners[i])
sum_of_products += hash(self.corners[i]) * hash(self.corners[i+1])**2
summe += hash(self.corners[-1])
sum_of_products += hash(self.corners[-1]) * hash(self.corners[0])**2
string = "face%i,%i" % \
(summe, sum_of_products)
return int(hashlib.sha1(string.encode()).hexdigest(), 16)
class Bitmapface(Drawable):
def __init__(self, name, southwest, southeast, northwest, northeast, bitmap, format):
# Corners:
# northwest northeast
# ----------------------
# | |
# | bitmap 1. index |
# | ^ |
# | | |
# | -> bitmap 0. index |
# ----------------------
# southwest southeast
#
        # The parameter format must be "RGBA"; in the future, more
        # possibilities may be implemented.
        #
        # The 3rd index of bitmap depends on the format, e.g. for
        # format = "RGBA" it counts through red, green, blue, opacity.
        assert isinstance(name, str), \
            "Error: First argument for creating cryspy.crystal.Bitmapface " \
            "must be of type str."
        assert isinstance(southwest, geo.Pos) \
            and isinstance(southeast, geo.Pos) \
            and isinstance(northwest, geo.Pos) \
            and isinstance(northeast, geo.Pos), \
            "Error: 2nd to 5th arguments for creating cryspy.crystal.Bitmapface " \
            "must be of type cryspy.geo.Pos."
        assert isinstance(bitmap, np.ndarray), \
            "Error: 6th argument for creating cryspy.crystal.Bitmapface " \
            "must be of type numpy.ndarray."
        assert len(bitmap.shape) == 3, \
            "Error: 6th argument for creating cryspy.crystal.Bitmapface " \
            "must be of type numpy.ndarray with three indices."
        assert isinstance(format, str), \
            "Error: 7th argument for creating cryspy.crystal.Bitmapface " \
            "must be of type str."
        assert format in ["RGBA"], \
            "Error: 7th argument for creating cryspy.crystal.Bitmapface " \
            "must be the string 'RGBA'."
        if format == "RGBA":
            assert bitmap.shape[2] == 4, \
                "Error creating an object of type cryspy.crystal.Bitmapface:\n" \
                "In the case format = 'RGBA', bitmap must be " \
                "of shape (*, *, 4)."
self.name = name
self.southwest = southwest
self.southeast = southeast
self.northwest = northwest
self.northeast = northeast
self.bitmap = bitmap
self.format = format
class Atomset():
def __init__(self, menge):
assert isinstance(menge, set), \
"Argument must be of type set."
for item in menge:
            assert isinstance(item, Atom) or isinstance(item, Momentum) \
                or isinstance(item, Bond) or isinstance(item, Face) \
                or isinstance(item, Subset) or isinstance(item, Bitmapface), \
                "Argument must be a set of objects of type Atom, Momentum, " \
                "Bond, Face, Subset or Bitmapface."
self.menge = menge
self.names = set([])
for item in menge:
self.names.add(item.name)
def __eq__(self, right):
if isinstance(right, Atomset):
return (self.menge == right.menge)
else:
return False
def __str__(self):
        # The atoms are printed in alphabetical order by name; for equal
        # names, they are ordered by type.
strings = [["Atomset\n"
"-------"], ]
atomliste = []
momentumliste = []
bondliste = []
faceliste = []
subsetliste = []
for item in self.menge:
if isinstance(item, Atom):
atomliste.append(item)
elif isinstance(item, Momentum):
momentumliste.append(item)
elif isinstance(item, Bond):
bondliste.append(item)
elif isinstance(item, Face):
faceliste.append(item)
elif isinstance(item, Subset):
subsetliste.append(item)
types = [atom.typ for atom in atomliste]
indexes = [i for (j, i) in sorted(zip(types, range(len(atomliste))))]
names = [atomliste[i].name for i in indexes]
indexes = [i for (j, i) in sorted(zip(names, indexes))]
for i in indexes:
strings.append(["", atomliste[i].__str__()])
strings.append([""])
for momentum in momentumliste:
strings.append(["", str(momentum)])
for bond in bondliste:
strings.append(["", str(bond)])
for face in faceliste:
strings.append(["", str(face)])
for subset in subsetliste:
strings.append(["", str(subset)])
return bp.block(strings)
def add(self, item):
if not (item in self.menge):
self.menge.add(item)
self.names.add(item.name)
def __add__(self, right):
if isinstance(right, geo.Dif):
return Atomset({atom + right for atom in self.menge})
elif isinstance(right, str):
menge = set([])
for item in self.menge:
if isinstance(item, Atom):
menge.add(item + right)
else:
menge.add(item)
return Atomset(menge)
elif isinstance(right, Atomset):
return Atomset(self.menge.union(right.menge))
else:
return NotImplemented
def __radd__(self, left):
if isinstance(left, geo.Dif):
return self + left
else:
return NotImplemented
def __rpow__(self, left):
        assert isinstance(left, geo.Operator) \
            or isinstance(left, geo.Spacegroup), \
            "Argument must be of type Operator or Spacegroup."
if isinstance(left, geo.Operator):
return Atomset({left ** item for item in self.menge})
if isinstance(left, geo.Spacegroup):
atomset = Atomset(set([]))
for item in self.menge:
for coset in left.liste_cosets:
new_item = coset ** item
new_item.name = atomset.nextname(new_item.name)
atomset.add(new_item)
return atomset
def nextname(self, name):
if name in self.names:
words = name.split('_')
if words[-1].isdigit():
return self.nextname('_'.join(words[:-1] + [str(int(words[-1])+1)]))
else:
return self.nextname(name + '_1')
else:
return name
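    # Illustrative behavior of nextname (a sketch): with self.names containing
    # 'Fe1' and 'Fe1_1',
    #     self.nextname('Fe1')  # -> 'Fe1_2' (appends or increments a _N suffix)
    #     self.nextname('Cu1')  # -> 'Cu1'   (unused names pass through)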
def __mod__(self, right):
        assert isinstance(right, geo.Transgen), \
            "I cannot take an object of type Atomset " \
            "modulo an object of type %s" % (type(right))
atoms = set([])
for atom in self.menge:
atoms |= set([atom % right])
return Atomset(atoms)
def get_atom(self, atomname):
for atom in self.menge:
if atom.name == atomname:
return atom
return None
def unpack_subsets(self):
menge_new = set([])
for item in self.menge:
if not isinstance(item, Subset):
menge_new.add(item)
else:
for subitem in item.atomset.menge:
subitem.name = item.name + '.' + subitem.name
menge_new.add(subitem)
return Atomset(menge_new)
class Subset(Drawable):
def __init__(self, name, pos, menge):
assert isinstance(name, str), \
"First argument must be of type str."
assert isinstance(pos, geo.Pos), \
"Second argument must be of type cryspy.geo.Pos."
assert isinstance(menge, set), \
"Third argument must be of type set."
for item in menge:
assert isinstance(item, Atom) or isinstance(item, Momentum) \
or isinstance(item, Bond) or isinstance(item, Face), \
"Third argument must be a set of "\
"objects of type Atom, Momentum, Bond or Face."
Drawable.__init__(self, name, pos)
self.atomset = Atomset(menge)
self.has_hash = False
self.hash = 0
def __eq__(self, right):
return hash(self) == hash(right)
def __str__(self):
return "Subset"
def __rpow__(self, left):
assert isinstance(left, geo.Symmetry) \
or isinstance(left, geo.Transformation) \
or isinstance(left, geo.Coset), \
"Cannot apply object of type %s to object of type " \
"cryspy.crystal.Subset."%(str(type(left)))
if isinstance(left, geo.Symmetry):
return Subset(self.name, left**self.pos,
{left ** item for item in self.atomset.menge})
elif isinstance(left, geo.Transformation):
return Subset(self.name, left**self.pos,
{left ** item for item in self.atomset.menge})
elif isinstance(left, geo.Coset):
pos = left ** self.pos
correct = (pos - left.symmetry ** self.pos).to_Symmetry()
return Subset(self.name, pos,
{correct ** (left.symmetry ** item)
for item in self.atomset.menge})
def __mod__(self, right):
        assert isinstance(right, geo.Transgen), \
            "I cannot take an object of type cryspy.crystal.Subset " \
            "modulo an object of type %s." % (str(type(right)))
if isinstance(right, geo.Transgen):
pos = self.pos % right
correct = (pos - self.pos).to_Symmetry()
return Subset(self.name, pos,
{correct ** item
for item in self.atomset.menge})
def __add__(self, right):
assert isinstance(right, geo.Dif) \
or isinstance(right, str), \
"Cannot add object of type %s to object of type Subset." \
%(str(type(right)))
if isinstance(right, geo.Dif):
return right.to_Symmetry() ** self
elif isinstance(right, str):
return Subset(self.name + right, self.pos, self.atomset.menge)
else:
return NotImplemented
    def __hash__(self):
        if self.has_hash:
            return self.hash
        else:
h = 0
for item in self.atomset.menge:
h += hash(item)
string = "%s%i%i%i%i" % (
"Subset",
hash(self.pos.x()),
hash(self.pos.y()),
hash(self.pos.z()),
h)
ha = int(hashlib.sha1(string.encode()).hexdigest(), 16)
self.hash = ha
self.has_hash = True
return ha
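# Note on the formula implemented below (added for clarity):
#     F(q) = sum_j f_j(sin(theta)/lambda) * exp(2*pi*i * (q . r_j))
# with f_j the tabulated atomic form factor evaluated at
# sin(theta)/lambda = |q| / 2 and r_j = atom.pos - geo.origin. The
# wavelength argument is validated but unused in this kinematic expression.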
def structurefactor(atomset, metric, q, wavelength):
assert isinstance(atomset, Atomset), \
"atomset must be of type Atomset."
assert isinstance(metric, geo.Metric), \
"metric must be of type geo.Metric."
assert isinstance(q, geo.Rec), \
"q (scattering vector) must be of type geo.Rec."
wavelength = nb.Mixed(wavelength)
assert isinstance(wavelength, nb.Mixed), \
"wavelength must be of type numbers.Mixed or a type " \
"that can be converted to this."
sintl = 0.5 * metric.length(q)
    i2pi = complex(0, 1) * 2.0 * np.pi
F = 0
for atom in atomset.menge:
F += tables.formfactor(atom.typ, sintl) \
* np.exp(i2pi * float(q * (atom.pos - geo.origin)))
return F
|
tobias-froehlich/cryspy
|
cryspy/crystal.py
|
Python
|
gpl-3.0
| 26,189
|
[
"CRYSTAL"
] |
b47f63b611843193a9c325322c41acd11c6f2abb7f89f36c051ce23ec82a8972
|
#!/usr/bin/env python
import argparse
import copy
import logging
import re
import sys
from BCBio import GFF
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(name='blastxml2gff3')
__author__ = "Eric Rasche"
__version__ = "0.4.0"
__maintainer__ = "Eric Rasche"
__email__ = "esr@tamu.edu"
__doc__ = """
BlastXML files, when transformed to GFF3, do not normally show gaps in the
blast hits. This tool aims to fill that "gap".
"""
def blastxml2gff3(blastxml, min_gap=3, trim=False, trim_end=False):
from Bio.Blast import NCBIXML
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
blast_records = NCBIXML.parse(blastxml)
records = []
for record in blast_records:
# http://www.sequenceontology.org/browser/release_2.4/term/SO:0000343
match_type = { # Currently we can only handle BLASTN, BLASTP
'BLASTN': 'nucleotide_match',
'BLASTP': 'protein_match',
}.get(record.application, 'match')
rec = SeqRecord(Seq("ACTG"), id=record.query)
for hit in record.alignments:
for hsp in hit.hsps:
qualifiers = {
"source": "blast",
"score": hsp.expect,
"accession": hit.accession,
"hit_id": hit.hit_id,
"length": hit.length,
"hit_titles": hit.title.split(' >')
}
desc = hit.title.split(' >')[0]
qualifiers['description'] = desc[desc.index(' '):]
                # This required a fair bit of sketching out / math to figure
                # out the first time.
                #
                # the match_start location must account for queries and
                # subjects that start at locations other than 1
parent_match_start = hsp.query_start - hsp.sbjct_start
# The end is the start + hit.length because the match itself
# may be longer than the parent feature, so we use the supplied
# subject/hit length to calculate the real ending of the target
# protein.
parent_match_end = hsp.query_start + hit.length + hsp.query.count('-')
# However, if the user requests that we trim the feature, then
# we need to cut the ``match`` start to 0 to match the parent feature.
# We'll also need to cut the end to match the query's end. It (maybe)
# should be the feature end? But we don't have access to that data, so
                # we settle for this.
if trim:
if parent_match_start < 1:
parent_match_start = 0
if trim or trim_end:
if parent_match_end > hsp.query_end:
parent_match_end = hsp.query_end + 1
# The ``match`` feature will hold one or more ``match_part``s
top_feature = SeqFeature(
FeatureLocation(parent_match_start, parent_match_end),
type=match_type, strand=0,
qualifiers=qualifiers
)
# Unlike the parent feature, ``match_part``s have sources.
part_qualifiers = {
"source": "blast",
}
top_feature.sub_features = []
for start, end, cigar in generate_parts(hsp.query, hsp.match,
hsp.sbjct,
ignore_under=min_gap):
part_qualifiers['Gap'] = cigar
part_qualifiers['ID'] = hit.hit_id
if trim:
# If trimming, then we start relative to the
# match's start
match_part_start = parent_match_start + start
else:
# Otherwise, we have to account for the subject start's location
match_part_start = parent_match_start + hsp.sbjct_start + start - 1
                    # We used to use hsp.align_length here, but that includes
                    # gaps in the parent sequence.
                    #
                    # Furthermore, align_length will give calculation errors
                    # in weird places, so we just use (end - start) for
                    # simplicity.
match_part_end = match_part_start + (end - start)
top_feature.sub_features.append(
SeqFeature(
FeatureLocation(match_part_start, match_part_end),
type="match_part", strand=0,
qualifiers=copy.deepcopy(part_qualifiers))
)
rec.features.append(top_feature)
rec.annotations = {}
records.append(rec)
return records
def __remove_query_gaps(query, match, subject):
"""remove positions in all three based on gaps in query
In order to simplify math and calculations...we remove all of the gaps
based on gap locations in the query sequence::
Q:ACTG-ACTGACTG
S:ACTGAAC---CTG
will become::
Q:ACTGACTGACTG
S:ACTGAC---CTG
which greatly simplifies the process of identifying the correct location
for a match_part
"""
prev = 0
fq = ''
fm = ''
fs = ''
for position in re.finditer('-', query):
fq += query[prev:position.start()]
fm += match[prev:position.start()]
fs += subject[prev:position.start()]
prev = position.start() + 1
fq += query[prev:]
fm += match[prev:]
fs += subject[prev:]
return (fq, fm, fs)
def generate_parts(query, match, subject, ignore_under=3):
region_q = []
region_m = []
region_s = []
(query, match, subject) = __remove_query_gaps(query, match, subject)
region_start = -1
region_end = -1
mismatch_count = 0
for i, (q, m, s) in enumerate(zip(query, match, subject)):
        # If we have a match: identity letters, '|' and '+' are all non-space,
        # so only ' ' marks a mismatch column
        if m != ' ':
if region_start == -1:
region_start = i
# It's a new region, we need to reset or it's pre-seeded with
# spaces
region_q = []
region_m = []
region_s = []
region_end = i
mismatch_count = 0
else:
mismatch_count += 1
region_q.append(q)
region_m.append(m)
region_s.append(s)
if mismatch_count >= ignore_under and region_start != -1 and region_end != -1:
region_q = region_q[0:-ignore_under]
region_m = region_m[0:-ignore_under]
region_s = region_s[0:-ignore_under]
yield region_start, region_end + 1, \
cigar_from_string(region_q, region_m, region_s, strict_m=True)
region_q = []
region_m = []
region_s = []
region_start = -1
region_end = -1
mismatch_count = 0
yield region_start, region_end + 1, \
cigar_from_string(region_q, region_m, region_s, strict_m=True)
def _qms_to_matches(query, match, subject, strict_m=True):
matchline = []
for (q, m, s) in zip(query, match, subject):
ret = ''
        if m != ' ':  # identity or positive; only ' ' marks a mismatch column
ret = '='
elif m == ' ':
if q == '-':
ret = 'D'
elif s == '-':
ret = 'I'
else:
ret = 'X'
else:
            log.warning("Bad data: \n\t%s\n\t%s\n\t%s\n" % (query, match, subject))
if strict_m:
if ret == '=' or ret == 'X':
ret = 'M'
matchline.append(ret)
return matchline
def _matchline_to_cigar(matchline):
cigar_line = []
last_char = matchline[0]
count = 0
for char in matchline:
if char == last_char:
count += 1
else:
cigar_line.append("%s%s" % (last_char, count))
count = 1
last_char = char
cigar_line.append("%s%s" % (last_char, count))
return ' '.join(cigar_line)
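# Worked example (added for clarity): _qms_to_matches() emits one CIGAR-style
# letter per alignment column ('=', 'X', 'I', 'D'; '='/'X' collapse to 'M'
# when strict_m=True), and _matchline_to_cigar() run-length encodes the list
# in GFF3 Gap-attribute order (operator first), e.g. ['M', 'M', 'D'] -> "M2 D1".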
def cigar_from_string(query, match, subject, strict_m=True):
matchline = _qms_to_matches(query, match, subject, strict_m=strict_m)
if len(matchline) > 0:
return _matchline_to_cigar(matchline)
else:
return ""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert Blast XML to gapped GFF3', epilog='')
parser.add_argument('blastxml', type=open, help='Blast XML Output')
    parser.add_argument('--min_gap', type=int, help='Minimum gap size that starts a new match_part', default=3)
parser.add_argument('--trim', action='store_true', help='Trim blast hits to be only as long as the parent feature')
parser.add_argument('--trim_end', action='store_true', help='Cut blast results off at end of gene')
args = parser.parse_args()
result = blastxml2gff3(**vars(args))
GFF.write(result, sys.stdout)
|
yhoogstrate/tools-iuc
|
tools/blastxml_to_gapped_gff3/blastxml_to_gapped_gff3.py
|
Python
|
mit
| 9,199
|
[
"BLAST"
] |
4efa24a1e00eb63403d4463aee1eb70fb144cb95a5dd265c63c7aea7fec48f2c
|
"""Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.gaussian_process.kernels import DotProduct
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal, assert_raise_message,
assert_array_almost_equal, assert_array_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
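# Note (added for clarity): this kernel list covers a plain RBF, a
# fixed-length-scale RBF (excluded from hyperparameter-tuning tests via
# `kernel == fixed_kernel`), a bounded RBF, and scaled RBF variants with an
# additive constant term.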
def test_gpr_interpolation():
# Test the interpolating property for different kernels.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_pred, y)
assert_almost_equal(np.diag(y_cov), 0.)
def test_lml_improving():
# Test that hyperparameter-tuning improves log-marginal likelihood.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
# Test that lml of optimized kernel is stored correctly.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
# Test that we are in local maximum after hyperparameter-optimization.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
    # Test that hyperparameter-optimization remains in bounds.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
# Compare analytic and numeric gradient of log marginal likelihood.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
# Test that GP prior has mean 0 and identical variances.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
# Test that statistics of samples drawn from GP are correct.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
# Test that kernel parameters are unmodified when optimizer is None.
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
# Test that predicted std.-dev. is consistent with cov's diagonal.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
# Test that GPR can identify meaningful anisotropic length-scales.
    # We learn a function which varies in one dimension ten times slower
    # than in the other. The corresponding length-scales should differ by
    # at least a factor of 5.
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
# Test normalization of the target values in GP
# Fitting non-normalizing GP on normalized y and fitting normalizing GP
# on unnormalized y should yield identical results
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
# Test that GPR can deal with multi-dimensional target values
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
# Test that GPR can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_gpr_correct_error_message():
X = np.arange(12).reshape(6, -1)
y = np.ones(6)
kernel = DotProduct()
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
assert_raise_message(np.linalg.LinAlgError,
"The kernel, %s, is not returning a "
"positive definite matrix. Try gradually increasing "
"the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% kernel, gpr.fit, X, y)
def test_duplicate_input():
# Test GPR can handle two different output-values for the same input.
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
def test_no_fit_default_predict():
    # Test that GPR predictions without fit do not break by default.
default_kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
gpr1 = GaussianProcessRegressor()
_, y_std1 = gpr1.predict(X, return_std=True)
_, y_cov1 = gpr1.predict(X, return_cov=True)
gpr2 = GaussianProcessRegressor(kernel=default_kernel)
_, y_std2 = gpr2.predict(X, return_std=True)
_, y_cov2 = gpr2.predict(X, return_cov=True)
assert_array_almost_equal(y_std1, y_std2)
assert_array_almost_equal(y_cov1, y_cov2)
def test_K_inv_reset():
y2 = f(X2).ravel()
for kernel in kernels:
# Test that self._K_inv is reset after a new fit
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_true(hasattr(gpr, '_K_inv'))
assert_true(gpr._K_inv is None)
gpr.predict(X, return_std=True)
assert_true(gpr._K_inv is not None)
gpr.fit(X2, y2)
assert_true(gpr._K_inv is None)
gpr.predict(X2, return_std=True)
gpr2 = GaussianProcessRegressor(kernel=kernel).fit(X2, y2)
gpr2.predict(X2, return_std=True)
# the value of K_inv should be independent of the first fit
assert_array_equal(gpr._K_inv, gpr2._K_inv)
|
zorroblue/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
Python
|
bsd-3-clause
| 13,791
|
[
"Gaussian"
] |
4d8bbeb18d27f09dbb559e3330f2da6d1ef9d7e84e5aba83d86c77a63ef637e9
|
"""
Training costs for unsupervised learning of energy-based models
"""
import functools
import logging
import numpy as np
import sys
from theano import scan
import theano.tensor as T
from theano.compat.six.moves import zip as izip
from pylearn2.compat import OrderedDict
from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin
from pylearn2.utils import py_integer_types
from pylearn2.utils.rng import make_theano_rng
from pylearn2.models.rbm import BlockGibbsSampler
logger = logging.getLogger(__name__)
logger.debug("Cost changing the recursion limit.")
# We need this to be high enough that the big theano graphs we make
# when unrolling inference don't cause python to complain.
# python intentionally declares stack overflow well before the stack
# segment is actually exceeded. But we can't make this value too big
# either, or we'll get seg faults when the python interpreter really
# does go over the stack segment.
# IG encountered seg faults on eos3 (a machine at LISA labo) when using
# 50000 so for now it is set to 40000.
# I think the actual safe recursion limit can't be predicted in advance
# because you don't know how big of a stack frame each function will
# make, so there is not really a "correct" way to do this. Really the
# python interpreter should provide an option to raise the error
# precisely when you're going to exceed the stack segment.
sys.setrecursionlimit(40000)
class NCE(DefaultDataSpecsMixin, Cost):
"""
Noise-Contrastive Estimation
See "Noise-Contrastive Estimation: \
A new estimation principle for unnormalized models"
by Gutmann and Hyvarinen
Parameters
----------
noise : WRITEME
A Distribution from which noisy examples are generated
noise_per_clean : WRITEME
Number of noisy examples to generate for each clean example given
"""
def h(self, X, model):
"""
.. todo::
WRITEME
"""
return - T.nnet.sigmoid(self.G(X, model))
def G(self, X, model):
"""
.. todo::
WRITEME
"""
return model.log_prob(X) - self.noise.log_prob(X)
def expr(self, model, data, noisy_data=None):
"""
.. todo::
WRITEME
"""
# noisy_data is not considered part of the data.
# If you don't pass it in, it will be generated internally
# Passing it in lets you keep it constant while doing
# a learn search across several theano function calls
# and stuff like that
space, source = self.get_data_specs(model)
space.validate(data)
X = data
if X.name is None:
X_name = 'X'
else:
X_name = X.name
m_data = X.shape[0]
m_noise = m_data * self.noise_per_clean
if noisy_data is not None:
space.validate(noisy_data)
Y = noisy_data
else:
Y = self.noise.random_design_matrix(m_noise)
# Y = Print('Y',attrs=['min','max'])(Y)
# hx = self.h(X, model)
# hy = self.h(Y, model)
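        # Note (added): log(sigmoid(x)) = -softplus(-x) and
        # log(1 - sigmoid(x)) = -softplus(x); the softplus forms below are
        # the numerically stable versions of log h(X) and log(1 - h(Y)).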
log_hx = -T.nnet.softplus(-self.G(X, model))
log_one_minus_hy = -T.nnet.softplus(self.G(Y, model))
# based on equation 3 of the paper
# ours is the negative of theirs because
# they maximize it and we minimize it
rval = -T.mean(log_hx)-T.mean(log_one_minus_hy)
rval.name = 'NCE('+X_name+')'
return rval
def __init__(self, noise, noise_per_clean):
self.noise = noise
assert isinstance(noise_per_clean, py_integer_types)
self.noise_per_clean = noise_per_clean
class SM(DefaultDataSpecsMixin, Cost):
"""
(Regularized) Score Matching
See:
- "Regularized estimation of image statistics by Score Matching",
D. Kingma, Y. LeCun, NIPS 2010
- eqn. 4 of "On Autoencoders and Score Matching for Energy Based Models"
Swersky et al 2011
Uses the mean over visible units rather than sum over visible units
so that hyperparameters won't depend as much on the # of visible units
Parameters
----------
lambd : WRITEME
"""
def __init__(self, lambd=0):
assert lambd >= 0
self.lambd = lambd
def expr(self, model, data):
"""
.. todo::
WRITEME
"""
self.get_data_specs(model)[0].validate(data)
X = data
X_name = 'X' if X.name is None else X.name
def f(i, _X, _dx):
return T.grad(_dx[:, i].sum(), _X)[:, i]
dx = model.score(X)
ddx, _ = scan(f, sequences=[T.arange(X.shape[1])],
non_sequences=[X, dx])
ddx = ddx.T
assert len(ddx.type.broadcastable) == 2
rval = T.mean(0.5 * dx**2 + ddx + self.lambd * ddx**2)
rval.name = 'sm('+X_name+')'
return rval
class SMD(DefaultDataSpecsMixin, Cost):
"""
Denoising Score Matching
See eqn. 4.3 of
"A Connection Between Score Matching and Denoising Autoencoders"
by Pascal Vincent for details
Note that instead of using half the squared norm we use the mean
squared error, so that hyperparameters don't depend as much on
the # of visible units
Parameters
----------
corruptor : WRITEME
WRITEME
"""
def __init__(self, corruptor):
super(SMD, self).__init__()
self.corruptor = corruptor
@functools.wraps(Cost.expr)
def expr(self, model, data):
self.get_data_specs(model)[0].validate(data)
X = data
X_name = 'X' if X.name is None else X.name
corrupted_X = self.corruptor(X)
if corrupted_X.name is None:
corrupted_X.name = 'corrupt('+X_name+')'
model_score = model.score(corrupted_X)
assert len(model_score.type.broadcastable) == len(X.type.broadcastable)
parzen_score = T.grad(
- T.sum(self.corruptor.corruption_free_energy(corrupted_X, X)),
corrupted_X)
assert \
len(parzen_score.type.broadcastable) == len(X.type.broadcastable)
score_diff = model_score - parzen_score
score_diff.name = 'smd_score_diff('+X_name+')'
assert len(score_diff.type.broadcastable) == len(X.type.broadcastable)
# TODO: this could probably be faster as a tensordot,
# but we don't have tensordot for gpu yet
sq_score_diff = T.sqr(score_diff)
# sq_score_diff = Print('sq_score_diff',attrs=['mean'])(sq_score_diff)
smd = T.mean(sq_score_diff)
smd.name = 'SMD('+X_name+')'
return smd
def get_data_specs(self, model):
return (model.get_input_space(), model.get_input_source())
class SML(Cost):
"""
Stochastic Maximum Likelihood
See "On the convergence of Markovian stochastic algorithms with rapidly
decreasing ergodicity rates" by Laurent Younes (1998)
    Also known as Persistent Contrastive Divergence (PCD)
    See "Training restricted Boltzmann machines using approximations to
    the likelihood gradient" by Tijmen Tieleman (2008)
The number of particles fits the batch size.
Parameters
----------
batch_size: int
Batch size of the training algorithm
nsteps: int
Number of steps made by the block Gibbs sampler between each epoch
"""
def __init__(self, batch_size, nsteps):
super(SML, self).__init__()
self.nchains = batch_size
self.nsteps = nsteps
def get_gradients(self, model, data, **kwargs):
cost = self._cost(model, data, **kwargs)
params = list(model.get_params())
grads = T.grad(cost, params, disconnected_inputs='ignore',
consider_constant=[self.sampler.particles])
gradients = OrderedDict(izip(params, grads))
updates = OrderedDict()
sampler_updates = self.sampler.updates()
updates.update(sampler_updates)
return gradients, updates
def _cost(self, model, data):
if not hasattr(self, 'sampler'):
self.sampler = BlockGibbsSampler(
rbm=model,
particles=0.5+np.zeros((self.nchains, model.get_input_dim())),
rng=model.rng,
steps=self.nsteps)
# compute negative phase updates
sampler_updates = self.sampler.updates()
# Compute SML cost
pos_v = data
neg_v = self.sampler.particles
ml_cost = (model.free_energy(pos_v).mean() -
model.free_energy(neg_v).mean())
return ml_cost
def expr(self, model, data):
return None
def get_data_specs(self, model):
return (model.get_input_space(), model.get_input_source())
class CDk(Cost):
"""
Contrastive Divergence
See "Training products of experts by minimizing contrastive divergence"
by Geoffrey E. Hinton (2002)
Parameters
----------
nsteps : int
Number of Markov chain steps for the negative sample
seed : int
Seed for the random number generator
"""
def __init__(self, nsteps, seed=42):
super(CDk, self).__init__()
self.nsteps = nsteps
self.rng = make_theano_rng(seed, which_method='binomial')
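    # Note (added): unlike SML's persistent chains, CD-k restarts the Gibbs
    # chain from the data each time: _cost() initializes neg_v to the data and
    # runs nsteps of gibbs_step_for_v before taking the free-energy contrast.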
def _cost(self, model, data):
pos_v = data
neg_v = data
for k in range(self.nsteps):
[neg_v, _locals] = model.gibbs_step_for_v(neg_v, self.rng)
# Compute CD cost
ml_cost = (model.free_energy(pos_v).mean() -
model.free_energy(neg_v).mean())
return ml_cost, neg_v
def get_gradients(self, model, data, **kwargs):
cost, neg_v = self._cost(model, data, **kwargs)
params = list(model.get_params())
grads = T.grad(cost, params, disconnected_inputs='ignore',
consider_constant=[neg_v])
gradients = OrderedDict(izip(params, grads))
updates = OrderedDict()
return gradients, updates
def expr(self, model, data):
return None
def get_data_specs(self, model):
return (model.get_input_space(), model.get_input_source())
|
junbochen/pylearn2
|
pylearn2/costs/ebm_estimation.py
|
Python
|
bsd-3-clause
| 10,143
|
[
"CDK"
] |
6d694d622a914dfbac1be90ec5c992dc29d488af25bba070318c34079f9afc85
|
#!/usr/bin/env python
#JSON {"lot": "RKS/6-31G(d)",
#JSON "scf": "CDIISSCFSolver",
#JSON "er": "cholesky",
#JSON "difficulty": 7,
#JSON "description": "Basic RKS DFT example with hybrid MGGA exhange-correlation functional (TPSS)"}
import numpy as np
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/water.xyz')
mol = IOData.from_file(fn_xyz)
# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '6-31g(d)')
# Compute Gaussian integrals
olp = obasis.compute_overlap()
kin = obasis.compute_kinetic()
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers)
er_vecs = obasis.compute_electron_repulsion_cholesky()
# Define a numerical integration grid, needed for the XC functionals
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers)
# Create alpha orbitals
orb_alpha = Orbitals(obasis.nbasis)
# Initial guess
guess_core_hamiltonian(olp, kin + na, orb_alpha)
# Construct the restricted KS effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
libxc_term = RLibXCHybridMGGA('xc_m05')
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er_vecs, 'hartree'),
RGridGroup(obasis, grid, [libxc_term]),
RExchangeTerm(er_vecs, 'x_hf', libxc_term.get_exx_fraction()),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons)
occ_model = AufbauOccModel(5)
# Converge WFN with CDIIS SCF
# - Construct the initial density matrix (needed for CDIIS).
occ_model.assign(orb_alpha)
dm_alpha = orb_alpha.to_dm()
# - SCF solver
scf_solver = CDIISSCFSolver(1e-6)
scf_solver(ham, olp, occ_model, dm_alpha)
# Derive orbitals (coeffs, energies and occupations) from the Fock and density
# matrices. The energy is also computed to store it in the output file below.
fock_alpha = np.zeros(olp.shape)
ham.reset(dm_alpha)
ham.compute_energy()
ham.compute_fock(fock_alpha)
orb_alpha.from_fock_and_dm(fock_alpha, dm_alpha, olp)
# Assign results to the molecule object and write it to a file, e.g. for
# later analysis. Note that the CDIIS algorithm can only really construct an
# optimized density matrix, not orbitals.
mol.title = 'RKS computation on water'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.orb_alpha = orb_alpha
mol.dm_alpha = dm_alpha
# useful for post-processing (results stored in double precision):
mol.to_file('water.h5')
# CODE BELOW IS FOR horton-regression-test.py ONLY. IT IS NOT PART OF THE EXAMPLE.
rt_results = {
'energy': ham.cache['energy'],
'orb_alpha': orb_alpha.energies,
'nn': ham.cache["energy_nn"],
'kin': ham.cache["energy_kin"],
'ne': ham.cache["energy_ne"],
'grid': ham.cache["energy_grid_group"],
'hartree': ham.cache["energy_hartree"],
'x_hf': ham.cache["energy_x_hf"],
}
# BEGIN AUTOGENERATED CODE. DO NOT CHANGE MANUALLY.
rt_previous = {
'energy': -76.372223106410885,
'orb_alpha': np.array([
-19.174675917533499, -1.0216889289766689, -0.54324149010045464,
-0.37631403914157158, -0.30196183487620326, 0.079896573985756419,
0.16296304612701332, 0.81419059490960388, 0.86377461055569127, 0.9243929453024935,
0.95050094195149326, 1.1033737076332981, 1.4108569929549999, 1.7561523962868733,
1.761532111350379, 1.8055689722633752, 2.3348442517458823, 2.6275437456471868
]),
'grid': -6.821114560989138,
'hartree': 46.93245844915478,
'kin': 76.05549816546615,
'ne': -199.18635862588496,
'nn': 9.1571750364299866,
'x_hf': -2.50988157058769,
}
|
QuantumElephant/horton
|
data/examples/hf_dft/rks_water_hybmgga.py
|
Python
|
gpl-3.0
| 3,707
|
[
"Gaussian"
] |
3d28612510d909969ff24e689f29a0bf972aac8a9bbc3f83a0e8bf2599ba5eec
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
from .ClipperFilterBase import ClipperFilterBase
class BoxClipper(ClipperFilterBase):
"""
Clip object using a box.
see vtkBox
"""
@staticmethod
def getOptions():
opt = ClipperFilterBase.getOptions()
        opt.add('lower', [0.5, 0.5, 0.5], "The lower corner of the clipping box.")
        opt.add('upper', [1, 1, 1], "The upper corner of the clipping box.")
return opt
def __init__(self, **kwargs):
super(BoxClipper, self).__init__(vtkclipfunction=vtk.vtkBox, **kwargs)
def update(self, **kwargs):
"""
Update the bounds of the clipping box.
"""
super(BoxClipper, self).update(**kwargs)
lower = self.getPosition(self.getOption('lower'))
upper = self.getPosition(self.getOption('upper'))
self._vtkclipfunction.SetXMin(lower)
self._vtkclipfunction.SetXMax(upper)
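# Hypothetical usage sketch (added; anything beyond this file's API is
# illustrative):
#
#   clipper = BoxClipper(lower=[0, 0, 0], upper=[2, 1, 1])
#   # then pass it to a chigger result object via its 'filters' option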
|
harterj/moose
|
python/chigger/filters/BoxClipper.py
|
Python
|
lgpl-2.1
| 1,241
|
[
"MOOSE",
"VTK"
] |
a7cf17442fba320dc544a1930e2cf8903cf93d098353d0baaad2ffa27badd63e
|
"""
An efficient implementation of the triple-plane view showing 3 cut planes
on volumetric data, and side views showing each cut, with a cursor to
move the other cuts.
This is an example of complex callback interaction. It builds on the
:ref:`example_volume_slicer` but has more complex logic. You should try
to understand the :ref:`example_volume_slicer` first.
In this example, the VolumeSlicer object displays a position attribute
giving the position of the cut in data coordinates. Traits callbacks are
used to move the cut planes when this position attribute is modified.
In the 3D window, the 3D cuts are displayed using ImagePlaneWidgets
cutting the 3D volumetric data. The data extracted by the
ImagePlaneWidgets for plotting is captured using the TVTK
ImagePlaneWidget's `_get_reslice_output` method. The resulting dataset is
plotted in each side view using another ImagePlaneWidget. As a result the
data is not copied (at the VTK level, there is only one pipeline), and
modifications of the data plotted on the planes in the 3D view (for
instance when these planes are moved) are propagated to the 2D side views
by the VTK pipeline.
A cursor is displayed in each side view using a glyph. The cursor
indicates the position of the cut.
In the side view, when the mouse button is pressed on the planes, it
creates a VTK `InteractionEvent`. When this happens, VTK calls a
callback (an observer, in VTK terms) that we use to move the position of
the cut. The Traits callbacks do the rest of the updating.
"""
import numpy as np
from traits.api import HasTraits, Instance, Array, \
Bool, Dict, on_trait_change
from traitsui.api import View, Item, HGroup, Group
from tvtk.api import tvtk
from tvtk.pyface.scene import Scene
from mayavi import mlab
from mayavi.core.api import PipelineBase, Source
from mayavi.core.ui.api import SceneEditor
from mayavi.tools.mlab_scene_model import MlabSceneModel
################################################################################
# The object implementing the dialog
class VolumeSlicer(HasTraits):
# The data to plot
data = Array
# The position of the view
position = Array(shape=(3,))
# The 4 views displayed
scene3d = Instance(MlabSceneModel, ())
scene_x = Instance(MlabSceneModel, ())
scene_y = Instance(MlabSceneModel, ())
scene_z = Instance(MlabSceneModel, ())
# The data source
data_src = Instance(Source)
# The image plane widgets of the 3D scene
ipw_3d_x = Instance(PipelineBase)
ipw_3d_y = Instance(PipelineBase)
ipw_3d_z = Instance(PipelineBase)
# The cursors on each view:
cursors = Dict()
disable_render = Bool
_axis_names = dict(x=0, y=1, z=2)
#---------------------------------------------------------------------------
# Object interface
#---------------------------------------------------------------------------
def __init__(self, **traits):
super(VolumeSlicer, self).__init__(**traits)
# Force the creation of the image_plane_widgets:
self.ipw_3d_x
self.ipw_3d_y
self.ipw_3d_z
#---------------------------------------------------------------------------
# Default values
#---------------------------------------------------------------------------
def _position_default(self):
return 0.5*np.array(self.data.shape)
def _data_src_default(self):
return mlab.pipeline.scalar_field(self.data,
figure=self.scene3d.mayavi_scene,
name='Data',)
def make_ipw_3d(self, axis_name):
ipw = mlab.pipeline.image_plane_widget(self.data_src,
figure=self.scene3d.mayavi_scene,
plane_orientation='%s_axes' % axis_name,
name='Cut %s' % axis_name)
return ipw
def _ipw_3d_x_default(self):
return self.make_ipw_3d('x')
def _ipw_3d_y_default(self):
return self.make_ipw_3d('y')
def _ipw_3d_z_default(self):
return self.make_ipw_3d('z')
#---------------------------------------------------------------------------
# Scene activation callbacks
#---------------------------------------------------------------------------
@on_trait_change('scene3d.activated')
def display_scene3d(self):
outline = mlab.pipeline.outline(self.data_src,
figure=self.scene3d.mayavi_scene,
)
self.scene3d.mlab.view(40, 50)
# Interaction properties can only be changed after the scene
# has been created, and thus the interactor exists
for ipw in (self.ipw_3d_x, self.ipw_3d_y, self.ipw_3d_z):
ipw.ipw.interaction = 0
self.scene3d.scene.background = (0, 0, 0)
# Keep the view always pointing up
self.scene3d.scene.interactor.interactor_style = \
tvtk.InteractorStyleTerrain()
self.update_position()
def make_side_view(self, axis_name):
scene = getattr(self, 'scene_%s' % axis_name)
scene.scene.parallel_projection = True
ipw_3d = getattr(self, 'ipw_3d_%s' % axis_name)
# We create the image_plane_widgets in the side view using a
# VTK dataset pointing to the data on the corresponding
# image_plane_widget in the 3D view (it is returned by
# ipw_3d._get_reslice_output())
ipw = mlab.pipeline.image_plane_widget(
ipw_3d.ipw._get_reslice_output(),
plane_orientation='z_axes',
vmin=self.data.min(),
vmax=self.data.max(),
figure=scene.mayavi_scene,
name='Cut view %s' % axis_name,
)
setattr(self, 'ipw_%s' % axis_name, ipw)
# Make left-clicking create a crosshair
ipw.ipw.left_button_action = 0
x, y, z = self.position
cursor = mlab.points3d(x, y, z,
mode='axes',
color=(0, 0, 0),
scale_factor=2*max(self.data.shape),
figure=scene.mayavi_scene,
name='Cursor view %s' % axis_name,
)
self.cursors[axis_name] = cursor
# Add a callback on the image plane widget interaction to
# move the others
this_axis_number = self._axis_names[axis_name]
def move_view(obj, evt):
            # Build the full 3D position from the 2D cursor position,
            # re-inserting the coordinate along this view's axis
position = list(obj.GetCurrentCursorPosition())[:2]
position.insert(this_axis_number, self.position[this_axis_number])
            # We need to special-case y, as the view has been rotated.
            if axis_name == 'y':
                position = position[::-1]
self.position = position
ipw.ipw.add_observer('InteractionEvent', move_view)
ipw.ipw.add_observer('StartInteractionEvent', move_view)
# Center the image plane widget
ipw.ipw.slice_position = 0.5*self.data.shape[
self._axis_names[axis_name]]
# 2D interaction: only pan and zoom
scene.scene.interactor.interactor_style = \
tvtk.InteractorStyleImage()
scene.scene.background = (0, 0, 0)
# Some text:
mlab.text(0.01, 0.8, axis_name, width=0.08)
        # Choose a view that makes sense
views = dict(x=(0, 0), y=(90, 180), z=(0, 0))
#mlab.view(*views[axis_name],figure=scene.mayavi_scene)
#focalpoint=0.5*np.array(self.data.shape),
scene.scene.camera.parallel_scale = 0.52*np.mean(self.data.shape)
@on_trait_change('scene_x.activated')
def display_scene_x(self):
return self.make_side_view('x')
@on_trait_change('scene_y.activated')
def display_scene_y(self):
return self.make_side_view('y')
@on_trait_change('scene_z.activated')
def display_scene_z(self):
return self.make_side_view('z')
#---------------------------------------------------------------------------
# Traits callback
#---------------------------------------------------------------------------
@on_trait_change('position')
def update_position(self):
""" Update the position of the cursors on each side view, as well
as the image_plane_widgets in the 3D view.
"""
        # First disable rendering in all scenes to avoid unnecessary
        # renderings
self.disable_render = True
# For each axis, move image_plane_widget and the cursor in the
# side view
for axis_name, axis_number in self._axis_names.iteritems():
ipw3d = getattr(self, 'ipw_3d_%s' % axis_name)
ipw3d.ipw.slice_position = self.position[axis_number]
# Go from the 3D position, to the 2D coordinates in the
# side view
position2d = list(self.position)
position2d.pop(axis_number)
            if axis_name == 'y':
position2d = position2d[::-1]
# Move the cursor
self.cursors[axis_name].mlab_source.set(
x=[position2d[0]],
y=[position2d[1]],
z=[0])
# Finally re-enable rendering
self.disable_render = False
@on_trait_change('disable_render')
def _render_enable(self):
for scene in (self.scene3d, self.scene_x, self.scene_y,
self.scene_z):
scene.scene.disable_render = self.disable_render
#---------------------------------------------------------------------------
# The layout of the dialog created
#---------------------------------------------------------------------------
view = View(HGroup(
Group(
Item('scene_y',
editor=SceneEditor(scene_class=Scene),
height=250, width=300),
Item('scene_z',
editor=SceneEditor(scene_class=Scene),
height=250, width=300),
show_labels=False,
),
Group(
Item('scene_x',
editor=SceneEditor(scene_class=Scene),
height=250, width=300),
Item('scene3d',
editor=SceneEditor(scene_class=Scene),
height=250, width=300),
show_labels=False,
),
),
resizable=True,
title='Volume Slicer',
)
################################################################################
if __name__ == '__main__':
# Create some data
x, y, z = np.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]
data = np.sin(3*x)/x + 0.05*z**2 + np.cos(3*y)
m = VolumeSlicer(data=data)
m.configure_traits()
|
LTS5/connectomeviewer
|
cviewer/visualization/volume/volume_slicer_advanced.py
|
Python
|
bsd-3-clause
| 11,268
|
[
"Mayavi",
"VTK"
] |
369d9bc29d78b53e01ca6381598f9e06a05ca1fe82235cac98f62cea939c45a8
|
# -*- coding: utf-8 -*-
# imports here
from aiida.parsers.exceptions import OutputParsingError
#
from aiida.orm.data.folder import FolderData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.structure import StructureData
from aiida.orm.data.array import ArrayData
#
from aiida.parsers.plugins.vasp.instruction import BaseInstruction
#
from pymatgen.io.vasp import Vasprun
import numpy as np
class Array_data_parserInstruction(BaseInstruction):
_input_file_list_ = ['vasprun.xml', 'POTCAR']
def _parser_function(self):
"""
Parses the vasprun.xml.
"""
# Get born charges and epsilon
nodes_list = []
array_data = ArrayData()
try:
import xml.etree.cElementTree as ET
tree = ET.parse(self._out_folder.get_abs_path('vasprun.xml'))
root = tree.getroot()
for elements in root.iter('varray'):
if elements.attrib['name'] == 'epsilon':
epsilon = []
for row in elements:
epsilon.append(np.array(row.text.split(), dtype=float))
epsilon = np.array(epsilon)
array_data.set_array('epsilon', epsilon)
break
for elements in root.iter('array'):
try:
if elements.attrib['name'] == 'born_charges':
born_charges = []
for atom in elements[1:]:
atom_array = []
for c in atom:
atom_array.append(np.array(c.text.split(), dtype=float))
born_charges.append(atom_array)
born_charges = np.array(born_charges)
array_data.set_array('born_charges', born_charges)
break
except KeyError:
pass
        except Exception:
            # vasprun.xml may lack epsilon/born_charges sections; skip quietly
            pass
# Use pymatgen vasp parser to get atomic forces and stress tensor
vspr = Vasprun(self._out_folder.get_abs_path('vasprun.xml'), exception_on_bad_xml=False)
# Get forces using pymatgen
try:
forces = np.array([vspr.ionic_steps[-1]['forces']])
array_data.set_array('forces', forces)
except Exception, e:
msg = (
"Processing forces, "
"with error Message:\n>> {}".format(e)
)
raise OutputParsingError(msg)
try:
stress = np.array(vspr.ionic_steps[-1]['stress'])
array_data.set_array('stress', stress)
except Exception, e:
msg = (
"Processing stress, "
"with error Message:\n>> {}".format(e)
)
raise OutputParsingError(msg)
try:
nodes_list.append((
'output_array', array_data
))
except Exception, e:
msg = (
"Failed to create AiiDA data structures "
"(ParameterData/ArrrayData) from parsed data, "
"with error message:\n>> {}".format(e)
)
raise OutputParsingError(msg)
parser_warnings = None
return nodes_list, parser_warnings
|
abelcarreras/aiida_extensions
|
plugins/parsers/vasp/instruction/data/array_data_parser.py
|
Python
|
mit
| 3,324
|
[
"VASP",
"pymatgen"
] |
272195dc25cadd074db7fde495772afbe62eab401975c4027dc877dc061dc44c
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla-specific Buildbot steps.
#
# The Initial Developer of the Original Code is
# Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Brian Warner <warner@lothar.com>
# Chris AtLee <catlee@mozilla.com>
# Dustin Mitchell <dustin@zmanda.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import sys, collections, base64
from twisted.python import log, threadable
from twisted.internet import defer
from twisted.enterprise import adbapi
from buildbot import util
from buildbot.util import collections as bbcollections
from buildbot.changes.changes import Change
from buildbot.sourcestamp import SourceStamp
from buildbot.buildrequest import BuildRequest
from buildbot.process.properties import Properties
from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE
from buildbot.util.eventual import eventually
from buildbot.util import json
# Don't auto-resubmit queries that encounter a broken connection: let them
# fail. Use the "notification doorbell" thing to provide the retry. Set
# cp_reconnect=True, so that a connection failure will prepare the
# ConnectionPool to reconnect next time.
class MyTransaction(adbapi.Transaction):
def execute(self, *args, **kwargs):
#print "Q", args, kwargs
return self._cursor.execute(*args, **kwargs)
def fetchall(self):
rc = self._cursor.fetchall()
#print " F", rc
return rc
def _one_or_else(res, default=None, process_f=lambda x: x):
if not res:
return default
return process_f(res[0][0])
def str_or_none(s):
if s is None:
return None
return str(s)
class Token: # used for _start_operation/_end_operation
pass
class DBConnector(util.ComparableMixin):
# this will refuse to create the database: use 'create-master' for that
compare_attrs = ["args", "kwargs"]
synchronized = ["notify", "_end_operation"]
MAX_QUERY_TIMES = 1000
def __init__(self, spec):
# typical args = (dbmodule, dbname, username, password)
self._query_times = collections.deque()
self._spec = spec
# this is for synchronous calls: runQueryNow, runInteractionNow
self._dbapi = spec.get_dbapi()
self._nonpool = None
self._nonpool_lastused = None
self._nonpool_max_idle = spec.get_maxidle()
# pass queries in with "?" placeholders. If the backend uses a
# different style, we'll replace them.
self.paramstyle = self._dbapi.paramstyle
self._pool = spec.get_async_connection_pool()
self._pool.transactionFactory = MyTransaction
# the pool must be started before it can be used. The real
# buildmaster process will do this at reactor start. CLI tools (like
# "buildbot upgrade-master") must do it manually. Unit tests are run
# in an environment in which it is already started.
self._change_cache = util.LRUCache()
self._sourcestamp_cache = util.LRUCache()
self._active_operations = set() # protected by synchronized=
self._pending_notifications = []
self._subscribers = bbcollections.defaultdict(set)
self._pending_operation_count = 0
self._started = False
def _getCurrentTime(self):
# this is a seam for use in testing
return util.now()
def start(self):
# this only *needs* to be called in reactorless environments (which
# should be eliminated anyway). but it doesn't hurt anyway
self._pool.start()
self._started = True
def stop(self):
"""Call this when you're done with me"""
# Close our synchronous connection if we've got one
if self._nonpool:
self._nonpool.close()
self._nonpool = None
self._nonpool_lastused = None
if not self._started:
return
self._pool.close()
self._started = False
del self._pool
def quoteq(self, query):
"""
Given a query that contains qmark-style placeholders, like::
INSERT INTO foo (col1, col2) VALUES (?,?)
replace the '?' with '%s' if the backend uses format-style
placeholders, like::
INSERT INTO foo (col1, col2) VALUES (%s,%s)
"""
if self.paramstyle == "format":
return query.replace("?","%s")
assert self.paramstyle == "qmark"
return query
def parmlist(self, count):
"""
When passing long lists of values to e.g., an INSERT query, it is
tedious to pass long strings of ? placeholders. This function will
create a parenthesis-enclosed list of COUNT placeholders. Note that
the placeholders have already had quoteq() applied.
"""
p = self.quoteq("?")
return "(" + ",".join([p]*count) + ")"
def get_version(self):
"""Returns None for an empty database, or a number (probably 1) for
the database's version"""
try:
res = self.runQueryNow("SELECT version FROM version")
except (self._dbapi.OperationalError, self._dbapi.ProgrammingError):
# this means the version table is missing: the db is empty
return None
assert len(res) == 1
return res[0][0]
def runQueryNow(self, *args, **kwargs):
# synchronous+blocking version of runQuery()
assert self._started
return self.runInteractionNow(self._runQuery, *args, **kwargs)
def _runQuery(self, c, *args, **kwargs):
c.execute(*args, **kwargs)
return c.fetchall()
def _start_operation(self):
t = Token()
self._active_operations.add(t)
return t
def _end_operation(self, t):
# this is always invoked from the main thread, but is wrapped by
# synchronized= and threadable.synchronous(), since it touches
# self._pending_notifications, which is also touched by
# runInteraction threads
self._active_operations.discard(t)
if self._active_operations:
return
for (category, args) in self._pending_notifications:
# in the distributed system, this will be a
# transport.write(" ".join([category] + [str(a) for a in args]))
eventually(self.send_notification, category, args)
self._pending_notifications = []
def runInteractionNow(self, interaction, *args, **kwargs):
# synchronous+blocking version of runInteraction()
assert self._started
start = self._getCurrentTime()
t = self._start_operation()
try:
return self._runInteractionNow(interaction, *args, **kwargs)
finally:
self._end_operation(t)
self._add_query_time(start)
def get_sync_connection(self):
# This is a wrapper around spec.get_sync_connection that maintains a
# single connection to the database for synchronous usage. It will get
# a new connection if the existing one has been idle for more than
# max_idle seconds.
if self._nonpool_max_idle is not None:
now = util.now()
if self._nonpool_lastused and self._nonpool_lastused + self._nonpool_max_idle < now:
self._nonpool = None
if not self._nonpool:
self._nonpool = self._spec.get_sync_connection()
self._nonpool_lastused = util.now()
return self._nonpool
def _runInteractionNow(self, interaction, *args, **kwargs):
conn = self.get_sync_connection()
c = conn.cursor()
try:
result = interaction(c, *args, **kwargs)
c.close()
conn.commit()
return result
except:
excType, excValue, excTraceback = sys.exc_info()
try:
conn.rollback()
c2 = conn.cursor()
c2.execute(self._pool.good_sql)
c2.close()
conn.commit()
except:
log.msg("rollback failed, will reconnect next query")
log.err()
# and the connection is probably dead: clear the reference,
# so we'll establish a new connection next time
self._nonpool = None
raise excType, excValue, excTraceback
def notify(self, category, *args):
# this is wrapped by synchronized= and threadable.synchronous(),
# since it will be invoked from runInteraction threads
self._pending_notifications.append( (category,args) )
def send_notification(self, category, args):
# in the distributed system, this will be invoked by lineReceived()
#print "SEND", category, args
for observer in self._subscribers[category]:
eventually(observer, category, *args)
def subscribe_to(self, category, observer):
self._subscribers[category].add(observer)
def runQuery(self, *args, **kwargs):
assert self._started
self._pending_operation_count += 1
start = self._getCurrentTime()
#t = self._start_operation() # why is this commented out? -warner
d = self._pool.runQuery(*args, **kwargs)
#d.addBoth(self._runQuery_done, start, t)
return d
def _runQuery_done(self, res, start, t):
self._end_operation(t)
self._add_query_time(start)
self._pending_operation_count -= 1
return res
def _add_query_time(self, start):
elapsed = self._getCurrentTime() - start
self._query_times.append(elapsed)
if len(self._query_times) > self.MAX_QUERY_TIMES:
self._query_times.popleft()
def runInteraction(self, *args, **kwargs):
assert self._started
self._pending_operation_count += 1
start = self._getCurrentTime()
t = self._start_operation()
d = self._pool.runInteraction(*args, **kwargs)
d.addBoth(self._runInteraction_done, start, t)
return d
def _runInteraction_done(self, res, start, t):
self._end_operation(t)
self._add_query_time(start)
self._pending_operation_count -= 1
return res
# ChangeManager methods
def addChangeToDatabase(self, change):
self.runInteractionNow(self._txn_addChangeToDatabase, change)
self._change_cache.add(change.number, change)
def _txn_addChangeToDatabase(self, t, change):
q = self.quoteq("INSERT INTO changes"
" (author,"
" comments, is_dir,"
" branch, revision, revlink,"
" when_timestamp, category,"
" repository, project)"
" VALUES (?, ?,?, ?,?,?, ?,?, ?,?)")
# TODO: map None to.. empty string?
values = (change.who,
change.comments, change.isdir,
change.branch, change.revision, change.revlink,
change.when, change.category, change.repository,
change.project)
t.execute(q, values)
change.number = t.lastrowid
for link in change.links:
t.execute(self.quoteq("INSERT INTO change_links (changeid, link) "
"VALUES (?,?)"),
(change.number, link))
for filename in change.files:
t.execute(self.quoteq("INSERT INTO change_files (changeid,filename)"
" VALUES (?,?)"),
(change.number, filename))
for propname,propvalue in change.properties.properties.items():
encoded_value = json.dumps(propvalue)
t.execute(self.quoteq("INSERT INTO change_properties"
" (changeid, property_name, property_value)"
" VALUES (?,?,?)"),
(change.number, propname, encoded_value))
self.notify("add-change", change.number)
def changeEventGenerator(self, branches=[], categories=[], committers=[], minTime=0):
q = "SELECT changeid FROM changes"
args = []
if branches or categories or committers:
q += " WHERE "
pieces = []
if branches:
pieces.append("branch IN %s" % self.parmlist(len(branches)))
args.extend(list(branches))
if categories:
pieces.append("category IN %s" % self.parmlist(len(categories)))
args.extend(list(categories))
if committers:
pieces.append("author IN %s" % self.parmlist(len(committers)))
args.extend(list(committers))
if minTime:
pieces.append("when_timestamp > %d" % minTime)
q += " AND ".join(pieces)
q += " ORDER BY changeid DESC"
rows = self.runQueryNow(q, tuple(args))
for (changeid,) in rows:
yield self.getChangeNumberedNow(changeid)
def getLatestChangeNumberNow(self, t=None):
if t:
return self._txn_getLatestChangeNumber(t)
else:
return self.runInteractionNow(self._txn_getLatestChangeNumber)
def _txn_getLatestChangeNumber(self, t):
q = self.quoteq("SELECT max(changeid) from changes")
t.execute(q)
row = t.fetchone()
if not row:
return 0
return row[0]
def getChangeNumberedNow(self, changeid, t=None):
# this is a synchronous/blocking version of getChangeByNumber
assert changeid >= 0
c = self._change_cache.get(changeid)
if c:
return c
if t:
c = self._txn_getChangeNumberedNow(t, changeid)
else:
c = self.runInteractionNow(self._txn_getChangeNumberedNow, changeid)
self._change_cache.add(changeid, c)
return c
def _txn_getChangeNumberedNow(self, t, changeid):
q = self.quoteq("SELECT author, comments,"
" is_dir, branch, revision, revlink,"
" when_timestamp, category,"
" repository, project"
" FROM changes WHERE changeid = ?")
t.execute(q, (changeid,))
rows = t.fetchall()
if not rows:
return None
(who, comments,
isdir, branch, revision, revlink,
when, category, repository, project) = rows[0]
branch = str_or_none(branch)
revision = str_or_none(revision)
q = self.quoteq("SELECT link FROM change_links WHERE changeid=?")
t.execute(q, (changeid,))
rows = t.fetchall()
links = [row[0] for row in rows]
links.sort()
q = self.quoteq("SELECT filename FROM change_files WHERE changeid=?")
t.execute(q, (changeid,))
rows = t.fetchall()
files = [row[0] for row in rows]
files.sort()
p = self.get_properties_from_db("change_properties", "changeid",
changeid, t)
c = Change(who=who, files=files, comments=comments, isdir=isdir,
links=links, revision=revision, when=when,
branch=branch, category=category, revlink=revlink,
repository=repository, project=project)
c.properties.updateFromProperties(p)
c.number = changeid
return c
def getChangeByNumber(self, changeid):
# return a Deferred that fires with a Change instance, or None if
# there is no Change with that number
assert changeid >= 0
c = self._change_cache.get(changeid)
if c:
return defer.succeed(c)
d1 = self.runQuery(self.quoteq("SELECT author, comments,"
" is_dir, branch, revision, revlink,"
" when_timestamp, category,"
" repository, project"
" FROM changes WHERE changeid = ?"),
(changeid,))
d2 = self.runQuery(self.quoteq("SELECT link FROM change_links"
" WHERE changeid=?"),
(changeid,))
d3 = self.runQuery(self.quoteq("SELECT filename FROM change_files"
" WHERE changeid=?"),
(changeid,))
d4 = self.runInteraction(self._txn_get_properties_from_db,
"change_properties", "changeid", changeid)
d = defer.gatherResults([d1,d2,d3,d4])
d.addCallback(self._getChangeByNumber_query_done, changeid)
return d
def _getChangeByNumber_query_done(self, res, changeid):
(rows, link_rows, file_rows, properties) = res
if not rows:
return None
(who, comments,
isdir, branch, revision, revlink,
when, category, repository, project) = rows[0]
branch = str_or_none(branch)
revision = str_or_none(revision)
links = [row[0] for row in link_rows]
links.sort()
files = [row[0] for row in file_rows]
files.sort()
c = Change(who=who, files=files, comments=comments, isdir=isdir,
links=links, revision=revision, when=when,
branch=branch, category=category, revlink=revlink,
repository=repository, project=project)
c.properties.updateFromProperties(properties)
c.number = changeid
self._change_cache.add(changeid, c)
return c
def getChangesGreaterThan(self, last_changeid, t=None):
"""Return a Deferred that fires with a list of all Change instances
with numbers greater than the given value, sorted by number. This is
useful for catching up with everything that's happened since you last
called this function."""
assert last_changeid >= 0
if t:
return self._txn_getChangesGreaterThan(t, last_changeid)
else:
return self.runInteractionNow(self._txn_getChangesGreaterThan,
last_changeid)
def _txn_getChangesGreaterThan(self, t, last_changeid):
q = self.quoteq("SELECT changeid FROM changes WHERE changeid > ?")
t.execute(q, (last_changeid,))
changes = [self.getChangeNumberedNow(changeid, t)
for (changeid,) in t.fetchall()]
changes.sort(key=lambda c: c.number)
return changes
def getChangesByNumber(self, changeids):
return defer.gatherResults([self.getChangeByNumber(changeid)
for changeid in changeids])
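    # Usage sketch (hypothetical caller, assuming a running Twisted
    # reactor); defer.gatherResults preserves input order, so the fired
    # list of Changes is aligned with the changeids argument:
    #
    #     d = db.getChangesByNumber([1, 2, 3])
    #     d.addCallback(lambda changes: [c.revision for c in changes])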
# SourceStamp-manipulating methods
def getSourceStampNumberedNow(self, ssid, t=None):
assert isinstance(ssid, (int, long))
ss = self._sourcestamp_cache.get(ssid)
if ss:
return ss
if t:
ss = self._txn_getSourceStampNumbered(t, ssid)
else:
ss = self.runInteractionNow(self._txn_getSourceStampNumbered,
ssid)
self._sourcestamp_cache.add(ssid, ss)
return ss
def _txn_getSourceStampNumbered(self, t, ssid):
assert isinstance(ssid, (int, long))
t.execute(self.quoteq("SELECT branch,revision,patchid,project,repository"
" FROM sourcestamps WHERE id=?"),
(ssid,))
r = t.fetchall()
if not r:
return None
(branch_u, revision_u, patchid, project, repository) = r[0]
branch = str_or_none(branch_u)
revision = str_or_none(revision_u)
patch = None
if patchid is not None:
t.execute(self.quoteq("SELECT patchlevel,patch_base64,subdir"
" FROM patches WHERE id=?"),
(patchid,))
r = t.fetchall()
assert len(r) == 1
(patch_level, patch_text_base64, subdir_u) = r[0]
patch_text = base64.b64decode(patch_text_base64)
if subdir_u:
patch = (patch_level, patch_text, str(subdir_u))
else:
patch = (patch_level, patch_text)
t.execute(self.quoteq("SELECT changeid FROM sourcestamp_changes"
" WHERE sourcestampid=?"
" ORDER BY changeid ASC"),
(ssid,))
r = t.fetchall()
changes = None
if r:
changes = [self.getChangeNumberedNow(changeid, t)
for (changeid,) in r]
ss = SourceStamp(branch, revision, patch, changes, project=project, repository=repository)
ss.ssid = ssid
return ss
# Properties methods
def get_properties_from_db(self, tablename, idname, id, t=None):
if t:
return self._txn_get_properties_from_db(t, tablename, idname, id)
else:
return self.runInteractionNow(self._txn_get_properties_from_db,
tablename, idname, id)
def _txn_get_properties_from_db(self, t, tablename, idname, id):
# apparently you can't use argument placeholders for table names. Don't
# call this with a weird-looking tablename.
q = self.quoteq("SELECT property_name,property_value FROM %s WHERE %s=?"
% (tablename, idname))
t.execute(q, (id,))
retval = Properties()
for key, valuepair in t.fetchall():
value, source = json.loads(valuepair)
retval.setProperty(str(key), value, source)
return retval
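    # Illustration (values are hypothetical): the property_value column
    # holds a JSON-encoded (value, source) pair, so a stored row like
    # ('owner', '["warner", "Change"]') decodes to
    # retval.setProperty('owner', 'warner', 'Change').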
# Scheduler manipulation methods
def addSchedulers(self, added):
return self.runInteraction(self._addSchedulers, added)
def _addSchedulers(self, t, added):
for scheduler in added:
name = scheduler.name
assert name
class_name = "%s.%s" % (scheduler.__class__.__module__,
scheduler.__class__.__name__)
q = self.quoteq("""
SELECT schedulerid, class_name FROM schedulers WHERE
name=? AND
(class_name=? OR class_name='')
""")
t.execute(q, (name, class_name))
row = t.fetchone()
if row:
sid, db_class_name = row
if db_class_name == '':
# We're updating from an old schema where the class name
# wasn't stored.
# Update this row's class name and move on
q = self.quoteq("""UPDATE schedulers SET class_name=?
WHERE schedulerid=?""")
t.execute(q, (class_name, sid))
elif db_class_name != class_name:
# A different scheduler is being used with this name.
# Ignore the old scheduler and create a new one
sid = None
else:
sid = None
if sid is None:
                # create a new row, with the latest changeid (so it won't try
                # to process all of the old changes); new Schedulers are
                # supposed to ignore pre-existing Changes
q = ("SELECT changeid FROM changes"
" ORDER BY changeid DESC LIMIT 1")
t.execute(q)
max_changeid = _one_or_else(t.fetchall(), 0)
state = scheduler.get_initial_state(max_changeid)
state_json = json.dumps(state)
q = self.quoteq("INSERT INTO schedulers"
" (name, class_name, state)"
" VALUES (?,?,?)")
t.execute(q, (name, class_name, state_json))
sid = t.lastrowid
log.msg("scheduler '%s' got id %d" % (scheduler.name, sid))
scheduler.schedulerid = sid
def scheduler_get_state(self, schedulerid, t):
q = self.quoteq("SELECT state FROM schedulers WHERE schedulerid=?")
t.execute(q, (schedulerid,))
state_json = _one_or_else(t.fetchall())
assert state_json is not None
return json.loads(state_json)
def scheduler_set_state(self, schedulerid, t, state):
state_json = json.dumps(state)
q = self.quoteq("UPDATE schedulers SET state=? WHERE schedulerid=?")
t.execute(q, (state_json, schedulerid))
def get_sourcestampid(self, ss, t):
"""Given a SourceStamp (which may or may not have an ssid), make sure
the contents are in the database, and return the ssid. If the
SourceStamp originally came from the DB (and thus already has an
ssid), just return the ssid. If not, create a new row for it."""
if ss.ssid is not None:
return ss.ssid
patchid = None
if ss.patch:
patchlevel = ss.patch[0]
diff = ss.patch[1]
subdir = None
if len(ss.patch) > 2:
subdir = ss.patch[2]
q = self.quoteq("INSERT INTO patches"
" (patchlevel, patch_base64, subdir)"
" VALUES (?,?,?)")
t.execute(q, (patchlevel, base64.b64encode(diff), subdir))
patchid = t.lastrowid
t.execute(self.quoteq("INSERT INTO sourcestamps"
" (branch, revision, patchid, project, repository)"
" VALUES (?,?,?,?,?)"),
(ss.branch, ss.revision, patchid, ss.project, ss.repository))
ss.ssid = t.lastrowid
q2 = self.quoteq("INSERT INTO sourcestamp_changes"
" (sourcestampid, changeid) VALUES (?,?)")
for c in ss.changes:
t.execute(q2, (ss.ssid, c.number))
return ss.ssid
def create_buildset(self, ssid, reason, properties, builderNames, t,
external_idstring=None):
# this creates both the BuildSet and the associated BuildRequests
now = self._getCurrentTime()
t.execute(self.quoteq("INSERT INTO buildsets"
" (external_idstring, reason,"
" sourcestampid, submitted_at)"
" VALUES (?,?,?,?)"),
(external_idstring, reason, ssid, now))
bsid = t.lastrowid
for propname, propvalue in properties.properties.items():
encoded_value = json.dumps(propvalue)
t.execute(self.quoteq("INSERT INTO buildset_properties"
" (buildsetid, property_name, property_value)"
" VALUES (?,?,?)"),
(bsid, propname, encoded_value))
brids = []
for bn in builderNames:
t.execute(self.quoteq("INSERT INTO buildrequests"
" (buildsetid, buildername, submitted_at)"
" VALUES (?,?,?)"),
(bsid, bn, now))
brid = t.lastrowid
brids.append(brid)
self.notify("add-buildset", bsid)
self.notify("add-buildrequest", *brids)
return bsid
def scheduler_classify_change(self, schedulerid, number, important, t):
q = self.quoteq("INSERT INTO scheduler_changes"
" (schedulerid, changeid, important)"
" VALUES (?,?,?)")
t.execute(q, (schedulerid, number, bool(important)))
def scheduler_get_classified_changes(self, schedulerid, t):
q = self.quoteq("SELECT changeid, important"
" FROM scheduler_changes"
" WHERE schedulerid=?")
t.execute(q, (schedulerid,))
important = []
unimportant = []
for (changeid, is_important) in t.fetchall():
c = self.getChangeNumberedNow(changeid, t)
if is_important:
important.append(c)
else:
unimportant.append(c)
return (important, unimportant)
def scheduler_retire_changes(self, schedulerid, changeids, t):
t.execute(self.quoteq("DELETE FROM scheduler_changes"
" WHERE schedulerid=? AND changeid IN ")
+ self.parmlist(len(changeids)),
(schedulerid,) + tuple(changeids))
def scheduler_subscribe_to_buildset(self, schedulerid, bsid, t):
# scheduler_get_subscribed_buildsets(schedulerid) will return
# information about all buildsets that were subscribed this way
t.execute(self.quoteq("INSERT INTO scheduler_upstream_buildsets"
" (buildsetid, schedulerid, active)"
" VALUES (?,?,?)"),
(bsid, schedulerid, 1))
def scheduler_get_subscribed_buildsets(self, schedulerid, t):
# returns list of (bsid, ssid, complete, results) pairs
t.execute(self.quoteq("SELECT bs.id, "
" bs.sourcestampid, bs.complete, bs.results"
" FROM scheduler_upstream_buildsets AS s,"
" buildsets AS bs"
" WHERE s.buildsetid=bs.id"
" AND s.schedulerid=?"
" AND s.active=1"),
(schedulerid,))
return t.fetchall()
def scheduler_unsubscribe_buildset(self, schedulerid, buildsetid, t):
t.execute(self.quoteq("UPDATE scheduler_upstream_buildsets"
" SET active=0"
" WHERE buildsetid=? AND schedulerid=?"),
(buildsetid, schedulerid))
# BuildRequest-manipulation methods
def getBuildRequestWithNumber(self, brid, t=None):
assert isinstance(brid, (int, long))
if t:
br = self._txn_getBuildRequestWithNumber(t, brid)
else:
br = self.runInteractionNow(self._txn_getBuildRequestWithNumber,
brid)
return br
def _txn_getBuildRequestWithNumber(self, t, brid):
assert isinstance(brid, (int, long))
t.execute(self.quoteq("SELECT br.buildsetid, bs.reason,"
" bs.sourcestampid, br.buildername,"
" bs.submitted_at, br.priority"
" FROM buildrequests AS br, buildsets AS bs"
" WHERE br.id=? AND br.buildsetid=bs.id"),
(brid,))
r = t.fetchall()
if not r:
return None
(bsid, reason, ssid, builder_name, submitted_at, priority) = r[0]
ss = self.getSourceStampNumberedNow(ssid, t)
properties = self.get_properties_from_db("buildset_properties",
"buildsetid", bsid, t)
br = BuildRequest(reason, ss, builder_name, properties)
br.submittedAt = submitted_at
br.priority = priority
br.id = brid
br.bsid = bsid
return br
def get_buildername_for_brid(self, brid):
assert isinstance(brid, (int, long))
return self.runInteractionNow(self._txn_get_buildername_for_brid, brid)
def _txn_get_buildername_for_brid(self, t, brid):
assert isinstance(brid, (int, long))
t.execute(self.quoteq("SELECT buildername FROM buildrequests"
" WHERE id=?"),
(brid,))
r = t.fetchall()
if not r:
return None
return r[0][0]
def get_unclaimed_buildrequests(self, buildername, old, master_name,
master_incarnation, t):
t.execute(self.quoteq("SELECT br.id"
" FROM buildrequests AS br, buildsets AS bs"
" WHERE br.buildername=? AND br.complete=0"
" AND br.buildsetid=bs.id"
" AND (br.claimed_at<?"
" OR (br.claimed_by_name=?"
" AND br.claimed_by_incarnation!=?))"
" ORDER BY br.priority DESC,bs.submitted_at ASC"),
(buildername, old, master_name, master_incarnation))
requests = [self.getBuildRequestWithNumber(brid, t)
for (brid,) in t.fetchall()]
return requests
def claim_buildrequests(self, now, master_name, master_incarnation, brids,
t=None):
if not brids:
return
if t:
self._txn_claim_buildrequests(t, now, master_name,
master_incarnation, brids)
else:
self.runInteractionNow(self._txn_claim_buildrequests,
now, master_name, master_incarnation, brids)
def _txn_claim_buildrequests(self, t, now, master_name, master_incarnation,
brids):
q = self.quoteq("UPDATE buildrequests"
" SET claimed_at = ?,"
" claimed_by_name = ?, claimed_by_incarnation = ?"
" WHERE id IN " + self.parmlist(len(brids)))
qargs = [now, master_name, master_incarnation] + list(brids)
t.execute(q, qargs)
def build_started(self, brid, buildnumber):
return self.runInteractionNow(self._txn_build_started, brid, buildnumber)
def _txn_build_started(self, t, brid, buildnumber):
now = self._getCurrentTime()
t.execute(self.quoteq("INSERT INTO builds (number, brid, start_time)"
" VALUES (?,?,?)"),
(buildnumber, brid, now))
bid = t.lastrowid
self.notify("add-build", bid)
return bid
def builds_finished(self, bids):
return self.runInteractionNow(self._txn_build_finished, bids)
def _txn_build_finished(self, t, bids):
now = self._getCurrentTime()
q = self.quoteq("UPDATE builds SET finish_time = ?"
" WHERE id IN " + self.parmlist(len(bids)))
qargs = [now] + list(bids)
t.execute(q, qargs)
def get_build_info(self, bid):
return self.runInteractionNow(self._txn_get_build_info, bid)
def _txn_get_build_info(self, t, bid):
# brid, buildername, buildnum
t.execute(self.quoteq("SELECT b.brid,br.buildername,b.number"
" FROM builds AS b, buildrequests AS br"
" WHERE b.id=? AND b.brid=br.id"),
(bid,))
res = t.fetchall()
if res:
return res[0]
return (None,None,None)
def get_buildnums_for_brid(self, brid):
return self.runInteractionNow(self._txn_get_buildnums_for_brid, brid)
def _txn_get_buildnums_for_brid(self, t, brid):
t.execute(self.quoteq("SELECT number FROM builds WHERE brid=?"),
(brid,))
return [number for (number,) in t.fetchall()]
def resubmit_buildrequests(self, brids):
return self.runInteraction(self._txn_resubmit_buildreqs, brids)
def _txn_resubmit_buildreqs(self, t, brids):
# the interrupted build that gets resubmitted will still have the
# same submitted_at value, so it should be re-started first
q = self.quoteq("UPDATE buildrequests"
" SET claimed_at=0,"
" claimed_by_name=NULL, claimed_by_incarnation=NULL"
" WHERE id IN " + self.parmlist(len(brids)))
t.execute(q, brids)
self.notify("add-buildrequest", *brids)
def retire_buildrequests(self, brids, results):
return self.runInteractionNow(self._txn_retire_buildreqs, brids,results)
def _txn_retire_buildreqs(self, t, brids, results):
now = self._getCurrentTime()
#q = self.db.quoteq("DELETE FROM buildrequests WHERE id IN "
# + self.db.parmlist(len(brids)))
q = self.quoteq("UPDATE buildrequests"
" SET complete=1, results=?, complete_at=?"
" WHERE id IN " + self.parmlist(len(brids)))
t.execute(q, [results, now]+brids)
# now, does this cause any buildsets to complete?
q = self.quoteq("SELECT bs.id"
" FROM buildsets AS bs, buildrequests AS br"
" WHERE br.buildsetid=bs.id AND bs.complete=0"
" AND br.id in "
+ self.parmlist(len(brids)))
t.execute(q, brids)
bsids = [bsid for (bsid,) in t.fetchall()]
for bsid in bsids:
self._check_buildset(t, bsid, now)
self.notify("retire-buildrequest", *brids)
self.notify("modify-buildset", *bsids)
def cancel_buildrequests(self, brids):
return self.runInteractionNow(self._txn_cancel_buildrequest, brids)
def _txn_cancel_buildrequest(self, t, brids):
        # TODO: we aren't entirely sure if it'd be safe to just delete the
        # buildrequest: what else might be waiting on it that would then just
        # hang forever? _check_buildset() should handle it well (an empty
        # buildset will appear complete and SUCCESS-ful). But we haven't
        # thought it through enough to be sure. So for now, "cancel" means
        # "mark as complete and FAILURE".
if True:
now = self._getCurrentTime()
q = self.quoteq("UPDATE buildrequests"
" SET complete=1, results=?, complete_at=?"
" WHERE id IN " + self.parmlist(len(brids)))
t.execute(q, [FAILURE, now]+brids)
else:
q = self.quoteq("DELETE FROM buildrequests"
" WHERE id IN " + self.parmlist(len(brids)))
t.execute(q, brids)
# now, does this cause any buildsets to complete?
q = self.quoteq("SELECT bs.id"
" FROM buildsets AS bs, buildrequests AS br"
" WHERE br.buildsetid=bs.id AND bs.complete=0"
" AND br.id in "
+ self.parmlist(len(brids)))
t.execute(q, brids)
bsids = [bsid for (bsid,) in t.fetchall()]
for bsid in bsids:
self._check_buildset(t, bsid, now)
self.notify("cancel-buildrequest", *brids)
self.notify("modify-buildset", *bsids)
def _check_buildset(self, t, bsid, now):
q = self.quoteq("SELECT br.complete,br.results"
" FROM buildsets AS bs, buildrequests AS br"
" WHERE bs.complete=0"
" AND br.buildsetid=bs.id AND bs.id=?")
t.execute(q, (bsid,))
results = t.fetchall()
is_complete = True
bs_results = SUCCESS
for (complete, r) in results:
if not complete:
# still waiting
is_complete = False
if r == FAILURE:
bs_results = r
if is_complete:
            # every request is complete; record the aggregate result
q = self.quoteq("UPDATE buildsets"
" SET complete=1, complete_at=?, results=?"
" WHERE id=?")
t.execute(q, (now, bs_results, bsid))
def get_buildrequestids_for_buildset(self, bsid):
return self.runInteractionNow(self._txn_get_buildrequestids_for_buildset,
bsid)
def _txn_get_buildrequestids_for_buildset(self, t, bsid):
t.execute(self.quoteq("SELECT buildername,id FROM buildrequests"
" WHERE buildsetid=?"),
(bsid,))
return dict(t.fetchall())
def examine_buildset(self, bsid):
return self.runInteractionNow(self._txn_examine_buildset, bsid)
def _txn_examine_buildset(self, t, bsid):
# "finished" means complete=1 for all builds. Return False until
# all builds are complete, then True.
# "successful" means complete=1 and results!=FAILURE for all builds.
# Returns None until the last success or the first failure. Returns
# False if there is at least one failure. Returns True if all are
# successful.
q = self.quoteq("SELECT br.complete,br.results"
" FROM buildsets AS bs, buildrequests AS br"
" WHERE br.buildsetid=bs.id AND bs.id=?")
t.execute(q, (bsid,))
results = t.fetchall()
finished = True
successful = None
for (c,r) in results:
if not c:
finished = False
if c and r not in (SUCCESS, WARNINGS):
successful = False
if finished and successful is None:
successful = True
return (successful, finished)
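    # Derived from the loop above, the possible return pairs are:
    #   (None, False)  -- some build incomplete, no failures seen yet
    #   (False, False) -- some build incomplete, a failure already seen
    #   (False, True)  -- all complete, at least one non-SUCCESS/WARNINGS
    #   (True, True)   -- all complete and all successful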
def get_active_buildset_ids(self):
return self.runInteractionNow(self._txn_get_active_buildset_ids)
def _txn_get_active_buildset_ids(self, t):
t.execute("SELECT id FROM buildsets WHERE complete=0")
return [bsid for (bsid,) in t.fetchall()]
def get_buildset_info(self, bsid):
return self.runInteractionNow(self._txn_get_buildset_info, bsid)
def _txn_get_buildset_info(self, t, bsid):
q = self.quoteq("SELECT external_idstring, reason, sourcestampid,"
" complete, results"
" FROM buildsets WHERE id=?")
t.execute(q, (bsid,))
res = t.fetchall()
if res:
(external, reason, ssid, complete, results) = res[0]
external_idstring = str_or_none(external)
reason = str_or_none(reason)
complete = bool(complete)
return (external_idstring, reason, ssid, complete, results)
return None # shouldn't happen
def get_pending_brids_for_builder(self, buildername):
return self.runInteractionNow(self._txn_get_pending_brids_for_builder,
buildername)
def _txn_get_pending_brids_for_builder(self, t, buildername):
# "pending" means unclaimed and incomplete. When a build is returned
# to the pool (self.resubmit_buildrequests), the claimed_at= field is
# reset to zero.
t.execute(self.quoteq("SELECT id FROM buildrequests"
" WHERE buildername=? AND"
" complete=0 AND claimed_at=0"),
(buildername,))
return [brid for (brid,) in t.fetchall()]
# test/debug methods
def has_pending_operations(self):
return bool(self._pending_operation_count)
threadable.synchronize(DBConnector)
|
centrumholdings/buildbot
|
buildbot/db/connector.py
|
Python
|
gpl-2.0
| 44,327
|
[
"Brian"
] |
38b57d0ad4d3c20fdc0bf92d06349cbf53dd96613e4d10e88672957d23fc7d91
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements classes and methods for processing LAMMPS output
files (log and dump).
"""
import glob
import re
from io import StringIO
import numpy as np
import pandas as pd
from monty.io import zopen
from monty.json import MSONable
from pymatgen.io.lammps.data import LammpsBox
__author__ = "Kiran Mathew, Zhi Deng"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "1.0"
__maintainer__ = "Zhi Deng"
__email__ = "z4deng@eng.ucsd.edu"
__date__ = "Aug 1, 2018"
class LammpsDump(MSONable):
"""
Object for representing dump data for a single snapshot.
"""
def __init__(self, timestep, natoms, box, data):
"""
Base constructor.
Args:
timestep (int): Current timestep.
natoms (int): Total number of atoms in the box.
box (LammpsBox): Simulation box.
data (pd.DataFrame): Dumped atomic data.
"""
self.timestep = timestep
self.natoms = natoms
self.box = box
self.data = data
@classmethod
def from_string(cls, string):
"""
Constructor from string parsing.
Args:
string (str): Input string.
"""
lines = string.split("\n")
timestep = int(lines[1])
natoms = int(lines[3])
box_arr = np.loadtxt(StringIO("\n".join(lines[5:8])))
bounds = box_arr[:, :2]
tilt = None
if "xy xz yz" in lines[4]:
tilt = box_arr[:, 2]
x = (0, tilt[0], tilt[1], tilt[0] + tilt[1])
y = (0, tilt[2])
bounds -= np.array([[min(x), max(x)], [min(y), max(y)], [0, 0]])
box = LammpsBox(bounds, tilt)
data_head = lines[8].replace("ITEM: ATOMS", "").split()
data = pd.read_csv(StringIO("\n".join(lines[9:])), names=data_head, delim_whitespace=True)
return cls(timestep, natoms, box, data)
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
LammpsDump
"""
items = {"timestep": d["timestep"], "natoms": d["natoms"]}
items["box"] = LammpsBox.from_dict(d["box"])
items["data"] = pd.read_json(d["data"], orient="split")
return cls(**items)
def as_dict(self):
"""
Returns: MSONable dict
"""
d = {}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["timestep"] = self.timestep
d["natoms"] = self.natoms
d["box"] = self.box.as_dict()
d["data"] = self.data.to_json(orient="split")
return d
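# Serialization round-trip sketch (the `dump` object here is hypothetical);
# the atomic data survives as JSON via pandas' orient="split" format:
#
#     d = dump.as_dict()
#     dump2 = LammpsDump.from_dict(d)
#     assert dump2.timestep == dump.timestep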
def parse_lammps_dumps(file_pattern):
"""
Generator that parses dump file(s).
Args:
        file_pattern (str): Filename to parse. The timestep wildcard
            (e.g., dump.atom.'*') is supported, and the files are parsed
            in order of increasing timestep.
Yields:
LammpsDump for each available snapshot.
"""
files = glob.glob(file_pattern)
if len(files) > 1:
pattern = r"%s" % file_pattern.replace("*", "([0-9]+)")
pattern = pattern.replace("\\", "\\\\")
files = sorted(files, key=lambda f: int(re.match(pattern, f).group(1)))
for fname in files:
with zopen(fname, "rt") as f:
dump_cache = []
for line in f:
if line.startswith("ITEM: TIMESTEP"):
if len(dump_cache) > 0:
yield LammpsDump.from_string("".join(dump_cache))
dump_cache = [line]
else:
dump_cache.append(line)
yield LammpsDump.from_string("".join(dump_cache))
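# Usage sketch (the file pattern below is hypothetical); each snapshot is
# yielded lazily, so arbitrarily long trajectories can be streamed:
#
#     for dump in parse_lammps_dumps("dump.atom.*"):
#         print(dump.timestep, len(dump.data))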
def parse_lammps_log(filename="log.lammps"):
"""
    Parses a log file with a focus on thermo data. Both one-line and
    multi-line formats are supported. Any incomplete runs (i.e., runs
    without a "Loop time" marker) are skipped.
    Notes:
        SHAKE stats printed with thermo data are not supported yet.
        They are ignored in the multi-line format, but may break
        dataframe parsing in the one-line format.
Args:
filename (str): Filename to parse.
Returns:
[pd.DataFrame] containing thermo data for each completed run.
"""
with zopen(filename, "rt") as f:
lines = f.readlines()
begin_flag = (
"Memory usage per processor =",
"Per MPI rank memory allocation (min/avg/max) =",
)
end_flag = "Loop time of"
begins, ends = [], []
for i, l in enumerate(lines):
if l.startswith(begin_flag):
begins.append(i)
elif l.startswith(end_flag):
ends.append(i)
def _parse_thermo(lines):
multi_pattern = r"-+\s+Step\s+([0-9]+)\s+-+"
# multi line thermo data
if re.match(multi_pattern, lines[0]):
timestep_marks = [i for i, l in enumerate(lines) if re.match(multi_pattern, l)]
timesteps = np.split(lines, timestep_marks)[1:]
dicts = []
kv_pattern = r"([0-9A-Za-z_\[\]]+)\s+=\s+([0-9eE\.+-]+)"
for ts in timesteps:
data = {}
data["Step"] = int(re.match(multi_pattern, ts[0]).group(1))
data.update({k: float(v) for k, v in re.findall(kv_pattern, "".join(ts[1:]))})
dicts.append(data)
df = pd.DataFrame(dicts)
# rearrange the sequence of columns
columns = ["Step"] + [k for k, v in re.findall(kv_pattern, "".join(timesteps[0][1:]))]
df = df[columns]
# one line thermo data
else:
df = pd.read_csv(StringIO("".join(lines)), delim_whitespace=True)
return df
runs = []
for b, e in zip(begins, ends):
runs.append(_parse_thermo(lines[b + 1 : e]))
return runs
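# Usage sketch (assumes a completed run in log.lammps; the thermo column
# names depend on the thermo_style used in the input script):
#
#     runs = parse_lammps_log("log.lammps")
#     print(runs[-1]["Step"].iloc[-1])  # last recorded step of final run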
|
vorwerkc/pymatgen
|
pymatgen/io/lammps/outputs.py
|
Python
|
mit
| 5,940
|
[
"LAMMPS",
"pymatgen"
] |
476f8849b041764da5e54eb0f68fd283817e7e02d480fbe12d09bdbe01f0e33a
|
# -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
# Written by Lorenzo Cappelletti <lorenzo.cappelletti@email.it>, 2003
# Benny Malengier <benny.malengier@gramps-project.org>, 2007
# Maria-Cristina Ciocci <see above>, 2007
#
"""
Italian-Specific classes for relationships.
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Person
import gramps.gen.relationship
#-------------------------------------------------------------------------
#
# Shared constants
#
#-------------------------------------------------------------------------
_level = [
"", "prim%(gen)s", "second%(gen)s", "terz%(gen)s", "quart%(gen)s",
"quint%(gen)s", "sest%(gen)s",
"settim%(gen)s", "ottav%(gen)s", "non%(gen)s", "decim%(gen)s",
"undicesim%(gen)s", "dodicesim%(gen)s",
"tredicesim%(gen)s", "quattordicesim%(gen)s", "quindicesim%(gen)s",
"sedicesim%(gen)s", "diciasettesim%(gen)s", "diciottesim%(gen)s",
"diciannovesim%(gen)s", "ventesim%(gen)s"
]
_level_m = [
"", "primo", "secondo", "terzo", "quarto",
"quinto", "sesto",
"settimo", "ottavo", "nono", "decimo",
"undicesimo", "dodicesimo",
"tredicesimo", "quattordicesimo", "quindicesimo",
"sedicesimo", "diciasettesimo", "diciottesimo",
"diciannovesimo", "ventesimo"
]
_level_f = [
"", "prima", "seconda", "terza", "quarta",
"quinta", "sesta",
"settima", "ottava", "nona", "decima",
"undicesima", "dodicesima",
"tredicesima", "quattordicesima", "quindicesima",
"sedicesima", "diciasettesima", "diciottesima",
"diciannovesima", "ventesima"
]
_father_level = [ "",
"il padre%(step)s%(inlaw)s",
"il nonno%(step)s%(inlaw)s",
"il bisnonno%(step)s%(inlaw)s",
"il trisnonno%(step)s%(inlaw)s",
]
_mother_level = [ "",
"la madre%(step)s%(inlaw)s",
"la nonna%(step)s%(inlaw)s",
"la bisnonna%(step)s%(inlaw)s",
"la trisnonna%(step)s%(inlaw)s",
]
_son_level = [ "", "il figlio%(step)s%(inlaw)s",
"il nipote%(step)s%(inlaw)s diretto",
"il pronipote%(step)s%(inlaw)s diretto"
]
_daughter_level = [ "", "la figlia%(step)s%(inlaw)s",
"la nipote%(step)s%(inlaw)s diretta",
"la pronipote%(step)s%(inlaw)s diretta"
]
_brother_level = [ "", "il fratello%(step)s%(inlaw)s",
"lo zio%(step)s%(inlaw)s",
"il prozio%(step)s%(inlaw)s",
]
_sister_level = [ "", "la sorella%(step)s%(inlaw)s",
"la zia%(step)s%(inlaw)s",
"la prozia%(step)s%(inlaw)s",
]
_nephew_level = [ "", "il nipote%(step)s%(inlaw)s",
"il pronipote%(step)s%(inlaw)s"
]
_niece_level = [ "", "la nipote%(step)s%(inlaw)s",
"la pronipote%(step)s%(inlaw)s"
]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
INLAW = ' acquisit%(gen)s'
STEP = ' adottiv%(gen)s'
def __init__(self):
gramps.gen.relationship.RelationshipCalculator.__init__(self)
#-------------------------------------------------------------------------
#
# Specific relationship functions
#
    # To be honest, I doubt that this relationship naming method is widely
    # used... If you know of a rigorous Italian naming convention,
    # please drop me an email.
#
#-------------------------------------------------------------------------
def __gen_suffix(self, gender):
if gender == Person.MALE:
return 'o'
return 'a'
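    # The name templates are filled in two passes: the first `%` substitutes
    # step/inlaw (which may themselves contain %(gen)s), the second the
    # gender suffix. A worked example using _father_level[1] and STEP:
    #
    #     "il padre%(step)s%(inlaw)s" \
    #         % {'step': ' adottiv%(gen)s', 'inlaw': ''} \
    #         % {'gen': 'o'}            # -> "il padre adottivo"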
def get_parents (self, level):
if level > len(_level)-1:
return "remote ancestors"
else:
return "%si genitori" % _level[level]
def get_father (self, level, step='', inlaw=''):
gen = "o"
if level < len(_father_level):
return _father_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'il nonno%(step)s%(inlaw)s della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return "l'avo%(step)s%(inlaw)s (%(level)d generazioni)" % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_mother (self, level, step='', inlaw=''):
gen = "a"
if level < len(_father_level):
return _mother_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'la nonna%(step)s%(inlaw)s della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return "l'ava%(step)s%(inlaw)s (%(level)d generazioni)" % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_parent_unknown(self, level, step='', inlaw=''):
gen = "o/a"
if level == 1:
return "uno dei genitori%(step)s%(inlaw)s" % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_father_level):
return _mother_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'nonno/a%(step)s%(inlaw)s della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return "l'ava%(step)s%(inlaw)s (%(level)d generazioni)" % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_son (self, level, step="", inlaw=""):
gen = "o"
if level < len(_son_level):
return _son_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'il nipote%(step)s%(inlaw)s diretto della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return "il discendente%(step)s%(inlaw)s diretto (%(level)d generazioni)" % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_daughter (self, level, step="", inlaw=""):
gen = "a"
if level < len(_daughter_level):
return _daughter_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'la nipote%(step)s%(inlaw)s diretta della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return "la discendente%(step)s%(inlaw)s diretta (%(level)d generazioni)" % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_uncle (self, level, step="", inlaw=""):
gen = "o"
if level < len(_brother_level):
return _brother_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'lo zio%(step)s%(inlaw)s della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return 'uno zio%(step)s%(inlaw)s lontano (%(level)d generazioni)' % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_aunt (self, level, step="", inlaw=""):
gen = "a"
if level < len(_brother_level):
return _sister_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'la zia%(step)s%(inlaw)s della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return 'una zia%(step)s%(inlaw)s lontana (%(level)d generazioni)' % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_nephew(self, level, step="", inlaw=""):
gen = "o"
if level < len(_nephew_level):
return _nephew_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'il nipote%(step)s%(inlaw)s ' \
'della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return 'un nipote%(step)s%(inlaw)s lontano ('\
'%(level)d generazioni)' % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_niece(self, level, step="", inlaw=""):
gen = "a"
if level < len(_nephew_level):
return _niece_level[level] % {'step': step, 'inlaw': inlaw} \
% {'gen': gen}
elif level < len(_level):
return 'la nipote%(step)s%(inlaw)s ' \
'della %(level_f)s generazione' % {
'level_f': _level_f[level],
'step': step, 'inlaw': inlaw} % {'gen': gen}
else:
return 'una nipote%(step)s%(inlaw)s lontana ('\
'%(level)d generazioni)' % {
'step': step, 'inlaw': inlaw,
'level': level} % {'gen': gen}
def get_male_cousin (self, levelA, levelB, step="", inlaw=""):
gen = "o"
return "il cugino%(step)s%(inlaw)s di %(level)d° grado"\
"(%(levA)d-%(levB)d)" \
% {'level': levelA+levelB-1,
'step': step, 'inlaw': inlaw,
'levA': levelA,
'levB': levelB} % {'gen': gen}
def get_female_cousin (self, levelA, levelB, step="", inlaw=""):
gen = "a"
return "la cugina%(step)s%(inlaw)s di %(level)d° grado"\
"(%(levA)d-%(levB)d)" \
% {'level': levelA+levelB-1,
'step': step, 'inlaw': inlaw,
'levA': levelA,
'levB': levelB} % {'gen': gen}
#-------------------------------------------------------------------------
#
# get_relationship
#
#-------------------------------------------------------------------------
def get_relationship(self, db, orig_person, other_person):
"""
        returns a string representing the relationship between the two people,
along with a list of common ancestors (typically father, mother)
"""
if orig_person is None:
return ("non definito", [])
if orig_person.get_handle() == other_person.get_handle():
return ('', [])
is_spouse = self.is_spouse(db, orig_person, other_person)
if is_spouse:
return (is_spouse, [])
        #get_relationship_distance changed: the first element returned is the
        #relation to the orig person, apparently secondRel in this function
(secondRel, firstRel, common) = \
self.get_relationship_distance_new(db, orig_person, other_person)
if isinstance(common, str):
return (common, [])
elif common:
person_handle = common[0]
else:
return ("", [])
firstRel = len(firstRel)
secondRel = len(secondRel)
if firstRel == 0:
if secondRel == 0:
return ('', common)
elif other_person.get_gender() == Person.MALE:
return (self.get_father(secondRel), common)
else:
return (self.get_mother(secondRel), common)
elif secondRel == 0:
if other_person.get_gender() == Person.MALE:
return (self.get_son(firstRel), common)
else:
return (self.get_daughter(firstRel), common)
elif firstRel == 1:
if other_person.get_gender() == Person.MALE:
return (self.get_uncle(secondRel), common)
else:
return (self.get_aunt(secondRel), common)
elif secondRel == 1:
if other_person.get_gender() == Person.MALE:
return (self.get_nephew(firstRel-1), common)
else:
return (self.get_niece(firstRel-1), common)
else:
if other_person.get_gender() == Person.MALE:
return (self.get_male_cousin(firstRel-1, secondRel-1), common)
else:
return (self.get_female_cousin(firstRel-1, secondRel-1), common)
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
"""
See Comment in Relationship Class (relationship.py)
"""
if only_birth:
step = ''
else:
step = self.STEP
if in_law_a or in_law_b :
inlaw = self.INLAW
else:
inlaw = ''
if gender_b == Person.MALE:
rel_str = "un parente%s%s lontano" % (step, inlaw) % {'gen': 'o'}
elif gender_b == Person.FEMALE:
rel_str = "una parente%s%s lontana" % (step, inlaw) % {'gen': 'a'}
else:
rel_str = "uno dei parenti%s%s lontani" % (step, inlaw) % {'gen': 'i'}
if Gb == 0:
if Ga == 0:
rel_str = 'la stessa persona'
elif Ga == 1 and inlaw and not step:
if gender_b == Person.MALE:
rel_str = 'il suocero'
elif gender_b == Person.FEMALE:
rel_str = 'la suocera'
else:
rel_str = 'uno dei suoceri'
elif Ga == 1 and not inlaw and step:
if gender_b == Person.MALE:
rel_str = 'il patrigno'
elif gender_b == Person.FEMALE:
rel_str = 'la matrigna'
else:
rel_str = 'uno dei genitori adottivi'
elif gender_b == Person.MALE:
rel_str = self.get_father(Ga, step, inlaw)
elif gender_b == Person.FEMALE:
rel_str = self.get_mother(Ga, step, inlaw)
else:
rel_str = self.get_parent_unknown(Ga, step, inlaw)
elif Ga == 0:
if Gb == 1 and inlaw and not step:
if gender_b == Person.MALE:
rel_str = 'il genero'
elif gender_b == Person.FEMALE:
rel_str = 'la nuora'
else:
rel_str = 'genero/nuora'
elif gender_b == Person.MALE:
rel_str = self.get_son(Gb, step, inlaw)
else:
rel_str = self.get_daughter(Gb, step, inlaw)
        elif Gb == 1:
            if Ga == 1 and inlaw and not step:
                if gender_b == Person.MALE:
                    rel_str = 'il cognato'
                elif gender_b == Person.FEMALE:
                    rel_str = 'la cognata'
                else:
                    rel_str = 'il cognato/a'
            elif gender_b == Person.MALE:
                rel_str = self.get_uncle(Ga, step, inlaw)
            else:
                rel_str = self.get_aunt(Ga, step, inlaw)
elif Ga == 1:
if gender_b == Person.MALE:
rel_str = self.get_nephew(Gb-1, step, inlaw)
else:
rel_str = self.get_niece(Gb-1, step, inlaw)
else:
if gender_b == Person.MALE:
rel_str = self.get_male_cousin(Gb-1, Ga-1, step, inlaw)
else:
rel_str = self.get_female_cousin(Gb-1, Ga-1, step, inlaw)
return rel_str
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
"""
Determine the string giving the relation between two siblings of
type sib_type.
Eg: b is the brother of a
Here 'brother' is the string we need to determine
This method gives more details about siblings than
get_single_relationship_string can do.
.. warning:: DON'T TRANSLATE THIS PROCEDURE IF LOGIC IS EQUAL IN YOUR
LANGUAGE, AND SAME METHODS EXIST (get_uncle, get_aunt,
get_sibling)
"""
if in_law_a or in_law_b :
inlaw = self.INLAW
else:
inlaw = ''
if sib_type == self.NORM_SIB or sib_type == self.UNKNOWN_SIB:
if not inlaw:
if gender_b == Person.MALE:
rel_str = self.get_uncle(1, '', '')
else:
rel_str = self.get_aunt(1, '', '')
else:
if gender_b == Person.MALE:
rel_str = 'il cognato'
elif gender_b == Person.FEMALE:
rel_str = 'la cognata'
else:
rel_str = 'il cognato/a'
elif sib_type == self.HALF_SIB_FATHER \
or sib_type == self.HALF_SIB_MOTHER \
or sib_type == self.STEP_SIB:
            # Italian makes no distinction between half- and step-siblings!
if not inlaw:
if gender_b == Person.MALE:
rel_str = 'il fratellastro'
elif gender_b == Person.FEMALE:
rel_str = 'la sorellastra'
else:
rel_str = 'il fratellastro/sorellastra'
else:
if gender_b == Person.MALE:
rel_str = 'il fratellastro acquisito'
elif gender_b == Person.FEMALE:
rel_str = 'la sorellastra acquisita'
else:
rel_str = 'il fratellastro/sorellastra acquisito/a'
return rel_str
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_it.py
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gramps.gen.relationship import test
RC = RelationshipCalculator()
test(RC, True)
|
SNoiraud/gramps
|
gramps/plugins/rel/rel_it.py
|
Python
|
gpl-2.0
| 21,059
|
[
"Brian"
] |
e3eb8e6930f5d464a94048c7ab492d723c4521afc342b86e9b07edc146e27ba5
|
import os
from os.path import join
import logging
import glob
import csv
from collections import namedtuple
import sys
import pprint
import pickle
import multiprocessing
cpu_count = multiprocessing.cpu_count()
# Project working directory, where samples will be linked to
samples_path = '/path/to/working_directory'
# Path to supporting files
supporting_files = '/path/to/Quinn2013_supporting_files'
original_path = join(samples_path, 'original')
alternate_path = join(samples_path, 'alternate')
coverage_cutoffs = join(supporting_files, 'cov_cutoffs.csv')
dros_gtf = join(supporting_files, 'iGenomes/Drosophila_melanogaster/Ensembl/'
'BDGP5.25/Annotation/Genes/genes.gtf')
dros_gtf_index = join(supporting_files, 'transcriptome_data')
dros_gtf_cds = join(supporting_files, 'CDSgtf/CDS.gtf')
all_snps = join(supporting_files, 'freeze2_sorted_nohead_small.vcf')
# Changed this to vcf so I could use it as the reference vcf in snps_combine.
# Used convert_Frank_vcfs_to_bed.py to convert.
dgrp_5_lines_sup = join(supporting_files, 'freeze2_Filter2homoState.bins__5.vcf')
dgrp_both_11 = join(supporting_files, 'freeze2_362_765_noINDEL_1-1.vcf')
current_snp_file = all_snps
current_genes_file = dros_gtf
# Software paths
varscan_path = join(supporting_files, 'VarScan.v2.3.5.jar')
gatk_path = join(supporting_files, 'GenomeAnalysisTK-2.4-9/GenomeAnalysisTK.jar')
trimmomatic_path = join(supporting_files, 'Trimmomatic-0.30/trimmomatic-0.30.jar')
# Create logger
log_filename = join(samples_path, 'pipeline_log.log')
log_format = '%(levelname)s [%(asctime)s] %(message)s'
datefmt = '%m/%d/%Y %H:%M:%S'
log = logging.getLogger('pipeline')
log.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_filename)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter(fmt=log_format, datefmt=datefmt)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
log.info('\n\n')
log.info('{:-^50}'.format('START'))
log.info('Creating directories')
if not os.path.exists(original_path):
os.mkdir(original_path)
if not os.path.exists(alternate_path):
os.mkdir(alternate_path)
# Link in sample
os.chdir(samples_path)
try:
os.symlink(join(supporting_files, 'massouras_FASTA', '16_A12_pUn_down.fastq'),
'16_A12_pUn_down.fastq')
except OSError:
pass
# Load in the samples list if it exists, or else build it
try:
with open(join(samples_path, 'samples_list.p'), 'rb') as pf:
samples_list = pickle.load(pf)
num_samples = list({f[:-6] for f in os.listdir(samples_path)
if f.endswith('.fastq') and
not (f.endswith('trim_R1.fastq') or f.endswith('trim_R2.fastq'))})
assert len(samples_list) == len(num_samples), 'unpickling error!'
log.info('unpickled samples_list')
except IOError:
samples_list = list({f[:-6] for f in os.listdir(samples_path)
if f.endswith('.fastq') and not
(f.endswith('trim_R1.fastq') or f.endswith('trim_R2.fastq'))})
with open(join(samples_path, 'samples_list.p'), 'wb') as pf:
pickle.dump(samples_list, pf)
log.info('built and pickled samples_list')
pretty_list = pprint.pformat(samples_list)
log.debug('Samples list:\n{}'.format(pretty_list))
if samples_list:
for sample in samples_list:
if not os.path.exists(join(original_path, sample)):
os.mkdir(join(original_path, sample))
if not os.path.exists(join(alternate_path, sample)):
os.mkdir(join(alternate_path, sample))
else:
raise AssertionError('Add samples to samples_path and restart')
original_fastas = ['genome.fa' for sample in samples_list]
alternate_fastas = [(sample + '.fa') for sample in samples_list]
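# Resulting layout sketch (paths illustrative): each sample gets a directory
# under original/ holding symlinks to the shared Bowtie2 index and fasta
# index files, while alternate/<sample>/ is reserved for the per-sample
# fasta named above, e.g. alternate/16_A12_pUn_down/16_A12_pUn_down.fa.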
def original_fasta_link():
'''
Link to original reference fastas - these are needed for alignment and
for building the alternate fastas later.
'''
sequence_path = join(supporting_files, 'iGenomes/Drosophila_melanogaster/Ensembl/'
'BDGP5.25/Sequence')
bowtie_files = glob.glob(join(sequence_path, 'Bowtie2Index', 'genome.*'))
for sample in samples_list:
os.chdir(join(original_path, sample))
# Link to bowtie index
for index_file in bowtie_files:
try:
os.symlink(index_file, os.path.basename(index_file))
except OSError:
pass
# Link to fasta index and dictionary (convenience to prevent mpileup from building)
try:
os.symlink(join(sequence_path, 'WholeGenomeFasta', 'genome.dict'),
'genome.dict')
except OSError:
pass
try:
os.symlink(join(sequence_path, 'WholeGenomeFasta', 'genome.fa.fai'),
'genome.fa.fai')
except OSError:
pass
original_fasta_link()
log.info('Setup complete!')
|
d-quinn/bio_quinn2013
|
SNP_calling/DGRP_all/mglobals.py
|
Python
|
mit
| 4,959
|
[
"Bowtie"
] |
b777e32a53a735c8c14fe0982b8305005ef197314b0b0211a6e40606028b1866
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for header_merger module."""
from collections import OrderedDict
import unittest
from pysam import libcbcf
from gcp_variant_transforms.beam_io import vcf_header_io
from gcp_variant_transforms.libs import vcf_field_conflict_resolver
from gcp_variant_transforms.libs.header_merger import HeaderMerger
FILE_1_LINES = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
'##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">\n',
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n',
'##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="GQ">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']
FILE_2_LINES = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS2,Number=1,Type=Integer,Description="Number samples">\n',
'##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">\n',
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n',
'##FORMAT=<ID=GQ2,Number=1,Type=Integer,Description="GQ">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']
class HeaderMergerTest(unittest.TestCase):
"""Test cases for HeaderMerger module."""
def _get_header_merger(self, split_alternate_allele_info_fields=True):
resolver = vcf_field_conflict_resolver.FieldConflictResolver(
split_alternate_allele_info_fields)
merger = HeaderMerger(resolver)
return merger
def _get_header_from_lines(self, lines):
header = libcbcf.VariantHeader()
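    # The final '#CHROM ...' line is skipped below: add_line() is meant for
    # '##' meta-information lines, and the sample columns are presumably not
    # needed for these header-merging tests.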
for line in lines[:-1]:
header.add_line(line)
return vcf_header_io.VcfHeader(infos=header.info,
filters=header.filters,
alts=header.alts,
formats=header.formats,
contigs=header.contigs)
def test_merge_header_with_empty_one(self):
merger = self._get_header_merger()
header_1 = self._get_header_from_lines(FILE_1_LINES)
header_2 = vcf_header_io.VcfHeader()
merger.merge(header_1, header_2)
merger.merge(header_2, header_1)
self.assertCountEqual(list(header_1.infos.keys()), ['NS', 'AF'])
self.assertCountEqual(list(header_1.formats.keys()), ['GT', 'GQ'])
self.assertCountEqual(list(header_2.infos.keys()), ['NS', 'AF'])
self.assertCountEqual(list(header_2.formats.keys()), ['GT', 'GQ'])
def test_merge_two_headers(self):
main_header = self._get_header_from_lines(FILE_1_LINES)
secondary_header = self._get_header_from_lines(FILE_2_LINES)
merger = self._get_header_merger()
merger.merge(main_header, secondary_header)
self.assertCountEqual(list(main_header.infos.keys()), ['NS', 'AF', 'NS2'])
self.assertCountEqual(list(main_header.formats.keys()), ['GT', 'GQ', 'GQ2'])
def test_merge_two_type_conflicting_but_resolvable_headers(self):
    # These two headers have a type conflict (Integer vs Float); however, the
    # pipeline doesn't raise an error because the type conflict is resolvable.
lines_1 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']
lines_2 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']
main_header = self._get_header_from_lines(lines_1)
secondary_header = self._get_header_from_lines(lines_2)
merger = self._get_header_merger()
merger.merge(main_header, secondary_header)
self.assertCountEqual(list(main_header.infos.keys()), ['NS'])
self.assertCountEqual(main_header.infos['NS'],
OrderedDict([('id', 'NS'),
('num', 1),
('type', 'Float'),
('desc', 'Number samples'),
('source', None),
('version', None)]))
def test_merge_two_num_conflicting_but_resolvable_headers_1(self):
    # These two headers have a conflict in the Number field (2 vs dot);
    # however, the pipeline doesn't raise an error because it is resolvable.
lines_1 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=2,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']
lines_2 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=.,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']
main_header = self._get_header_from_lines(lines_1)
secondary_header = self._get_header_from_lines(lines_2)
merger = self._get_header_merger()
merger.merge(main_header, secondary_header)
self.assertCountEqual(list(main_header.infos.keys()), ['NS'])
self.assertCountEqual(main_header.infos['NS'],
OrderedDict([('id', 'NS'),
('num', '.'),
('type', 'Integer'),
('desc', 'Number samples'),
('source', None),
('version', None)]))
def test_merge_two_num_conflicting_but_resolvable_headers_2(self):
    # These two headers have a conflict in the Number field (2 vs 3);
    # however, the pipeline doesn't raise an error because it is resolvable.
lines_1 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=2,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']
lines_2 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=3,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']
main_header = self._get_header_from_lines(lines_1)
secondary_header = self._get_header_from_lines(lines_2)
merger = self._get_header_merger()
merger.merge(main_header, secondary_header)
self.assertCountEqual(list(main_header.infos.keys()), ['NS'])
self.assertCountEqual(main_header.infos['NS'],
OrderedDict([('id', 'NS'),
('num', '.'),
('type', 'Integer'),
('desc', 'Number samples'),
('source', None),
('version', None)]))
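  # Taken together, the resolvable cases above exercise the resolver's
  # rules: Integer vs Float widens to Float, and any disagreement in
  # Number (2 vs '.', or 2 vs 3) collapses to '.'. The tests below cover
  # the cases the resolver refuses to reconcile.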
def test_merge_two_num_conflicting_but_not_resolvable_headers(self):
# Test with split_alternate_allele_info_fields=True
#
    # These two headers have an incompatible Number field (A vs dot).
    # `Number=A` is incompatible with dot when the
    # split_alternate_allele_info_fields flag is set.
lines_1 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=A,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']
lines_2 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=.,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']
main_header = self._get_header_from_lines(lines_1)
secondary_header = self._get_header_from_lines(lines_2)
merger = self._get_header_merger()
with self.assertRaises(ValueError):
merger.merge(main_header, secondary_header)
def test_merge_two_headers_with_bad_conflict(self):
    # A type mismatch (String vs Float) cannot be resolved.
lines_1 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=1,Type=String,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']
lines_2 = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']
main_header = self._get_header_from_lines(lines_1)
secondary_header = self._get_header_from_lines(lines_2)
merger = self._get_header_merger()
with self.assertRaises(ValueError):
merger.merge(main_header, secondary_header)
|
googlegenomics/gcp-variant-transforms
|
gcp_variant_transforms/libs/header_merger_test.py
|
Python
|
apache-2.0
| 9,036
|
[
"pysam"
] |
9946cac251443b38535281cae295bcca9dde8eb2b75ff978a2b345e4d8b4c1e5
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+     +----------+       +--------+     +-----------+    +---------+
|          |  => | Work Ids |       |        |     | Call Q    |    | Process |
|          |     +----------+       |        |     +-----------+    |  Pool   |
|          |     | ...      |       |        |     | ...       |    +---------+
|          |     | 6        |    => |        |  => | 5, call() | => |         |
|          |     | 7        |       |        |     | ...       |    |         |
| Process  |     | ...      |       | Local  |     +-----------+    | Process |
|  Pool    |     +----------+       | Worker |                      |  #1..n  |
| Executor |                        | Thread |                      |         |
|          |     +------------+     |        |     +-----------+    |         |
|          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
|          |     +------------+     |        |     +-----------+    |         |
|          |     | 6: call()  |     |        |     | ...       |    |         |
|          |     |    future  |     |        |     | 4, result |    |         |
|          |     | ...        |     |        |     | 3, except |    |         |
+----------+     +------------+     +--------+     +-----------+    +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import atexit
import os
from concurrent.futures import _base
import queue
from queue import Full
import multiprocessing as mp
from multiprocessing.connection import wait
from multiprocessing.queues import Queue
import threading
import weakref
from functools import partial
import itertools
import sys
import traceback
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_wakeups = weakref.WeakKeyDictionary()
_global_shutdown = False
class _ThreadWakeup:
def __init__(self):
self._reader, self._writer = mp.Pipe(duplex=False)
def close(self):
self._writer.close()
self._reader.close()
def wakeup(self):
self._writer.send_bytes(b"")
def clear(self):
while self._reader.poll():
self._reader.recv_bytes()
def _python_exit():
global _global_shutdown
_global_shutdown = True
items = list(_threads_wakeups.items())
for _, thread_wakeup in items:
thread_wakeup.wakeup()
for t, _ in items:
t.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
# On Windows, WaitForMultipleObjects is used to wait for processes to finish.
# It can wait on, at most, 63 objects. There is an overhead of two objects:
# - the result queue reader
# - the thread wakeup reader
_MAX_WINDOWS_WORKERS = 63 - 2
# Hack to embed stringification of remote traceback in local traceback
class _RemoteTraceback(Exception):
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
class _ExceptionWithTraceback:
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return _rebuild_exc, (self.exc, self.tb)
def _rebuild_exc(exc, tb):
exc.__cause__ = _RemoteTraceback(tb)
return exc
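# Net effect (illustrative): pickling an _ExceptionWithTraceback and unpickling
# it in the parent yields the original exception with the remote traceback
# attached as its __cause__:
#     e = pickle.loads(pickle.dumps(_ExceptionWithTraceback(ValueError('x'), None)))
#     assert isinstance(e, ValueError)
#     assert isinstance(e.__cause__, _RemoteTraceback)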
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
class _SafeQueue(Queue):
"""Safe Queue set exception to the future object linked to a job"""
def __init__(self, max_size=0, *, ctx, pending_work_items):
self.pending_work_items = pending_work_items
super().__init__(max_size, ctx=ctx)
def _on_queue_feeder_error(self, e, obj):
if isinstance(obj, _CallItem):
tb = traceback.format_exception(type(e), e, e.__traceback__)
e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb)))
work_item = self.pending_work_items.pop(obj.work_id, None)
# work_item can be None if another process terminated. In this case,
# the queue_manager_thread fails all work_items with BrokenProcessPool
if work_item is not None:
work_item.future.set_exception(e)
else:
super()._on_queue_feeder_error(e, obj)
def _get_chunks(*iterables, chunksize):
""" Iterates over zip()ed iterables in chunks. """
it = zip(*iterables)
while True:
chunk = tuple(itertools.islice(it, chunksize))
if not chunk:
return
yield chunk
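# e.g. (illustrative):
#     list(_get_chunks([1, 2, 3], [4, 5, 6], chunksize=2))
#     == [((1, 4), (2, 5)), ((3, 6),)]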
def _process_chunk(fn, chunk):
""" Processes a chunk of an iterable passed to map.
Runs the function passed to map() on a chunk of the
iterable passed to map.
This function is run in a separate process.
"""
return [fn(*args) for args in chunk]
def _sendback_result(result_queue, work_id, result=None, exception=None):
"""Safely send back the given result or exception"""
try:
result_queue.put(_ResultItem(work_id, result=result,
exception=exception))
except BaseException as e:
exc = _ExceptionWithTraceback(e, e.__traceback__)
result_queue.put(_ResultItem(work_id, exception=exc))
def _process_worker(call_queue, result_queue, initializer, initargs):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A ctx.Queue of _CallItems that will be read and
evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will be written
to by the worker.
initializer: A callable initializer, or None
initargs: A tuple of args for the initializer
"""
if initializer is not None:
try:
initializer(*initargs)
except BaseException:
_base.LOGGER.critical('Exception in initializer:', exc_info=True)
# The parent will notice that the process stopped and
# mark the pool broken
return
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(os.getpid())
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException as e:
exc = _ExceptionWithTraceback(e, e.__traceback__)
_sendback_result(result_queue, call_item.work_id, exception=exc)
else:
_sendback_result(result_queue, call_item.work_id, result=r)
# Liberate the resource as soon as possible, to avoid holding onto
# open files or shared memory that is not needed anymore
del call_item
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue,
thread_wakeup):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
        processes: A dict mapping pids to the ctx.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A ctx.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A ctx.SimpleQueue of _ResultItems generated by the
process workers.
thread_wakeup: A _ThreadWakeup to allow waking up the
queue_manager_thread from the main Thread and avoid deadlocks
caused by permanently locked queues.
"""
executor = None
def shutting_down():
return (_global_shutdown or executor is None
or executor._shutdown_thread)
def shutdown_worker():
# This is an upper bound on the number of children alive.
n_children_alive = sum(p.is_alive() for p in processes.values())
n_children_to_stop = n_children_alive
n_sentinels_sent = 0
# Send the right number of sentinels, to make sure all children are
# properly terminated.
while n_sentinels_sent < n_children_to_stop and n_children_alive > 0:
for i in range(n_children_to_stop - n_sentinels_sent):
try:
call_queue.put_nowait(None)
n_sentinels_sent += 1
except Full:
break
n_children_alive = sum(p.is_alive() for p in processes.values())
# Release the queue's resources as soon as possible.
call_queue.close()
# If .join() is not called on the created processes then
# some ctx.Queue methods may deadlock on Mac OS X.
for p in processes.values():
p.join()
result_reader = result_queue._reader
wakeup_reader = thread_wakeup._reader
readers = [result_reader, wakeup_reader]
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
# Wait for a result to be ready in the result_queue while checking
# that all worker processes are still running, or for a wake up
        # signal to be sent. The wake-up signals come either from new tasks being
# submitted, from the executor being shutdown/gc-ed, or from the
# shutdown of the python interpreter.
worker_sentinels = [p.sentinel for p in processes.values()]
ready = wait(readers + worker_sentinels)
cause = None
is_broken = True
if result_reader in ready:
try:
result_item = result_reader.recv()
is_broken = False
except BaseException as e:
cause = traceback.format_exception(type(e), e, e.__traceback__)
elif wakeup_reader in ready:
is_broken = False
result_item = None
thread_wakeup.clear()
if is_broken:
# Mark the process pool broken so that submits fail right now.
executor = executor_reference()
if executor is not None:
executor._broken = ('A child process terminated '
'abruptly, the process pool is not '
'usable anymore')
executor._shutdown_thread = True
executor = None
bpe = BrokenProcessPool("A process in the process pool was "
"terminated abruptly while the future was "
"running or pending.")
if cause is not None:
bpe.__cause__ = _RemoteTraceback(
f"\n'''\n{''.join(cause)}'''")
# All futures in flight must be marked failed
for work_id, work_item in pending_work_items.items():
work_item.future.set_exception(bpe)
# Delete references to object. See issue16284
del work_item
pending_work_items.clear()
# Terminate remaining workers forcibly: the queues or their
# locks may be in a dirty state and block forever.
for p in processes.values():
p.terminate()
shutdown_worker()
return
if isinstance(result_item, int):
# Clean shutdown of a worker using its PID
# (avoids marking the executor broken)
assert shutting_down()
p = processes.pop(result_item)
p.join()
if not processes:
shutdown_worker()
return
elif result_item is not None:
work_item = pending_work_items.pop(result_item.work_id, None)
# work_item can be None if another process terminated (see above)
if work_item is not None:
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Delete references to object. See issue16284
del work_item
# Delete reference to result_item
del result_item
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if shutting_down():
try:
# Flag the executor as shutting down as early as possible if it
# is not gc-ed yet.
if executor is not None:
executor._shutdown_thread = True
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
shutdown_worker()
return
except Full:
# This is not a problem: we will eventually be woken up (in
# result_queue.get()) and be able to send a sentinel again.
pass
executor = None
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
        # indeterminate limit, assume that the limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = ("system provides too few semaphores (%d"
" available, 256 necessary)" % nsems_max)
raise NotImplementedError(_system_limited)
def _chain_from_iterable_of_lists(iterable):
"""
Specialized implementation of itertools.chain.from_iterable.
Each item in *iterable* should be a list. This function is
careful not to keep references to yielded objects.
"""
for element in iterable:
element.reverse()
while element:
yield element.pop()
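# e.g. (illustrative): list(_chain_from_iterable_of_lists(iter([[1, 2], [3]])))
# == [1, 2, 3], with each inner list emptied as its items are consumed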
class BrokenProcessPool(_base.BrokenExecutor):
"""
Raised when a process in a ProcessPoolExecutor terminated abruptly
while a future was in the running state.
"""
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None, mp_context=None,
initializer=None, initargs=()):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
mp_context: A multiprocessing context to launch the workers. This
object should provide SimpleQueue, Queue and Process.
initializer: A callable used to initialize worker processes.
initargs: A tuple of arguments to pass to the initializer.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = os.cpu_count() or 1
if sys.platform == 'win32':
self._max_workers = min(_MAX_WINDOWS_WORKERS,
self._max_workers)
else:
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
elif (sys.platform == 'win32' and
max_workers > _MAX_WINDOWS_WORKERS):
raise ValueError(
f"max_workers must be <= {_MAX_WINDOWS_WORKERS}")
self._max_workers = max_workers
if mp_context is None:
mp_context = mp.get_context()
self._mp_context = mp_context
if initializer is not None and not callable(initializer):
raise TypeError("initializer must be a callable")
self._initializer = initializer
self._initargs = initargs
# Management thread
self._queue_management_thread = None
# Map of pids to processes
self._processes = {}
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._broken = False
self._queue_count = 0
self._pending_work_items = {}
# Create communication channels for the executor
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
queue_size = self._max_workers + EXTRA_QUEUED_CALLS
self._call_queue = _SafeQueue(
max_size=queue_size, ctx=self._mp_context,
pending_work_items=self._pending_work_items)
# Killed worker processes can produce spurious "broken pipe"
# tracebacks in the queue's own worker thread. But we detect killed
# processes anyway, so silence the tracebacks.
self._call_queue._ignore_epipe = True
self._result_queue = mp_context.SimpleQueue()
self._work_ids = queue.Queue()
# _ThreadWakeup is a communication channel used to interrupt the wait
# of the main loop of queue_manager_thread from another thread (e.g.
# when calling executor.submit or executor.shutdown). We do not use the
# _result_queue to send the wakeup signal to the queue_manager_thread
# as it could result in a deadlock if a worker process dies with the
# _result_queue write lock still acquired.
self._queue_management_thread_wakeup = _ThreadWakeup()
def _start_queue_management_thread(self):
if self._queue_management_thread is None:
            # When the executor gets garbage collected, the weakref callback
# will wake up the queue management thread so that it can terminate
# if there is no pending work item.
def weakref_cb(_,
thread_wakeup=self._queue_management_thread_wakeup):
mp.util.debug('Executor collected: triggering callback for'
' QueueManager wakeup')
thread_wakeup.wakeup()
# Start the processes so that their sentinels are known.
self._adjust_process_count()
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue,
self._queue_management_thread_wakeup),
name="QueueManagerThread")
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_wakeups[self._queue_management_thread] = \
self._queue_management_thread_wakeup
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = self._mp_context.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue,
self._initializer,
self._initargs))
p.start()
self._processes[p.pid] = p
def submit(*args, **kwargs):
if len(args) >= 2:
self, fn, *args = args
elif not args:
raise TypeError("descriptor 'submit' of 'ProcessPoolExecutor' object "
"needs an argument")
elif 'fn' in kwargs:
fn = kwargs.pop('fn')
self, *args = args
else:
raise TypeError('submit expected at least 1 positional argument, '
'got %d' % (len(args)-1))
with self._shutdown_lock:
if self._broken:
raise BrokenProcessPool(self._broken)
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
if _global_shutdown:
raise RuntimeError('cannot schedule new futures after '
'interpreter shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._queue_management_thread_wakeup.wakeup()
self._start_queue_management_thread()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def map(self, fn, *iterables, timeout=None, chunksize=1):
"""Returns an iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
chunksize: If greater than one, the iterables will be chopped into
chunks of size chunksize and submitted to the process pool.
If set to one, the items in the list will be sent one at a time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
if chunksize < 1:
raise ValueError("chunksize must be >= 1.")
results = super().map(partial(_process_chunk, fn),
_get_chunks(*iterables, chunksize=chunksize),
timeout=timeout)
return _chain_from_iterable_of_lists(results)
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._queue_management_thread_wakeup.wakeup()
if wait:
self._queue_management_thread.join()
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
if self._call_queue is not None:
self._call_queue.close()
if wait:
self._call_queue.join_thread()
self._call_queue = None
self._result_queue = None
self._processes = None
if self._queue_management_thread_wakeup:
self._queue_management_thread_wakeup.close()
self._queue_management_thread_wakeup = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
|
prefetchnta/questlab
|
bin/x64bin/python/37/Lib/concurrent/futures/process.py
|
Python
|
lgpl-2.1
| 28,719
|
[
"Brian"
] |
f7bc62f4da1cc04912b6da0e772a106a4f3b63a87a7fac2720448c782266d097
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************************
espressopp.interaction.LennardJonesGromacs
******************************************
if :math:`d^2 > r_1^2`
.. math::
        U = 4 \varepsilon (\frac{\sigma^{12}}{d^{12}} - \frac{\sigma^6}{d^6}) + (d-r_1)^3 (ljsw3 + ljsw4 (d-r_1) + ljsw5)
else
.. math::
U = 4 \varepsilon (\frac{\sigma^{12}}{d^{12}} - \frac{\sigma^6}{d^6})
.. function:: espressopp.interaction.LennardJonesGromacs(epsilon, sigma, r1, cutoff, shift)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param r1: (default: 0.0)
:param cutoff: (default: infinity)
:param shift: (default: "auto")
:type epsilon: real
:type sigma: real
:type r1: real
:type cutoff:
:type shift:
.. function:: espressopp.interaction.VerletListLennardJonesGromacs(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListLennardJonesGromacs.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListLennardJonesGromacs.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListLennardJonesGromacs(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListLennardJonesGromacs.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesGromacs(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesGromacs.setPotential(potential)
:param potential:
:type potential:
"""
"""
real sig2 = sigma * sigma;
real sig6 = sig2 * sig2 * sig2;
ff1 = 48 \varepsilon \sigma^{12}
ff2 = 24 \varepsilon \sigma^6
ef1 = 4 \varepsilon \sigma^{12}
ef2 = 4 \varepsilon \sigma^6
r1sq = r_1^2
real t = r_c - r_1
real r6inv = \frac{1}{r_c^6}
real r8inv = \frac{1}{r_c^8}
real t2inv = \frac{1}{(r_c - r_1)^2}
real t3inv = \frac{1}{(r_c - r_1)^3}
real t3 = (r_c - r_1)^3
real a6 = \frac{7 r_1 - 10 r_c}{(r_c - r_1)^2 r_c^8}
real b6 = \frac{9 r_c - 7 r_1}{(r_c - r_1)^3 r_c^8};
real a12 = \frac{13 r_1 - 16 r_c}{(r_c - r_1)^2 r_c^{14}}
real b12 = \frac{15 r_c - 13 r_1}{(r_c - r_1)^3 r_c^{14}}
real c6 = \frac{1}{r_c^6} - (r_c - r_1)^3(\frac{42 r_1 - 60 r_c}{3(r_c - r_1)^2 r_c^8} + \frac{(54 r_c - 42 r_1)(r_c - r_1)}{4(r_c - r_1)^3 r_c^8});
real c12 = \frac{1}{r_c^{12}} - (r_c - r_1)^3(\frac{156 r_1 - 192 r_c}{3(r_c - r_1)^2 r_c^{14}} + \frac{(180 r_c - 156 r_1)(r_c - r_1)}{4(r_c - r_1)^3 r_c^{14}});
ljsw3 = -4 \varepsilon \sigma^{12} (\frac{156 r_1 - 192 r_c}{3(r_c - r_1)^2 r_c^{14}}) + 4 \varepsilon \sigma^6 \frac{42 r_1 - 60 r_c}{3(r_c - r_1)^2 r_c^8}
ljsw4 = -4 \varepsilon \sigma^{12} (\frac{180 r_c - 156 r_1}{4(r_c - r_1)^3 r_c^{14}}) + 4 \varepsilon \sigma^6 \frac{54 r_c - 42 r_1}{4(r_c - r_1)^3 r_c^8}
ljsw5 = -4 \varepsilon \sigma^{12} (\frac{1}{r_c^{12}} - (r_c - r_1)^3(\frac{156 r_1 - 192 r_c}{3(r_c - r_1)^2 r_c^{14}} + \frac{(180 r_c - 156 r_1)(r_c - r_1)}{4(r_c - r_1)^3 r_c^{14}})) + 4 \varepsilon \sigma^6 (\frac{1}{r_c^6} - (r_c - r_1)^3(\frac{42 r_1 - 60 r_c}{3(r_c - r_1)^2 r_c^8} + \frac{(54 r_c - 42 r_1)(r_c - r_1)}{4(r_c - r_1)^3 r_c^8}))
U = 4 \varepsilon (\frac{\sigma^{12}}{d^{12}} - \frac{\sigma^6}{d^6}) + (d-r_1)^3 (ljsw3 + ljsw4 (d-r_1) + ljsw5), with ljsw3, ljsw4 and ljsw5 as defined above
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_LennardJonesGromacs, \
interaction_VerletListLennardJonesGromacs, \
interaction_CellListLennardJonesGromacs, \
interaction_FixedPairListLennardJonesGromacs
class LennardJonesGromacsLocal(PotentialLocal, interaction_LennardJonesGromacs):
def __init__(self, epsilon=1.0, sigma=1.0, r1=0.0,
cutoff=infinity, shift="auto"):
"""Initialize the local LennardJonesGromacs object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if shift =="auto":
cxxinit(self, interaction_LennardJonesGromacs,
epsilon, sigma, r1, cutoff)
else:
cxxinit(self, interaction_LennardJonesGromacs,
epsilon, sigma, r1, cutoff, shift)
class VerletListLennardJonesGromacsLocal(InteractionLocal, interaction_VerletListLennardJonesGromacs):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListLennardJonesGromacs, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
class CellListLennardJonesGromacsLocal(InteractionLocal, interaction_CellListLennardJonesGromacs):
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListLennardJonesGromacs, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListLennardJonesGromacsLocal(InteractionLocal, interaction_FixedPairListLennardJonesGromacs):
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListLennardJonesGromacs, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
if pmi.isController:
class LennardJonesGromacs(Potential):
'The LennardJonesGromacs potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.LennardJonesGromacsLocal',
pmiproperty = ['epsilon', 'sigma', 'r1']
)
class VerletListLennardJonesGromacs(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListLennardJonesGromacsLocal',
pmicall = ['setPotential','getPotential']
)
class CellListLennardJonesGromacs(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.CellListLennardJonesGromacsLocal',
pmicall = ['setPotential']
)
class FixedPairListLennardJonesGromacs(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListLennardJonesGromacsLocal',
pmicall = ['setPotential']
)
|
espressopp/espressopp
|
src/interaction/LennardJonesGromacs.py
|
Python
|
gpl-3.0
| 9,755
|
[
"ESPResSo"
] |
cf6e572935974f436c556a1f3e72ce05f087e8434fadea504a15b0d9d38e6c38
|
__author__ = 'Mikhail Pedrosa <mikhailj.pedrosa@gmail.com> and Arthur Costa <arthur.opa@gmail.com>'
__description__ = 'Methods for plotting images, vectors and graphs'
__version__ = '0.1'
__date__ = '13/04/2015'
import pyart
import matplotlib.pyplot as plt
from filters import *
from memory_profiler import profile
import numpy as np  # np.meshgrid is used below; do not rely on `filters` re-exporting numpy
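# Illustrative driver for the plotting functions below (the file name is a
# placeholder; any volume readable by Py-ART should work):
#     radar = pyart.io.read('radar_volume_file')
#     plot_image_no_map(radar)
#     plot_graph_points_no_filters(radar, r=100)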
#@profile()
def plot_image_no_map(radar):
"""
    Plot function for the radar image (no map background).
    :param radar: Radar object
:return:
"""
#create the plot using RadarDisplay
display = pyart.graph.RadarDisplay(radar)
display.plot('velocity', 0, vmin=-35., vmax=35.)
# axislabels = '#000066', '#14217A', '#20398A', '#2D519B', '#3B6BAC', '#4A85BE', '#59A0D0', '#66B7E0', '#75D2F2', '#FFFFFF',
# '#FFEA00', '#FFCC00', '#FFB600', '#FF9900', '#FF7E00', '#FF6600', '#FF4900', '#FF3300', '#FF0000'
#display.plot_colorbar(field= ['#000066', '#20398A', '#3B6BAC', '#59A0D0', '#66B7E0', '#75D2F2', '#FFFFFF', '#FFEA00', '#FFCC00', '#FFB600', '#FF7E00', '#FF4900', '#FF0000'])
display.plot_range_rings([100., 200., 300., 400.])
#display.plot_cross_hair(radar.latitude['data'], radar.longitude['data'] )
#display.plot_point(radar.longitude['data'][0], radar.latitude['data'][0])
#plt.show()
plt.savefig('Radar_Qxb_Band_S - Image Velocity Wind (No Map).png', format='png')
plt.close()
#@profile()
def plot_graph_points_no_filters(radar, r):
azimuth = radar.azimuth['data'].reshape(10,360)
velocity = radar.fields['velocity']['data'].reshape(10,360,253)
y = velocity[2,:,r]
x = azimuth[2, :]
figure = plt.figure()
ax = figure.add_subplot(111)
plt.scatter (x, y, label='Raw Vr' )
ax.spines['bottom'].set_position('center')
# ax.spines['top'].set_color('none')
# ax.spines['left'].set_smart_bounds(True)
# ax.spines['bottom'].set_smart_bounds(True)
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
plt.grid()
plt.ylim(-35.,35.)
plt.xlim(0,360)
plt.title("Radar Quixeramobim - Velocity x Azimuth - ({:.2f} km Range)".format((r*1490)/1000.), fontstyle='italic')
plt.ylabel('Velocity (m/s)')
ax.set_xlabel('Azimuth (degree)')
ax.xaxis.set_label_coords(0.5,-0.05)
plt.legend(fontsize='10')
#plt.show()
plt.savefig("Radar_Qxb_Band_S - Velocity x Azimuth - ({:.2f} km Range) - Points (No Filter).png".format((r*1490)/1000.), format='png')
plt.close()
#@profile()
def plot_graph_lines_no_filters(radar, r):
azimuth = radar.azimuth['data'].reshape(10,360)
velocity = radar.fields['velocity']['data'].reshape(10,360,253)
y = velocity[2,:,r]
x = azimuth[2, :]
figure = plt.figure()
ax = figure.add_subplot(111)
plt.plot(x, y, lw='1', label='Raw Vr')
ax.spines['bottom'].set_position('center')
# ax.spines['top'].set_color('none')
# ax.spines['left'].set_smart_bounds(True)
# ax.spines['bottom'].set_smart_bounds(True)
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
plt.grid()
plt.ylim(-35.,35.)
plt.xlim(0,360)
plt.title("Radar Quixeramobim - Velocity x Azimuth ({:.2f} km Range)".format((r*1490)/1000.), fontstyle='italic')
plt.ylabel('Velocity (m/s)')
ax.set_xlabel('Azimuth (degree)')
ax.xaxis.set_label_coords(0.5,-0.05)
plt.legend(fontsize='10')
#plt.show()
plt.savefig('Radar_Qxb_Band_S - Velocity x Azimuth ({:.2f} km Range) - Line (No Filter).png'.format((r*1490)/1000.), format='png')
plt.close()
#@profile()
def plot_image_map(radar):
"""
    Plot function for the radar image (with map background).
    :param radar: Radar object
:return:
"""
#create the plot using RadarMapDisplay
display = pyart.graph.RadarMapDisplay(radar)
display.plot_ppi_map('velocity', 0, vmin=-35., vmax=35.,
min_lat=-9.6, max_lat=-0.9,
max_lon=325.5, min_lon=316.,
lat_lines=radar.latitude['data'],
lon_lines=radar.longitude['data'])
display.plot_range_rings([100., 200., 300., 400.])
display.plot_point(radar.longitude['data'][0], radar.latitude['data'][0])
#plt.show()
plt.savefig("Radar_Qxb_Band_S - Image Velocity Wind.png", format='png')
plt.close()
#@profile()
def plot_graph_points_filters(radar, r):
azimuth = radar.azimuth['data'].reshape(10,360)
velocity = radar.fields['velocity']['data'].reshape(10,360,253)
y = velocity[2,:,r]
x = azimuth[2, :]
y_ma = moving_average(y,3)
y_median = median(y,3)
#y_gauss = gauss(y,3)
plt.figure()
ax1 = plt.subplot(211)
ax1.scatter(x, y_ma, lw='1', label='Moving Average')
ax2 = plt.subplot(212)
ax2.scatter(x, y_median, lw='1', label='Median')
ax1.spines['bottom'].set_position('center')
ax2.spines['bottom'].set_position('center')
ax1.grid()
ax2.grid()
ax1.set_ylim(-35.,35.)
ax1.set_xlim(0,360)
ax2.set_ylim(-35.,35.)
ax2.set_xlim(0,360)
ax1.set_title("Radar Quixeramobim - Velocity x Azimuth ({:.2f} km Range)".format((r*1490)/1000.), fontstyle='italic')
ax2.set_ylabel('Velocity (m/s)')
ax2.set_xlabel('Azimuth (degree)')
ax2.xaxis.set_label_coords(0.5,-0.05)
ax2.yaxis.set_label_coords(-0.1,1.0)
ax1.legend(fontsize='10')
ax2.legend(fontsize='10')
#plt.show()
plt.savefig("Radar_Qxb_Band_S - Velocity x Azimuth (Moving Average e Median)- ({:.2f} km Range) - Points.png".format((r*1490)/1000.), format='png')
plt.close()
#@profile()
def plot_graph_lines_filters(radar, r):
azimuth = radar.azimuth['data'].reshape(10,360)
velocity = radar.fields['velocity']['data'].reshape(10,360,253)
y = velocity[2,:,r]
x = azimuth[2, :]
y_ma = moving_average(y,3)
y_median = median(y,3)
#y_gauss = gauss(y,3)
plt.figure()
ax1 = plt.subplot(211)
ax1.plot(x, y_ma, lw='1', label='Moving Average')
ax2 = plt.subplot(212)
ax2.plot(x, y_median, lw='1', label='Median')
ax1.spines['bottom'].set_position('center')
ax2.spines['bottom'].set_position('center')
ax1.grid()
ax2.grid()
ax1.set_ylim(-35.,35.)
ax1.set_xlim(0,360)
ax2.set_ylim(-35.,35.)
ax2.set_xlim(0,360)
ax1.set_title("Radar Quixeramobim - Velocity x Azimuth ({:.2f} km Range)".format((r*1490)/1000.), fontstyle='italic')
ax2.set_ylabel('Velocity (m/s)')
ax2.set_xlabel('Azimuth (degree)')
ax2.xaxis.set_label_coords(0.5,-0.05)
ax2.yaxis.set_label_coords(-0.1,1.0)
ax1.legend(fontsize='10')
ax2.legend(fontsize='10')
#plt.show()
plt.savefig("Radar_Qxb_Band_S - Velocity x Azimuth (Moving Average e Median)- ({:.2f} km Range).png".format((r*1490)/1000.), format='png')
plt.close()
#@profile()
def plot_graph(radar, r):
azimuth = radar.azimuth['data'].reshape(10,360)
velocity = radar.fields['velocity']['data'].reshape(10,360,253)
y = velocity[2,:,r]
x = azimuth[2, :]
#y = moving_average(y,3)
#y = median(y,3)
y = gaussian(y)
    print(y.shape)
figure = plt.figure()
ax = figure.add_subplot(111)
plt.plot(x, y, lw='1', label='Raw Vr')
#plt.scatter (x, y, label='Raw Vr' )
ax.spines['bottom'].set_position('center')
# ax.spines['top'].set_color('none')
# ax.spines['left'].set_smart_bounds(True)
# ax.spines['bottom'].set_smart_bounds(True)
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
plt.grid()
plt.ylim(-35.,35.)
plt.xlim(0,360)
plt.title("Radar Quixeramobim - Velocity x Azimuth ({:.2f} km Range)".format((r*1490)/1000.), fontstyle='italic')
plt.ylabel('Velocity (m/s)')
ax.set_xlabel('Azimuth (degree)')
ax.xaxis.set_label_coords(0.5,-0.05)
plt.legend(fontsize='10')
plt.show()
#plt.savefig("Radar_Qxb_Band_S - Velocity x Azimuth - Median ({:.2f} km Range).png".format((r*1490)/1000.), format='png')
plt.close()
#@profile()
def plot_vector_barbs(radar, r, u, v):
azimuth = radar.azimuth['data'].reshape(10,360)
rang = radar.range['data']
velocity_radial = radar.fields['velocity']['data'].reshape(10,360,253)
theta, ran = np.meshgrid(azimuth[3,:], rang[r])
plt.figure()
plt.subplot(111, polar=True)
plt.barbs(theta, ran, u[3,:,r], v[3,:,r], velocity_radial[3,:,r])
plt.show()
#plt.savefig('Radar_Qxb_Band_S - Barbs ({:.2f} km Range).png'.format((r*1490)/1000.), format='png')
plt.close()
#@profile()
def plot_vector_quiver(radar, r, u, v):
azimuth = radar.azimuth['data'].reshape(10,360)
rang = radar.range['data']
velocity_radial = radar.fields['velocity']['data'].reshape(10,360,253)
theta, ran = np.meshgrid(azimuth[3,:], rang[r])
plt.figure()
plt.subplot(111, polar=True)
plt.quiver(theta, ran, u[3,:,r], v[3,:,r], velocity_radial[3,:,r])
#plt.show()
plt.savefig('Radar_Qxb_Band_S - Quiver ({:.2f} km Range).png'.format((r*1490)/1000.), format='png')
plt.close()
|
mikhailpedrosa/radar_wind-field
|
graphical.py
|
Python
|
gpl-2.0
| 9,193
|
[
"Gaussian"
] |
1c08f0608627e91fa9c81fb70d2e880a5d367d5ef9a77510a57a015b81ad1a1d
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Tool for filtering a tabular data file. Fields are separated by tabs, the
header line is denoted by a '#' in the first byte, comments are denoted by
a '#' at the start of any subsequent line.
Expressions can use column names as well as numbers. The -c options allows
cutting, again using field name or numbers.
usage: %prog expression < table
-H, --header: keep header in output
-C, --comments: keep comments in output
--force-header: assume the first line is a header even if it does not start with "#"
-c, --cols=1,2: names or indexes of columns to keep
"""
import psyco_full
import sys
import bx.tabular.io
from bx.cookbook import doc_optparse
def __main__():
# Parse command line arguments
options, args = doc_optparse.parse( __doc__ )
try:
keep_header = bool( options.header )
keep_comments = bool( options.comments )
cols = []
if options.cols:
for c in options.cols.split( ',' ):
try:
v = int( c )
except:
v = c
                cols.append( v )
if len( args ) > 0:
expr = args[0]
else:
expr = None
if options.force_header:
force_header = bx.tabular.io.FIRST_LINE_IS_HEADER
else:
force_header = None
except:
doc_optparse.exception()
# Compile expression for SPEED
if expr: expr = compile( expr, '<expr arg>', 'eval' )
for element in bx.tabular.io.TableReader( sys.stdin, force_header=force_header ):
if type( element ) is bx.tabular.io.Header:
if keep_header:
if cols:
print "#" + "\t".join( element[c] for c in cols )
else:
print element
elif type( element ) is bx.tabular.io.Comment:
if keep_comments:
print element
else:
if expr is None or bool( eval( expr, dict( row=element ) ) ):
if cols:
print "\t".join( [ element[c] for c in cols ] )
else:
print element
if __name__ == "__main__": __main__()
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/table_filter.py
|
Python
|
bsd-3-clause
| 2,263
|
[
"Galaxy"
] |
8cccee71e1170f3272ab6cc851cecb18a11b9fa41d0da4ac78fa9ea7f54e1766
|
#! /usr/bin/env python3
"""Main ProPhyle file.
Author: Karel Brinda <kbrinda@hsph.harvard.edu>
Licence: MIT
Example:
Download sequences:
$ prophyle download bacteria
Create an index for k=10 from the small testing bacterial and viral trees:
$ prophyle index -k 10 -s 0.1 ~/prophyle/bacteria.nw ~/prophyle/viruses.nw test_idx
Classify some reads:
$ prophyle classify test_idx reads.fq > result.sam
"""
import argparse
import collections
import hashlib
import multiprocessing
import os
import sys
import tarfile
import tempfile
import textwrap
sys.path.append(os.path.dirname(__file__))
import prophylelib as pro
import version
CONFIG = {}
GITDIR = os.path.basename(sys.argv[0])[-3:] == ".py"
if GITDIR:
C_D = os.path.abspath(os.path.dirname(sys.argv[0]))
else:
C_D = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
TREE_D = os.path.join(C_D, "trees")
BWA = os.path.join(C_D, "prophyle_index", "bwa", "bwa")
IND = os.path.join(C_D, "prophyle_index", "prophyle_index")
ASM = os.path.join(C_D, "prophyle_assembler", "prophyle_assembler")
C_ASSIGN = os.path.join(C_D, "prophyle_assignment", "prophyle_assignment")
# executed from the git repo
if GITDIR:
PROPHYLE = os.path.join(C_D, "prophyle.py")
PY_ASSIGN = os.path.join(C_D, "prophyle_assignment.py")
ANALYZE = os.path.join(C_D, "prophyle_analyze.py")
PROPAGATION_POSTPROCESSING = os.path.join(C_D, "prophyle_propagation_postprocessing.py")
PROPAGATION_PREPROCESSING = os.path.join(C_D, "prophyle_propagation_preprocessing.py")
NEWICK2MAKEFILE = os.path.join(C_D, "prophyle_propagation_makefile.py")
READ = os.path.join(C_D, "prophyle_paired_end.py")
TEST_TREE = os.path.join(C_D, "prophyle_validate_tree.py")
SPLIT_FA = os.path.join(C_D, "prophyle_split_allseq.py")
# executed from a Python package
else:
PROPHYLE = "prophyle"
PY_ASSIGN = "prophyle_assignment.py"
ANALYZE = "prophyle_analyze.py"
PROPAGATION_POSTPROCESSING = "prophyle_propagation_postprocessing.py"
PROPAGATION_PREPROCESSING = "prophyle_propagation_preprocessing.py"
NEWICK2MAKEFILE = "prophyle_propagation_makefile.py"
READ = "prophyle_paired_end.py"
TEST_TREE = "prophyle_validate_tree.py"
SPLIT_FA = "prophyle_split_allseq.py"
DEFAULT_K = 31
DEFAULT_THREADS = multiprocessing.cpu_count()
# DEFAULT_THREADS=1
DEFAULT_MEASURE = 'h1'
DEFAULT_OUTPUT_FORMAT = 'sam'
DEFAULT_HOME_DIR = os.path.join(os.path.expanduser('~'), 'prophyle')
LIBRARIES = ['bacteria', 'viruses', 'plasmids', 'hmp']
ZENODO_URL = 'https://zenodo.org/record/1054426'
ANALYZE_IN_FMTS = ['sam', 'bam', 'cram', 'uncompressed_bam', 'kraken', 'histo']
ANALYZE_STATS = ['w', 'u', 'wl', 'ul']
FILES_TO_ARCHIVE = [
".complete.1",
".complete.2",
".complete.3",
"tree.nw",
"tree.preliminary.nw",
"index.json",
"index.fa.bwt",
"index.fa.ann",
"index.fa.amb", # but will be empty
'index.fa.kmers.tsv'
]
def _file_md5(fn, block_size=2**20):
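    # Hash the file in fixed-size blocks (1 MiB by default) so that arbitrarily
    # large files can be checksummed without loading them fully into memory.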
md5 = hashlib.md5()
with open(fn, 'rb') as f:
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def _log_file_md5(fn, remark=None):
md5 = _file_md5(fn)
size = pro.file_sizes(fn)[0]
m = "File {}{} has md5 checksum {} and size {} B".format(
os.path.basename(fn),
" ({})".format(remark) if remark is not None else "",
md5,
size,
)
pro.message(m, only_log=True)
def _test_tree(fn):
"""Test if given tree is valid for ProPhyle.
Args:
fn (str): Newick/NHX tree.
"""
tree = pro.load_nhx_tree(fn, validate=False)
if not pro.validate_prophyle_nhx_tree(tree, verbose=True, throw_exceptions=False, output_fo=sys.stderr):
error("The tree '{}' could not be properly parsed.".format(fn))
def _compile_prophyle_bin(clean=False, parallel=False, silent=True, force=False):
"""Compile ProPhyle binaries if they don't exist yet. Recompile if not up-to-date.
Args:
clean (bool): Run make clean instead of make.
parallel (bool): Run make in parallel.
silent (bool): Run make silently.
force (bool): Force recompile (make -B).
"""
try:
command = ["make"]
if parallel:
command += ['-j']
if silent:
command += ['-s']
if force:
command += ['-B']
command += ["-C", C_D]
if clean:
command += ['clean']
pro.run_safe(command, output_fo=sys.stderr)
except RuntimeError:
if not os.path.isfile(IND) or not os.path.isfile(ASM):
pro.error(
"Error: ProPhyle executables could not be compiled. Please, the command '{}' manually.".format(
" ".join(command)
)
)
else:
print("Warning: ProPhyle executables could not be recompiled. Going to use the old ones.", file=sys.stderr)
def _add_configuration_parameter(parser, visible=True):
parser.add_argument(
'-c',
dest='config',
metavar='STR',
nargs='*',
type=str,
default=[],
help='advanced configuration (a JSON dictionary)' if visible else argparse.SUPPRESS,
)
#####################
# PROPHYLE DOWNLOAD #
#####################
def __mark_fn(d, i, name):
"""Create a mark name.
Args:
d (str): Directory.
i (int): Number of the step.
name (str): Name of the mark.
"""
if name is None:
return os.path.join(d, ".complete.{}".format(i))
else:
return os.path.join(d, ".complete.{}.{}".format(name, i))
def _mark_complete(d, i=1, name=None):
"""Create a mark file (an empty file to mark a finished step nb i).
Args:
d (str): Directory.
i (int): Number of the step.
name (str): Name of the mark.
"""
assert i > 0
pro.touch(__mark_fn(d, i, name))
def _is_complete(d, i=1, name=None, dont_check_previous=False):
"""Check if a mark file i exists AND is newer than the mark file (i-1).
Args:
d (str): Directory.
i (int): Number of the step.
name (str): Name of the mark.
"""
assert i > 0
fn = __mark_fn(d, i, name)
fn0 = __mark_fn(d, i - 1, name)
if i == 1 or dont_check_previous:
return os.path.isfile(fn)
else:
return pro.existing_and_newer(fn0, fn)
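# Illustrative example of the mark-file scheme: _mark_complete('idx', 2) touches
# 'idx/.complete.2', and a later _is_complete('idx', 2) only returns True while
# 'idx/.complete.2' exists and is newer than 'idx/.complete.1'.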
def _missing_library(d):
"""Check if library has been already downloaded.
Args:
d (str): Directory.
"""
l = os.path.dirname(d)
pro.makedirs(d)
if _is_complete(d, 1):
pro.message("Skipping downloading library '{}' (already exists)".format(l))
return False
else:
pro.message("Downloading library '{}'".format(l))
return True
def _pseudo_fai(d):
"""Generate a psedudofai file for given directory (directory/*.fa => directory.fai).
Pseudofai format = TSV with 2 two columns: filename, sequence header (text after > in FASTA).
Args:
d (str): Directory.
"""
l = os.path.dirname(d)
pseudofai_fn = d + ".pseudofai"
pro.makedirs(d)
if _is_complete(d, 2) and os.path.isfile(pseudofai_fn):
pro.message("Skipping generating pseudofai for library '{}' (already exists)".format(l))
else:
pro.message("Generating pseudofai for library '{}'".format(l))
assert d[-1] != "/"
# cmd=['grep -r --include=\\*.{fa,ffn,fna}', '">"', d, '| sed "s/:>/\t/"']
cmd = [
'find', d, '-name', "'*.fa'", "-o", "-name", "'*.ffn'", "-o", "-name", "'*.fna'", "-exec", "grep", "-H",
'">"', "{}", "\\;", "|", 'sed', '"s/\:>/\t/"'
]
pro.run_safe(cmd, output_fn=pseudofai_fn)
_mark_complete(d, 2)
def prophyle_download(library, library_dir, force=False):
"""Create a library Download genomic library and copy the corresponding tree.
Args:
library (str): Library to download (bacteria / viruses / ...)
library_dir (str): Directory where download files will be downloaded.
TODO:
* Add support for alternative URLs (http / ftp, backup refseq sites, etc.).
* http://downloads.hmpdacc.org/data/HMREFG/all_seqs.fa.bz2
* ftp://public-ftp.hmpdacc.org/HMREFG/all_seqs.fa.bz2
"""
if library == "all":
for l in LIBRARIES:
prophyle_download(l, library_dir, force)
return
else:
assert library in LIBRARIES
if library_dir is None:
d = os.path.join(os.path.expanduser("~/prophyle"), library)
else:
d = os.path.join(library_dir, library)
# print('making',d, file=sys.stderr)
# os.makedirs(d, exist_ok=True)
pro.makedirs(d)
#pro.message("Checking library '{}' in '{}'".format(library, d))
lib_missing = _missing_library(d)
if library == 'bacteria':
if lib_missing or force:
cmd = [
'cd', d + "/..", '&&', 'curl', '-O', ZENODO_URL + '/files/bacteria.nw', '&&', 'curl',
ZENODO_URL + '/files/bacteria.tar.gz', '|', 'tar', 'xz'
]
pro.run_safe(cmd)
_mark_complete(d, 1)
# _pseudo_fai(d)
elif library == 'viruses':
if lib_missing or force:
cmd = [
'cd', d + "/..", '&&', 'curl', '-O', ZENODO_URL + '/files/viruses.nw', '&&', 'curl',
ZENODO_URL + '/files/viruses.tar.gz', '|', 'tar', 'xz'
]
pro.run_safe(cmd)
_mark_complete(d, 1)
# _pseudo_fai(d)
elif library == 'plasmids':
if lib_missing or force:
cmd = [
'cd', d + "/..", '&&', 'curl', '-O', ZENODO_URL + '/files/plasmids.nw', '&&', 'curl',
ZENODO_URL + '/files/plasmids.tar.gz', '|', 'tar', 'xz'
]
pro.run_safe(cmd)
_mark_complete(d, 1)
# _pseudo_fai(d)
elif library == 'hmp':
if lib_missing or force:
# fix when error appears
cmd = [
'cd', d, '&&', 'curl', 'http://downloads.hmpdacc.org/data/HMREFG/all_seqs.fa.bz2', '|', 'bzip2', '-d',
'|', SPLIT_FA,
os.path.abspath(d)
]
pro.run_safe(cmd)
_mark_complete(d, 1)
# _pseudo_fai(d)
else:
pro.error('Unknown library "{}"'.format(library))
##################
# PROPHYLE INDEX #
##################
def _create_makefile(index_dir, k, library_dir, mask_repeats=False):
"""Create a Makefile for k-mer propagation.
Args:
index_dir (str): Index directory.
k (int): K-mer size.
library_dir (library_dir): Library directory.
mask_repeats (bool): Mask repeats using DustMasker.
TODO:
* Add checking of params.mk
"""
pro.message('Creating Makefile for k-mer propagation')
propagation_dir = os.path.join(index_dir, 'propagation')
pro.makedirs(propagation_dir)
makefile = os.path.join(propagation_dir, 'Makefile')
tree_fn = os.path.join(index_dir, 'tree.preliminary.nw')
_test_tree(tree_fn)
# pro.test_files(NEWICK2MAKEFILE, tree_fn)
command = [NEWICK2MAKEFILE, '-k', k, tree_fn, os.path.abspath(library_dir), './', makefile]
config = collections.OrderedDict()
config['prophyle-version'] = version.VERSION
config['prophyle-revision'] = version.REVCOUNT
config['prophyle-commit'] = version.SHORTHASH
config['k'] = k
pro.save_index_config(index_dir, config)
with open(os.path.join(propagation_dir, "params.mk"), "w+") as f:
f.write('PRG_ASM="{}"\n'.format(ASM))
f.write("K={}\n".format(k))
if mask_repeats:
f.write("MASKREP=1\n")
pro.run_safe(command)
_log_file_md5(makefile)
def _propagate(index_dir, threads, nonprop=0):
"""Run k-mer propagation.
Args:
index_dir (str): Index directory.
threads (int): Number of threads for Makefile.
nonprop (bool): Switch propagation off.
"""
pro.message('Running k-mer propagation')
propagation_dir = os.path.join(index_dir, 'propagation')
pro.test_files(os.path.join(propagation_dir, 'Makefile'), test_nonzero=True)
if nonprop:
nonprop_cmd_str = "NONPROP=1"
else:
nonprop_cmd_str = ""
# test if input files for propagation exist
command = ['make', '-j', '-C', propagation_dir, '-n', '-s', nonprop_cmd_str, '>', '/dev/null']
pro.run_safe(
command,
err_msg="Some FASTA files needed for k-mer propagation are probably missing, see the messages above.",
thr_exc=False,
silent=True,
)
# run propagation
# TODO: progress report is switched off; come up with a better way than
# counting files
command = ['make', '-j', threads, '-C', propagation_dir, nonprop_cmd_str, 'V=1', 'PRINT_PROGRESS=']
pro.run_safe(
command,
err_msg="K-mer propagation has not been finished because of an error. See messages above.",
thr_exc=False,
)
def _propagation_preprocessing(in_trees, out_tree, no_prefixes, sampling_rate, autocomplete):
"""Merge input trees into a single tree.
Args:
in_trees (list of str): Input NHX trees (possibly with a root specifier).
out_tree (str): Output NHX tree.
no_prefixes (bool): Don't prepend prefixes to node names during tree merging.
        sampling_rate (float): Sampling rate for subsampling the tree or None for no subsampling.
"""
pro.message('Generating index tree')
# existence already checked
# pro.test_files(*in_trees)
command = [PROPAGATION_PREPROCESSING]
if sampling_rate is not None:
command += ['-s', sampling_rate]
command += in_trees + [out_tree]
if no_prefixes:
command += ['-P']
if autocomplete:
command += ['-A']
pro.run_safe(
command,
err_msg="The main tree could not be generated.",
thr_exc=False,
)
_log_file_md5(out_tree)
def _remove_tmp_propagation_files(index_dir):
"""Run k-mer propagation.
Args:
index_dir (str): Index directory.
"""
pro.message('Removing temporary files')
propagation_dir = os.path.join(index_dir, 'propagation')
command = ['make', '-C', propagation_dir, 'clean', '>', '/dev/null']
pro.run_safe(command)
def _merge_kmer_stats(index_dir):
"""Create a file with k-mer statistics.
Args:
index_dir (str): Index directory.
"""
tsv_fn = os.path.join(index_dir, "index.fa.kmers.tsv")
propagation_dir = os.path.join(index_dir, 'propagation')
command = [
"find", propagation_dir, "-name", "'*.tsv'", \
"|", "sort", \
"|", "xargs", "cat", \
"|", "grep", "-v", "^#",
"|", "sort", \
"|", "uniq", \
'>', tsv_fn]
pro.run_safe(
command,
err_msg="A file with k-mer statistics could not be created.",
thr_exc=False,
)
def _propagation_postprocessing(index_dir, in_tree_fn, out_tree_fn):
"""Merge reduced FASTA files after k-mer propagation and create index.fa.
Args:
index_dir (str): Index directory.
in_tree_fn (str): Input tree in Newick/NHX.
out_tree_fn (str): Output tree in Newick/NHX.
"""
pro.message('Propagation post-processing')
propagation_dir = os.path.join(index_dir, 'propagation')
index_fa = os.path.join(index_dir, "index.fa")
_merge_kmer_stats(index_dir)
tsv_fn = os.path.join(index_dir, "index.fa.kmers.tsv")
command = [PROPAGATION_POSTPROCESSING, propagation_dir, index_fa, in_tree_fn, tsv_fn, out_tree_fn]
pro.run_safe(
command,
err_msg="Main ProPhyle FASTA file could not be generated",
thr_exc=True,
)
pro.touch(index_fa + ".complete")
_log_file_md5(index_fa)
_log_file_md5(in_tree_fn)
_log_file_md5(out_tree_fn)
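# The BWA-based steps below form the index-construction pipeline:
#     index.fa --(fa2pac)--> index.fa.pac --(pac2bwtgen)--> index.fa.bwt
#     --(bwtupdate)--> BWT+OCC --(bwt2sa)--> index.fa.sa, plus an optional k-LCP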
def _fa2pac(fa_fn):
"""Run `bwa fa2pac` (FA => 2bit).
Args:
fa_fn (str): FASTA file.
"""
#pro.message('Generating packed FASTA file')
pro.test_files(BWA, fa_fn)
command = [BWA, 'fa2pac', fa_fn, fa_fn]
pro.run_safe(
command,
err_msg="Packaged file could not be created.",
thr_exc=True,
)
_log_file_md5(fa_fn + ".pac")
def _pac2bwt(fa_fn):
"""Run `bwa pac2bwtgen` (2bit => BWT).
Args:
fa_fn (str): FASTA file.
"""
#pro.message('Generating BWT')
pro.test_files(BWA, fa_fn + ".pac")
command = [BWA, 'pac2bwtgen', fa_fn + ".pac", fa_fn + ".bwt"]
pro.run_safe(
command,
err_msg="Burrows-Wheeler Transform could not be computed.",
thr_exc=True,
)
_log_file_md5(fa_fn + ".bwt", remark="without OCC")
def _bwt2bwtocc(fa_fn):
"""Run `bwa bwtupdate` (BWT => BWT+OCC).
Args:
fa_fn (str): FASTA file.
"""
#pro.message('Generating sampled OCC array')
pro.test_files(BWA, fa_fn + ".bwt")
command = [BWA, 'bwtupdate', fa_fn + ".bwt"]
pro.run_safe(
command,
err_msg="OCC array could not be computed.",
thr_exc=True,
)
_log_file_md5(fa_fn + ".bwt", remark="with OCC")
def _bwtocc2sa(fa_fn):
"""Run `bwa bwt2sa` (BWT+, remark="with OCC"OCC => SSA).
Args:
fa_fn (str): FASTA file.
"""
#pro.message('Generating sampled SA')
pro.test_files(BWA, fa_fn + ".bwt")
command = [BWA, 'bwt2sa', fa_fn + ".bwt", fa_fn + ".sa"]
pro.run_safe(
command,
err_msg="Sampled Suffix Array computation failed.",
thr_exc=True,
)
_log_file_md5(fa_fn + ".sa")
def _bwtocc2klcp(fa_fn, k):
"""Create k-LCP `` (BWT => k-LCP).
Args:
fa_fn (str): FASTA file.
k (int): K-mer size.
"""
#pro.message('Generating k-LCP array')
pro.test_files(IND, fa_fn + ".bwt")
command = [IND, 'build', '-k', k, fa_fn]
pro.run_safe(
command,
err_msg="k-Longest Common Prefix array construction failed.",
thr_exc=True,
)
_log_file_md5("{}.{}.klcp".format(fa_fn, k))
def _bwtocc2sa_klcp(fa_fn, k):
"""Create k-LCP `` (BWT => k-LCP).
Args:
fa_fn (str): FASTA file.
k (int): K-mer size.
"""
pro.message('Generating k-LCP array and SA in parallel')
pro.test_files(IND, fa_fn + ".bwt")
command = [IND, 'build', '-s', '-k', k, fa_fn]
pro.run_safe(
command,
err_msg="Parallel construction of k-Longest Common Prefix array and Sampled Suffix Array failed.",
thr_exc=True,
)
_log_file_md5(fa_fn + ".sa")
_log_file_md5("{}.{}.klcp".format(fa_fn, k))
def prophyle_index(
index_dir,
threads,
k,
trees_fn,
library_dir,
construct_klcp,
force,
no_prefixes,
stop_after_propagation,
mask_repeats,
keep_tmp_files,
sampling_rate,
autocomplete,
nonprop,
):
"""Build a ProPhyle index.
Args:
index_dir (str): Index directory.
threads (int): Number of threads in k-mer propagation.
k (int): K-mer size.
trees_fn (list of str): Newick/NHX tree, possibly with a root spec (@root).
library_dir (str): Library directory.
        construct_klcp (bool): Generate the k-LCP array.
force (bool): Rewrite files if they already exist.
no_prefixes (bool): Don't prepend prefixes to node names during tree merging.
stop_after_propagation (bool): Stop after k-mer propagation.
mask_repeats (bool): Mask repeats using DustMasker.
keep_tmp_files (bool): Keep temporary files from k-mer propagation.
        sampling_rate (float): Sampling rate for subsampling the tree or None for no subsampling.
autocomplete (bool): Autocomplete names of internal nodes and fasta paths.
nonprop (bool): Switch propagation off.
"""
assert isinstance(k, int)
assert isinstance(threads, int)
assert k > 1
assert threads > 0
assert sampling_rate is None or 0.0 <= float(sampling_rate) <= 1.0
_compile_prophyle_bin(parallel=True)
index_fa = os.path.join(index_dir, 'index.fa')
index_tree_1 = os.path.join(index_dir, 'tree.preliminary.nw')
index_tree_2 = os.path.join(index_dir, 'tree.nw')
# recompute = recompute everything from now on
# force==True => start to recompute everything from beginning
recompute = force
# make index dir
pro.makedirs(index_dir)
#
# 1) Newick
#
#if not _is_complete(index_dir, 1) or not pro.existing_and_newer_list(trees_fn, index_tree_1):
if not _is_complete(index_dir, 1):
recompute = True
if recompute:
pro.message('[1/6] Copying/merging trees', upper=True)
for tree_fn in trees_fn:
tree_fn, _, root = tree_fn.partition("@")
tree = pro.load_nhx_tree(tree_fn, validate=False)
# postpone for autocomplete
if not autocomplete:
pro.validate_prophyle_nhx_tree(tree)
if root != "":
if len(tree.search_nodes(name=root)) == 0:
pro.error("Node '{}' does not exist in '{}'.".format(root, tree_fn))
if len(trees_fn) != 1:
pro.message('Merging {} trees'.format(len(trees_fn)))
_propagation_preprocessing(
trees_fn, index_tree_1, no_prefixes=no_prefixes, sampling_rate=sampling_rate, autocomplete=autocomplete
)
_test_tree(index_tree_1)
_mark_complete(index_dir, 1)
else:
pro.message('[1/6] Tree already exists, skipping its creation', upper=True)
#
# 2) Create and run Makefile for propagation, and merge FASTA files
#
if not _is_complete(index_dir, 2):
recompute = True
if recompute:
pro.message('[2/6] Running k-mer propagation', upper=True)
_create_makefile(index_dir, k, library_dir, mask_repeats=mask_repeats)
_propagate(index_dir, threads=threads, nonprop=nonprop)
_propagation_postprocessing(index_dir, index_tree_1, index_tree_2)
_test_tree(index_tree_2)
if not keep_tmp_files:
_remove_tmp_propagation_files(index_dir)
else:
pro.message('Keeping temporary files')
_mark_complete(index_dir, 2)
else:
pro.message('[2/6] K-mers have already been propagated, skipping propagation', upper=True)
if stop_after_propagation:
pro.message('Stop after propagation requested. Propagation finished; going to stop.', upper=True)
return
#
# 3) BWT
#
if not _is_complete(index_dir, 3) and not _is_complete(index_dir, 4, dont_check_previous=True):
recompute = True
if recompute:
pro.message('[3/6] Constructing BWT', upper=True)
pro.rm(index_fa + '.bwt', index_fa + '.bwt.complete')
_fa2pac(index_fa)
_pac2bwt(index_fa)
_mark_complete(index_dir, 3)
else:
pro.message('[3/6] BWT already exists, skipping its construction', upper=True)
    #
    # 4) OCC
    #
if not _is_complete(index_dir, 4):
recompute = True
if recompute:
pro.message('[4/6] Constructing OCC', upper=True)
_bwt2bwtocc(index_fa)
_mark_complete(index_dir, 4)
else:
pro.message('[4/6] OCC already exists, skipping their construction', upper=True)
    #
    # 5) SA + 6) KLCP (compute SA + KLCP in parallel)
    #
klcp_fn = "{}.{}.klcp".format(index_fa, k)
if construct_klcp:
if not _is_complete(index_dir, 5):
# SA not computed yet => compute it in parallel with KLCP
recompute = True
if recompute:
pro.message('[5/6],[6/6] Constructing SA + KLCP in parallel ', upper=True)
_bwtocc2sa_klcp(index_fa, k)
_mark_complete(index_dir, 5)
_mark_complete(index_dir, 6)
return
#
# 5) SA (compute only SA)
#
if not _is_complete(index_dir, 5):
recompute = True
if recompute:
pro.message('[5/6] Constructing SA', upper=True)
_bwtocc2sa(index_fa)
else:
pro.message('[5/6] SA already exists, skipping its construction', upper=True)
#
# 6) KLCP (compute only KLCP)
#
if construct_klcp:
if not _is_complete(index_dir, 6):
recompute = True
if recompute:
pro.message('[6/6] Constructing k-LCP', upper=True)
_bwtocc2klcp(index_fa, k)
_mark_complete(index_dir, 6)
else:
pro.message('[6/6] k-LCP already exists, skipping its construction', upper=True)
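# A hypothetical programmatic invocation of prophyle_index (a sketch with
# placeholder paths and values, not part of the original module):
#
#   prophyle_index(
#       index_dir="my_index", threads=4, k=31,
#       trees_fn=["bacteria.nw@root"], library_dir="library",
#       construct_klcp=True, force=False, no_prefixes=False,
#       stop_after_propagation=False, mask_repeats=False,
#       keep_tmp_files=False, sampling_rate=None, autocomplete=False,
#       nonprop=False,
#   )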
#####################
# PROPHYLE CLASSIFY #
#####################
def prophyle_classify(
index_dir, fq_fn, fq_pe_fn, k, out_format, mimic_kraken, measure, annotate, tie_lca, kmer_lca, print_seq, cimpl,
force_restarted_search, prophyle_conf_string
):
"""Run ProPhyle classification.
Args:
index_dir (str): Index directory.
fq_fn (str): Input reads (single-end or first of paired-end).
        fq_pe_fn (str): Input reads (second file of paired-end, None if single-end).
k (int): K-mer size (None => detect automatically).
out_format (str): Output format: sam / kraken.
mimic_kraken (bool): Mimic Kraken algorithm (compute LCA for each k-mer).
measure (str): Measure used for classification (h1 / h2 / c1 / c2).
annotate (bool): Annotate assignments (insert annotations from Newick to SAM).
tie_lca (bool): If multiple equally good assignments found, compute their LCA.
kmer_lca (bool): Replace k-mer matches by their LCA.
        print_seq (bool): Print sequences and qualities in SAM records.
cimpl (bool): Use the C++ implementation.
force_restarted_search (bool): Force restarted search.
prophyle_conf_string (str): ProPhyle configuration string.
"""
_compile_prophyle_bin(parallel=True)
index_fa = os.path.join(index_dir, 'index.fa')
index_tree = os.path.join(index_dir, 'tree.nw')
if k is None:
k = pro.detect_k_from_index(index_dir)
pro.message("Automatic detection of k-mer length: k={}".format(k))
_test_tree(index_tree)
if fq_pe_fn:
pro.test_files(fq_fn, fq_pe_fn, allow_pipes=False)
elif fq_fn != '-':
pro.test_files(fq_fn, allow_pipes=False)
pro.test_files(IND)
pro.test_files(
index_fa + '.bwt',
#index_fa + '.pac',
index_fa + '.sa',
index_fa + '.ann',
#index_fa + '.amb',
)
(bwt_s, sa_s) = pro.file_sizes(index_fa + '.bwt', index_fa + '.sa')
if not abs(bwt_s - 2 * sa_s) < 1000:
pro.error('Inconsistent index (SA vs. BWT)')
#assert abs(bwt_s - 2 * pac_s) < 1000, 'Inconsistent index (PAC vs. BWT)'
klcp_fn = "{}.{}.klcp".format(index_fa, k)
if force_restarted_search:
pro.message("Restarted search forced")
use_rolling_window = False
else:
use_rolling_window = os.path.isfile(klcp_fn)
if use_rolling_window:
pro.message("k-LCP file found, going to use rolling window")
pro.test_files(klcp_fn)
(klcp_s, ) = pro.file_sizes(klcp_fn)
if not abs(bwt_s - 4 * klcp_s) < 1000:
pro.error('Inconsistent index (KLCP vs. BWT)')
else:
pro.message("k-LCP file not found, going to use restarted search")
if cimpl:
ASSIGN = C_ASSIGN
else:
ASSIGN = PY_ASSIGN
if mimic_kraken:
measure = "h1"
tie_lca = True
kmer_lca = True
out_format = "kraken"
cmd_assign = [ASSIGN]
if not cimpl and prophyle_conf_string:
cmd_assign += ['-c', prophyle_conf_string]
cmd_assign += ['-m', measure, '-f', out_format]
if annotate:
cmd_assign += ['-A']
if tie_lca:
cmd_assign += ['-L']
if kmer_lca:
cmd_assign += ['-X']
cmd_assign += [index_tree, k, '-']
if fq_pe_fn:
cmd_read = [READ, fq_fn, fq_pe_fn, '|']
in_read = '-'
else:
cmd_read = []
# fq_fn can be '-' as well
in_read = fq_fn
cmd_query = [
IND, 'query', '-k', k, '-u' if use_rolling_window else '', '-b' if print_seq else '', index_fa, in_read, '|'
]
command = cmd_read + cmd_query + cmd_assign
pro.run_safe(command)
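# The command list assembled above corresponds, conceptually, to a shell
# pipeline of the following shape (an illustration only; READ, IND and ASSIGN
# are program paths resolved elsewhere in this module, and file names are
# placeholders):
#
#   READ reads1.fq reads2.fq | IND query -k 31 -u index.fa - | ASSIGN -m h1 -f sam tree.nw 31 -
#
# i.e. optional paired-end interleaving, then k-mer matching against the BWT
# index (with '-u' only when the rolling window can be used), then assignment
# of reads to tree nodes.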
####################
# PROPHYLE ANALYZE #
####################
def prophyle_analyze(index_dir, out_prefix, input_fns, stats, in_format):
cmd_analyze = [ANALYZE, '-s', stats, index_dir, out_prefix] + input_fns
if in_format is not None:
cmd_analyze += ['-f', in_format]
pro.test_files(*filter(lambda x: x != "-", input_fns), test_nonzero=True)
pro.run_safe(cmd_analyze)
######################
# PROPHYLE FOOTPRINT #
######################
def prophyle_footprint(index_dir):
bwt_size = pro.file_sizes(os.path.join(index_dir, "index.fa.bwt"))[0]
index_size = 2 * bwt_size
print(pro.sizeof_fmt(index_size))
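# Back-of-the-envelope example of the estimate above (illustration only): for
# an index.fa.bwt of 4 GB, the reported footprint is 2 * 4 GB = 8 GB. The
# factor 2 is consistent with the checks in prophyle_classify, where the
# sampled suffix array is about half of the BWT size and the k-LCP array
# about a quarter.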
#####################
# PROPHYLE COMPRESS #
#####################
def prophyle_compress(index_dir, archive):
_compile_prophyle_bin(parallel=True)
tmp_dir = tempfile.mkdtemp()
arcdir = index_dir.rstrip("/").split("/")[-1]
tmp_arc_dir = os.path.join(tmp_dir, arcdir)
# todo: should create a correct directory
pro.message("Creating a temporary directory for files to compress")
pro.makedirs(tmp_arc_dir)
for x in FILES_TO_ARCHIVE:
if x == "index.fa.bwt":
continue
pro.cp_to_dir(os.path.join(index_dir, x), tmp_arc_dir)
bwt_fn_1 = os.path.join(index_dir, "index.fa.bwt")
bwt_fn_2 = os.path.join(tmp_arc_dir, "index.fa.bwt")
cmd = [IND, "debwtupdate", bwt_fn_1, bwt_fn_2]
pro.run_safe(cmd)
pro.message("Creating '{}'".format(archive))
with tarfile.open(archive, "w:gz") as tar:
tar.add(tmp_arc_dir, arcname=arcdir)
pro.message("File '{}' has been created".format(archive))
#######################
# PROPHYLE DECOMPRESS #
#######################
def prophyle_decompress(archive, output_dir, klcp):
pro.test_files(archive)
if not os.path.isdir(output_dir):
pro.error("Directory '{}' does not exist.".format(output_dir))
_compile_prophyle_bin(parallel=True)
with tarfile.open(archive) as tar:
names = tar.getnames()
index_name = names[0]
for x in FILES_TO_ARCHIVE:
if not os.path.join(index_name, x) in names:
pro.error("File '{}' is missing in the archive".format(x))
index_dir = os.path.join(output_dir, index_name)
index_exists = True
for i in range(1, 7):
fn = os.path.join(index_dir, ".complete.{}".format(i))
if not os.path.isfile(fn):
index_exists = False
break
if index_exists:
pro.message("Index already exists")
return
_compile_prophyle_bin(parallel=True)
pro.message("Decompressing core index files")
cmd = ["tar", "xvf", archive, "-C", output_dir]
pro.run_safe(cmd)
fn = os.path.join(index_dir, ".complete.4")
pro.rm(fn)
pro.message("Reconstructing the index")
pro.touch(os.path.join(index_dir, "index.fa"))
pro.touch(os.path.join(index_dir, "index.fa.pac"))
if klcp:
config = pro.load_index_config(index_dir)
cmd = [PROPHYLE, "index", "-k", config['k'], os.path.join(index_dir, "tree.nw"), index_dir]
else:
cmd = [PROPHYLE, "index", "-K", os.path.join(index_dir, "tree.nw"), index_dir]
pro.run_safe(cmd)
pro.message("Index reconstruction finished")
####################
# PROPHYLE COMPILE #
####################
def prophyle_compile(clean, parallel, force):
_compile_prophyle_bin(clean=clean, parallel=parallel, force=force, silent=False)
########
# MAIN #
########
def parser():
class MyParser(argparse.ArgumentParser):
def error(self, message):
if len(sys.argv) == 2:
self.print_help()
sys.exit(2)
else:
pro.error(message, 2)
desc = """\
Program: prophyle (phylogeny-based metagenomic classification)
Version: {V}
Authors: Karel Brinda, Kamil Salikhov, Simone Pignotti, Gregory Kucherov
Contact: kbrinda@hsph.harvard.edu
Usage: prophyle <command> [options]
""".format(V=version.VERSION)
parser = MyParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(desc))
parser.add_argument(
'-v',
'--version',
action='version',
version='ProPhyle {} (rev {}, commit {})'.format(version.VERSION, version.REVCOUNT, version.SHORTHASH),
)
_add_configuration_parameter(parser, visible=False)
subparsers = parser.add_subparsers(help="", description=argparse.SUPPRESS, dest='subcommand', metavar="")
fc = lambda prog: argparse.HelpFormatter(prog, max_help_position=27)
##########
parser_download = subparsers.add_parser(
'download',
help='download a genomic database',
# description='Download RefSeq and HMP databases.',
formatter_class=fc,
)
parser_download.add_argument(
'library',
metavar='<library>',
nargs='+',
choices=LIBRARIES + ['all'],
help='genomic library {}'.format(LIBRARIES + ['all']),
)
parser_download.add_argument(
'-d',
metavar='DIR',
dest='home_dir',
type=str,
default=None,
help='directory for the tree and the sequences [~/prophyle]',
)
parser_download.add_argument(
'-l',
dest='log_fn',
metavar='STR',
type=str,
help='log file',
default=None,
)
parser_download.add_argument(
'-F',
dest='force',
action='store_true',
help='rewrite library files if they already exist',
)
_add_configuration_parameter(parser_download)
##########
parser_index = subparsers.add_parser(
'index',
help='build index',
formatter_class=fc,
)
parser_index.add_argument(
'tree',
metavar='<tree.nw>',
type=str,
nargs='+',
help='phylogenetic tree (in Newick/NHX)',
)
parser_index.add_argument(
'index_dir',
metavar='<index.dir>',
type=str,
help='index directory (will be created)',
)
parser_index.add_argument(
'-g',
metavar='DIR',
dest='library_dir',
type=str,
help='directory with the library sequences [dir. of the first tree]',
default=None,
# required=True,
)
parser_index.add_argument(
'-j',
metavar='INT',
dest='threads',
type=int,
help='number of threads [auto ({})]'.format(DEFAULT_THREADS),
default=DEFAULT_THREADS,
)
parser_index.add_argument(
'-k',
dest='k',
metavar='INT',
type=int,
help='k-mer length [{}]'.format(DEFAULT_K),
default=DEFAULT_K,
)
parser_index.add_argument(
'-l',
dest='log_fn',
metavar='STR',
type=str,
help='log file [<index.dir>/log.txt]',
default=None,
)
parser_index.add_argument(
'-s',
metavar='FLOAT',
help='rate of sampling of the tree [no sampling]',
dest='sampling_rate',
type=str,
default=None,
)
parser_index.add_argument(
'-F',
dest='force',
action='store_true',
help='rewrite index files if they already exist',
)
parser_index.add_argument(
'-M',
action='store_true',
dest='mask_repeats',
help='mask repeats/low complexity regions (using DustMasker)',
)
parser_index.add_argument(
'-P',
dest='no_prefixes',
action='store_true',
help='do not add prefixes to node names when multiple trees are used',
)
parser_index.add_argument(
'-S',
dest='stop_after_propagation',
action='store_true',
help='stop after k-mer propagation (no BWT index construction)',
)
parser_index.add_argument(
'-K',
dest='klcp',
action='store_false',
help='skip k-LCP construction (then restarted search only)',
)
parser_index.add_argument(
'-T',
dest='keep_tmp_files',
action='store_true',
help='keep temporary files from k-mer propagation',
)
parser_index.add_argument(
'-A',
help='autocomplete tree (names of internal nodes and FASTA paths)',
dest='autocomplete',
action='store_true',
)
parser_index.add_argument(
'-R',
help='switch propagation off (only re-assemble leaves)',
dest='nonprop',
action='store_true',
)
_add_configuration_parameter(parser_index)
##########
parser_classify = subparsers.add_parser(
'classify',
help='classify reads',
# description='Classify reads.',
formatter_class=fc,
)
parser_classify.add_argument(
'index_dir',
metavar='<index.dir>',
type=str,
help='index directory',
)
parser_classify.add_argument(
'reads',
metavar='<reads1.fq>',
type=str,
help='first file with reads in FASTA/FASTQ (- for standard input)',
)
parser_classify.add_argument(
'reads_pe',
metavar='<reads2.fq>',
type=str,
help='second file with reads in FASTA/FASTQ',
nargs='?',
default=None,
)
parser_classify.add_argument(
'-k',
dest='k',
metavar='INT',
type=int,
help='k-mer length [detect automatically from the index]',
default=None,
)
parser_classify.add_argument(
'-m',
dest='measure',
choices=['h1', 'c1', 'h2', 'c2'],
help='measure: h1=hit count, c1=coverage, h2=norm.hit count, c2=norm.coverage [{}]'.format(DEFAULT_MEASURE),
default=DEFAULT_MEASURE,
)
parser_classify.add_argument(
'-f',
dest='oform',
choices=['kraken', 'sam'],
default=DEFAULT_OUTPUT_FORMAT,
help='output format [{}]'.format(DEFAULT_OUTPUT_FORMAT),
)
parser_classify.add_argument(
'-l',
dest='log_fn',
metavar='STR',
type=str,
help='log file',
default=None,
)
parser_classify.add_argument(
'-P',
dest='print_seq',
action='store_true',
help='incorporate sequences and qualities into SAM records',
)
parser_classify.add_argument(
'-A',
dest='annotate',
action='store_true',
help='annotate assignments (using tax. information from NHX)',
)
parser_classify.add_argument(
'-L',
dest='tie_lca',
action='store_true',
help='replace read assignments by their LCA',
)
parser_classify.add_argument(
'-X',
dest='kmer_lca',
action='store_true',
help='replace k-mer matches by their LCA',
)
parser_classify.add_argument(
'-M',
dest='mimic',
action='store_true',
help='mimic Kraken (equivalent to "-m h1 -f kraken -L -X")',
)
parser_classify.add_argument(
'-C',
dest='cimpl',
action='store_true',
help='use C++ impl. of the assignment algorithm (experimental)',
#help=argparse.SUPPRESS,
)
parser_classify.add_argument(
'-K',
dest='force_restarted_search',
action='store_true',
help='force restarted search mode',
)
_add_configuration_parameter(parser_classify)
##########
parser_analyze = subparsers.add_parser(
'analyze',
help='analyze results (experimental)',
formatter_class=fc,
)
parser_analyze.add_argument(
'index_dir', metavar='{index_dir, tree.nw}', type=str, help='index directory or phylogenetic tree'
)
parser_analyze.add_argument(
'out_prefix',
metavar='<out.pref>',
type=str,
help="output prefix",
)
parser_analyze.add_argument(
'input_fns',
metavar='<classified.bam>',
type=str,
nargs='+',
default=None,
help="classified reads (use '-' for stdin)",
)
parser_analyze.add_argument(
'-s', metavar=ANALYZE_STATS, type=str, dest='stats', choices=ANALYZE_STATS, default=ANALYZE_STATS[0],
help="""statistics to use for the computation of histograms:
w (default) => weighted assignments;
u => unique assignments, non-weighted;
wl => weighted assignments, propagated to leaves;
ul => unique assignments, propagated to leaves."""
)
parser_analyze.add_argument(
'-f', metavar=ANALYZE_IN_FMTS, type=str, dest='in_format', choices=ANALYZE_IN_FMTS, default=None,
help="""Input format of assignments [auto]"""
)
_add_configuration_parameter(parser_analyze)
##########
parser_footprint = subparsers.add_parser(
'footprint',
help='estimate memory footprint',
formatter_class=fc,
)
parser_footprint.add_argument(
'index_dir',
metavar='<index.dir>',
type=str,
help='index directory',
)
_add_configuration_parameter(parser_footprint)
##########
parser_compress = subparsers.add_parser(
'compress',
help='compress a ProPhyle index',
formatter_class=fc,
)
parser_compress.add_argument(
'index_dir',
metavar='<index.dir>',
type=str,
help='index directory',
)
parser_compress.add_argument(
'archive',
metavar='<archive.tar.gz>',
type=str,
default=None,
nargs="?",
help='output archive [<index.dir>.tar.gz]',
)
_add_configuration_parameter(parser_compress)
##########
parser_decompress = subparsers.add_parser(
'decompress',
help='decompress a compressed ProPhyle index',
formatter_class=fc,
)
parser_decompress.add_argument(
'archive',
metavar='<archive.tar.gz>',
type=str,
help='output archive',
)
parser_decompress.add_argument(
'output_dir',
metavar='<output.dir>',
type=str,
nargs="?",
default="./",
help='output directory [./]',
)
parser_decompress.add_argument(
'-K',
dest='klcp',
action='store_false',
help='skip k-LCP construction',
)
_add_configuration_parameter(parser_decompress)
##########
parser_compile = subparsers.add_parser(
'compile',
help='compile auxiliary ProPhyle programs',
formatter_class=fc,
)
parser_compile.add_argument(
'-C',
dest='clean',
action='store_true',
help='clean files instead of compiling',
)
parser_compile.add_argument(
'-F',
dest='force',
action='store_true',
help='force recompilation',
)
parser_compile.add_argument(
'-P',
dest='parallel',
action='store_true',
help='run compilation in parallel',
)
_add_configuration_parameter(parser_compile)
##########
return parser
def main():
try:
par = parser()
args = par.parse_args()
subcommand = args.subcommand
global CONFIG
prophyle_conf_string = pro.load_prophyle_conf(CONFIG, args.config)
if subcommand == "download":
pro.open_log(args.log_fn)
for single_lib in args.library:
pro.message('Downloading "{}" started'.format(single_lib))
prophyle_download(
library=single_lib,
library_dir=args.home_dir,
force=args.force,
)
pro.message('Downloading "{}" finished'.format(single_lib))
pro.close_log()
elif subcommand == "index":
if args.library_dir is None:
library_dir = os.path.dirname(args.tree[0])
else:
library_dir = args.library_dir
if args.log_fn is None:
args.log_fn = os.path.join(args.index_dir, "log.txt")
pro.open_log(args.log_fn)
pro.message('Index construction started')
prophyle_index(
index_dir=args.index_dir,
threads=args.threads,
k=args.k,
trees_fn=args.tree,
library_dir=library_dir,
force=args.force,
construct_klcp=args.klcp,
no_prefixes=args.no_prefixes,
stop_after_propagation=args.stop_after_propagation,
mask_repeats=args.mask_repeats,
keep_tmp_files=args.keep_tmp_files,
sampling_rate=args.sampling_rate,
autocomplete=args.autocomplete,
nonprop=args.nonprop,
)
pro.message('Index construction finished')
pro.close_log()
elif subcommand == "classify":
# if args.log_fn is None:
# args.log_fn = os.path.join(args.index_dir, "log.txt")
pro.open_log(args.log_fn)
pro.message('Classification started')
prophyle_classify(
index_dir=args.index_dir,
fq_fn=args.reads,
fq_pe_fn=args.reads_pe,
k=args.k,
out_format=args.oform,
mimic_kraken=args.mimic,
measure=args.measure,
tie_lca=args.tie_lca,
kmer_lca=args.kmer_lca,
annotate=args.annotate,
print_seq=args.print_seq,
cimpl=args.cimpl,
force_restarted_search=args.force_restarted_search,
prophyle_conf_string=prophyle_conf_string, # already preprocessed
)
pro.message('Classification finished')
pro.close_log()
elif subcommand == "analyze":
prophyle_analyze(
index_dir=args.index_dir,
out_prefix=args.out_prefix,
input_fns=args.input_fns,
stats=args.stats,
in_format=args.in_format,
)
elif subcommand == "footprint":
prophyle_footprint(index_dir=args.index_dir, )
elif subcommand == "compress":
if args.archive is None:
archive = args.index_dir.rstrip("/") + ".tar.gz"
else:
archive = args.archive
prophyle_compress(
index_dir=args.index_dir,
archive=archive,
)
elif subcommand == "decompress":
prophyle_decompress(
archive=args.archive,
output_dir=args.output_dir,
klcp=args.klcp,
)
elif subcommand == "compile":
prophyle_compile(
clean=args.clean,
parallel=args.parallel,
force=args.force,
)
else:
msg_lns = par.format_help().split("\n")[2:]
msg_lns = [x for x in msg_lns if x.find("optional arguments") == -1 and x.find("--") == -1]
msg = "\n".join(msg_lns)
msg = msg.replace("\n\n", '\n').replace("subcommands:\n", "Command:\n").replace("Usage", "\nUsage")
msg = msg.replace("\n compress", "\n\n compress")
print(file=sys.stderr)
print(msg, file=sys.stderr)
sys.exit(2)
except BrokenPipeError:
# pipe error (e.g., when head is used)
sys.stderr.close()
sys.stdout.close()
exit(0)
except KeyboardInterrupt:
pro.error("Error: Keyboard interrupt")
finally:
sys.stdout.flush()
sys.stderr.flush()
if __name__ == "__main__":
main()
|
karel-brinda/prophyle
|
prophyle/prophyle.py
|
Python
|
mit
| 48,225
|
[
"BWA"
] |
ffe9a11b202539a90a99448d2935ffba9055831a9e9d115c158d5f57240d08ee
|
# Placeholder because units moved
# Remove this in version 1.0
from __future__ import absolute_import
import warnings
with warnings.catch_warnings():
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(("units has moved to MDAnalysis.units "
"and will be removed from here in release 1.0"),
DeprecationWarning)
from ..units import *
|
kain88-de/mdanalysis
|
package/MDAnalysis/core/units.py
|
Python
|
gpl-2.0
| 394
|
[
"MDAnalysis"
] |
ece0995e2f588c98dcbe547071ac084acaa9d1f262b47555214d8724d3c6209a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import numpy as np
import unittest
import os
from pymatgen.analysis.find_dimension import find_dimension
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import Xdatcar
from pymatgen import Element, Structure, Lattice
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class FindDimensionTest(PymatgenTest):
def test_get_dimensionality(self):
s = self.get_structure('LiFePO4')
self.assertEqual(find_dimension(s), 'intercalated ion')
s = self.get_structure('Graphite')
self.assertEqual(find_dimension(s), '2D')
def test_get_dimensionality_with_bonds(self):
s = self.get_structure('CsCl')
self.assertEqual(find_dimension(s), 'intercalated ion')
self.assertEqual(find_dimension(s, ldict={"Cs":3.7, "Cl": 3}), '3D')
if __name__ == '__main__':
unittest.main()
|
nisse3000/pymatgen
|
pymatgen/analysis/tests/test_find_dimension.py
|
Python
|
mit
| 1,109
|
[
"VASP",
"pymatgen"
] |
14274f369ae8f5946b0dc90cc83115ea3af00aa498a000e69abf628946d36c33
|
#!/usr/bin/env python
import sys
from g03 import *
from g09 import *
from g16 import *
from gamess import *
from nwchem import *
from orca import *
from qchem import *
from xyz import *
def guess(filename):
    '''Return a parser object for filename, instantiated from the class guessed from its content.'''
#
# Dictionary of unique sentences in QM packages output files to guess
# the correct parser to use
#
filetypes = {}
filetypes["This is the Gaussian(R) 03 program."] = G03
filetypes["This is part of the Gaussian(R) 09 program."] = G09
filetypes["This is part of the Gaussian(R) 16 program."] = G16
filetypes["GAMESS VERSION"] = Gamess
filetypes["Northwest Computational Chemistry Package (NWChem)"] = NWChem
filetypes["* O R C A *"] = Orca
filetypes["A Quantum Leap Into The Future Of Chemistry"] = QChem
filetype = None
done = False
with open(filename) as f:
for line in f:
for sentence in filetypes.keys():
if sentence in line:
filetype = filetypes[sentence]
done = True
break
# once the type has been identified, exit the cycles
if done:
break
if not filetype:
try:
XYZ(filename)
filetype = XYZ
        except Exception:
pass
if not filetype:
print(" %s" % filename)
print(" File type not known")
sys.exit()
return filetype(filename)
if __name__ == '__main__':
    testfile = sys.argv[1]
    # guess() already returns an instantiated parser, so no second call is needed
    data = guess(testfile)
    print(data.energies)
|
dpadula85/ExSPy
|
stable/QM_parser/parser/guess.py
|
Python
|
gpl-3.0
| 1,657
|
[
"GAMESS",
"Gaussian",
"NWChem",
"ORCA"
] |
9ef2edd70a263b27f55376b4a14467f05ae5d8bf6833ea7d5ad21ec4a025601a
|
import logging
from functools import reduce
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import re
from Bio import SeqIO
import concurrent.futures as cfutures
from itertools import repeat
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["channel", "start_time", "duration", "sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["channelIDs", "time", "duration", "lengths", "quals", "barcode"]
else:
datadf.columns = ["channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
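# Shape of the frame returned above (illustration only): for a 1D run the
# columns are ["channelIDs", "time", "duration", "lengths", "quals"], with an
# extra "barcode" column when barcoded=True; zero-length reads are dropped
# before the frame is returned.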
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, ut.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
if len(chromosomes) > 100 or kwargs["huge"]:
logging.info("Nanoget: lots of contigs (>100) or --huge, not running in separate processes")
datadf = pd.DataFrame(
data=extract_from_bam(bam, None, kwargs["keep_supp"]),
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
else:
unit = chromosomes
with cfutures.ProcessPoolExecutor(max_workers=kwargs["threads"]) as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam,
repeat(bam),
unit,
repeat(kwargs["keep_supp"]))
for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info(f"Nanoget: bam {bam} contains {datadf['lengths'].size} primary alignments.")
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
if len(chromosomes) > 100:
unit = [None]
logging.info("Nanoget: lots of contigs (>100), not running in separate processes")
else:
unit = chromosomes
with cfutures.ProcessPoolExecutor(max_workers=kwargs["threads"]) as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam,
repeat(cram), unit, repeat(kwargs["keep_supp"]))
for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info(f"Nanoget: cram {cram} contains {datadf['lengths'].size} primary alignments.")
return ut.reduce_memory_usage(datadf)
def extract_from_bam(bam, chromosome, keep_supplementary=True):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
samfile = pysam.AlignmentFile(bam, "rb")
if keep_supplementary:
return [
(read.query_name,
ut.ave_qual(read.query_qualities),
ut.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary and not read.is_unmapped]
else:
return [
(read.query_name,
ut.ave_qual(read.query_qualities),
ut.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary and not read.is_unmapped and not read.is_supplementary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
match = reduce(lambda x, y: x + y[1] if y[0] in (0, 7, 8) else x, read.cigartuples, 0)
ins = reduce(lambda x, y: x + y[1] if y[0] == 1 else x, read.cigartuples, 0)
delt = reduce(lambda x, y: x + y[1] if y[0] == 2 else x, read.cigartuples, 0)
alignment_length = match + ins + delt
try:
return (1 - read.get_tag("NM") / alignment_length) * 100
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples)) /
alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
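# Worked example for get_pID (illustration only): for a read with CIGAR
# 50M2I48M, cigartuples is [(0, 50), (1, 2), (0, 48)], so match=98, ins=2,
# delt=0 and alignment_length=100; with an NM tag of 5 the percent identity
# is (1 - 5/100) * 100 = 95.0.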
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
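# Worked examples for the two helpers above (illustration only):
#   parse_MD("10A5^AC6") -> 3                      # one mismatch (A) + two deleted bases (AC)
#   parse_CIGAR([(0, 50), (1, 2), (0, 48)]) -> 2   # two inserted bases
# Splitting the MD string on digits and '^' leaves exactly the mismatched and
# deleted reference bases, whose lengths are summed.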
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
    if inputfq.endswith(('.gz', '.bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
    elif inputfq.endswith(('.fastq', '.fq', '.fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield ut.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
    -read_length
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
ut.ave_qual(rec.letter_annotations["phred_quality"]),
None)
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
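# Example (illustration only): for a description such as
# "read_id runid=abc123 read=72 ch=159 start_time=2016-07-15T14:23:22Z",
# info_to_dict skips the leading identifier and returns
# {'runid': 'abc123', 'read': '72', 'ch': '159',
#  'start_time': '2016-07-15T14:23:22Z'}.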
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(ut.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
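# Minimal usage sketch for readfq (illustration only): it yields
# (name, sequence, qualities), with qualities=None for FASTA records.
#
#   with open("reads.fq") as fp:
#       for name, seq, qual in readfq(fp):
#           print(name, len(seq))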
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
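# What the slicing above extracts, assuming the rich header layout described
# in process_fastq_rich (an assumption, hence the "dragons" warning): for a
# header line "@id runid=abc read=72 ch=159 start_time=2016-07-15T14:23:22Z\n",
# [1:] drops the '@', split(" ")[4] yields "start_time=...Z\n", and [11:-1]
# strips the 11-character "start_time=" prefix and the trailing newline,
# leaving "2016-07-15T14:23:22Z".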
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget
|
nanoget/extraction_functions.py
|
Python
|
gpl-3.0
| 18,527
|
[
"pysam"
] |
d42461c32e146c526faff721abac5bd8269946b2ccdf2e4cfe10c1ecc139732a
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2009-2011 by the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import rmgpy.molecule
"""
This module provides functionality for estimating the symmetry number of a
molecule from its chemical graph representation.
"""
def calculateAtomSymmetryNumber(molecule, atom):
"""
Return the symmetry number centered at `atom` in the structure. The
`atom` of interest must not be in a cycle.
"""
symmetryNumber = 1
single = 0; double = 0; triple = 0; benzene = 0
numNeighbors = 0
for bond in atom.edges.values():
if bond.isSingle(): single += 1
elif bond.isDouble(): double += 1
elif bond.isTriple(): triple += 1
elif bond.isBenzene(): benzene += 1
numNeighbors += 1
# If atom has zero or one neighbors, the symmetry number is 1
if numNeighbors < 2: return symmetryNumber
# Create temporary structures for each functional group attached to atom
molecule0 = molecule
molecule = molecule0.copy(True)
atom = molecule.vertices[molecule0.vertices.index(atom)]
molecule.removeAtom(atom)
groups = molecule.split()
# Determine equivalence of functional groups around atom
groupIsomorphism = dict([(group, dict()) for group in groups])
for group1 in groups:
for group2 in groups:
if group1 is not group2 and group2 not in groupIsomorphism[group1]:
groupIsomorphism[group1][group2] = group1.isIsomorphic(group2)
groupIsomorphism[group2][group1] = groupIsomorphism[group1][group2]
elif group1 is group2:
groupIsomorphism[group1][group1] = True
count = [sum([int(groupIsomorphism[group1][group2]) for group2 in groups]) for group1 in groups]
    # integer division keeps the counts as ints under both Python 2 and 3
    for i in range(count.count(2) // 2):
        count.remove(2)
    for i in range(count.count(3) // 3):
        count.remove(3); count.remove(3)
    for i in range(count.count(4) // 4):
        count.remove(4); count.remove(4); count.remove(4)
count.sort(); count.reverse()
if atom.radicalElectrons == 0:
if single == 4:
# Four single bonds
if count == [4]: symmetryNumber *= 12
elif count == [3, 1]: symmetryNumber *= 3
elif count == [2, 2]: symmetryNumber *= 2
elif count == [2, 1, 1]: symmetryNumber *= 1
elif count == [1, 1, 1, 1]: symmetryNumber *= 1
elif single == 3:
# Three single bonds
if count == [3]: symmetryNumber *= 3
elif count == [2, 1]: symmetryNumber *= 1
elif count == [1, 1, 1]: symmetryNumber *= 1
elif single == 2:
# Two single bonds
if count == [2]: symmetryNumber *= 2
elif double == 2:
# Two double bonds
if count == [2]: symmetryNumber *= 2
elif atom.radicalElectrons == 1:
if single == 3:
# Three single bonds
if count == [3]: symmetryNumber *= 6
elif count == [2, 1]: symmetryNumber *= 2
elif count == [1, 1, 1]: symmetryNumber *= 1
elif atom.radicalElectrons == 2:
if single == 2:
# Two single bonds
if count == [2]:
symmetryNumber *= 2
if atom.isNitrogen():
for groupN in groups:
if groupN.toSMILES() == "[N+](=O)[O-]":
symmetryNumber *= 2
return symmetryNumber
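# Worked example (illustration only): for the central carbon of neopentane,
# C(CH3)4, all four single-bonded groups are isomorphic, so count == [4] and
# the atom contributes a factor of 12 -- the rotational symmetry of a
# tetrahedral center with four identical ligands.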
################################################################################
def calculateBondSymmetryNumber(molecule, atom1, atom2):
"""
Return the symmetry number centered at `bond` in the structure.
"""
bond = atom1.edges[atom2]
symmetryNumber = 1
if bond.isSingle() or bond.isDouble() or bond.isTriple():
if atom1.equivalent(atom2):
# An O-O bond is considered to be an "optical isomer" and so no
# symmetry correction will be applied
if atom1.atomType.label == 'Os' and atom2.atomType.label == 'Os' and atom1.radicalElectrons == atom2.radicalElectrons == 0:
return symmetryNumber
# If the molecule is diatomic, then we don't have to check the
# ligands on the two atoms in this bond (since we know there
# aren't any)
elif len(molecule.vertices) == 2:
symmetryNumber = 2
else:
molecule.removeBond(bond)
structure = molecule.copy(True)
molecule.addBond(bond)
atom1 = structure.atoms[molecule.atoms.index(atom1)]
atom2 = structure.atoms[molecule.atoms.index(atom2)]
fragments = structure.split()
if len(fragments) != 2: return symmetryNumber
fragment1, fragment2 = fragments
if atom1 in fragment1.atoms: fragment1.removeAtom(atom1)
if atom2 in fragment1.atoms: fragment1.removeAtom(atom2)
if atom1 in fragment2.atoms: fragment2.removeAtom(atom1)
if atom2 in fragment2.atoms: fragment2.removeAtom(atom2)
groups1 = fragment1.split()
groups2 = fragment2.split()
# Test functional groups for symmetry
if len(groups1) == len(groups2) == 1:
if groups1[0].isIsomorphic(groups2[0]): symmetryNumber *= 2
elif len(groups1) == len(groups2) == 2:
if groups1[0].isIsomorphic(groups2[0]) and groups1[1].isIsomorphic(groups2[1]): symmetryNumber *= 2
elif groups1[1].isIsomorphic(groups2[0]) and groups1[0].isIsomorphic(groups2[1]): symmetryNumber *= 2
elif len(groups1) == len(groups2) == 3:
if groups1[0].isIsomorphic(groups2[0]) and groups1[1].isIsomorphic(groups2[1]) and groups1[2].isIsomorphic(groups2[2]): symmetryNumber *= 2
elif groups1[0].isIsomorphic(groups2[0]) and groups1[1].isIsomorphic(groups2[2]) and groups1[2].isIsomorphic(groups2[1]): symmetryNumber *= 2
elif groups1[0].isIsomorphic(groups2[1]) and groups1[1].isIsomorphic(groups2[2]) and groups1[2].isIsomorphic(groups2[0]): symmetryNumber *= 2
elif groups1[0].isIsomorphic(groups2[1]) and groups1[1].isIsomorphic(groups2[0]) and groups1[2].isIsomorphic(groups2[2]): symmetryNumber *= 2
elif groups1[0].isIsomorphic(groups2[2]) and groups1[1].isIsomorphic(groups2[0]) and groups1[2].isIsomorphic(groups2[1]): symmetryNumber *= 2
elif groups1[0].isIsomorphic(groups2[2]) and groups1[1].isIsomorphic(groups2[1]) and groups1[2].isIsomorphic(groups2[0]): symmetryNumber *= 2
return symmetryNumber
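# Worked example (illustration only): for the C-C bond of ethane the two
# carbons are equivalent; after removing the bond and the two carbons, each
# side splits into three identical H groups, so the len(groups1) ==
# len(groups2) == 3 branch matches and the bond contributes a factor of 2.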
################################################################################
def calculateAxisSymmetryNumber(molecule):
"""
Get the axis symmetry number correction. The "axis" refers to a series
of two or more cumulated double bonds (e.g. C=C=C, etc.). Corrections
    for single C=C bonds are handled in calculateBondSymmetryNumber().
Each axis (C=C=C) has the potential to double the symmetry number.
If an end has 0 or 1 groups (eg. =C=CJJ or =C=C-R) then it cannot
alter the axis symmetry and is disregarded::
A=C=C=C.. A-C=C=C=C-A
s=1 s=1
If an end has 2 groups that are different then it breaks the symmetry
and the symmetry for that axis is 1, no matter what's at the other end::
A\ A\ /A
T=C=C=C=C-A T=C=C=C=T
B/ A/ \B
s=1 s=1
If you have one or more ends with 2 groups, and neither end breaks the
symmetry, then you have an axis symmetry number of 2::
A\ /B A\
C=C=C=C=C C=C=C=C-B
A/ \B A/
s=2 s=2
"""
symmetryNumber = 1
# List all double bonds in the structure
doubleBonds = []
for atom1 in molecule.vertices:
for atom2 in atom1.edges:
if atom1.edges[atom2].isDouble() and molecule.vertices.index(atom1) < molecule.vertices.index(atom2):
doubleBonds.append((atom1, atom2))
# Search for adjacent double bonds
cumulatedBonds = []
for i, bond1 in enumerate(doubleBonds):
atom11, atom12 = bond1
for bond2 in doubleBonds[i+1:]:
atom21, atom22 = bond2
if atom11 is atom21 or atom11 is atom22 or atom12 is atom21 or atom12 is atom22:
listToAddTo = None
for cumBonds in cumulatedBonds:
if (atom11, atom12) in cumBonds or (atom21, atom22) in cumBonds:
listToAddTo = cumBonds
if listToAddTo is not None:
if (atom11, atom12) not in listToAddTo: listToAddTo.append((atom11, atom12))
if (atom21, atom22) not in listToAddTo: listToAddTo.append((atom21, atom22))
else:
cumulatedBonds.append([(atom11, atom12), (atom21, atom22)])
# Also keep isolated double bonds
for bond1 in doubleBonds:
for bonds in cumulatedBonds:
if bond1 in bonds:
break
else:
cumulatedBonds.append([bond1])
# For each set of adjacent double bonds, check for axis symmetry
for bonds in cumulatedBonds:
# Do nothing if less than two cumulated bonds
if len(bonds) < 1: continue
# Do nothing if axis is in cycle
found = False
for atom1, atom2 in bonds:
if molecule.isBondInCycle(atom1.edges[atom2]): found = True
if found: continue
# Find terminal atoms in axis
# Terminal atoms labelled T: T=C=C=C=T
axis = []
for bond in bonds: axis.extend(bond)
terminalAtoms = []
for atom in axis:
if axis.count(atom) == 1: terminalAtoms.append(atom)
if len(terminalAtoms) != 2: continue
# Remove axis from (copy of) structure
bondlist = []
for atom1, atom2 in bonds:
bond = atom1.edges[atom2]
bondlist.append(bond)
molecule.removeBond(bond)
structure = molecule.copy(True)
terminalAtoms = [structure.vertices[molecule.vertices.index(atom)] for atom in terminalAtoms]
for bond in bondlist:
molecule.addBond(bond)
atomsToRemove = []
for atom in structure.vertices:
if len(atom.edges) == 0 and atom not in terminalAtoms: # it's not bonded to anything
atomsToRemove.append(atom)
for atom in atomsToRemove: structure.removeAtom(atom)
# Split remaining fragments of structure
end_fragments = structure.split()
#
# there can be two groups at each end A\ /B
# T=C=C=C=T
# A/ \B
# to start with nothing has broken symmetry about the axis
symmetry_broken=False
end_fragments_to_remove = []
for fragment in end_fragments: # a fragment is one end of the axis
# remove the atom that was at the end of the axis and split what's left into groups
terminalAtom = None
for atom in terminalAtoms:
if atom in fragment.atoms:
terminalAtom = atom
fragment.removeAtom(atom)
break
else:
continue
groups = []
if len(fragment.atoms) > 0:
groups = fragment.split()
# If end has only one group then it can't contribute to (nor break) axial symmetry
# Eg. this has no axis symmetry: A-T=C=C=C=T-A
# so we remove this end from the list of interesting end fragments
if len(groups) == 0:
end_fragments_to_remove.append(fragment)
continue # next end fragment
elif len(groups)==1 and terminalAtom.radicalElectrons == 0:
if terminalAtom.atomType.label == 'N3d':
symmetry_broken = True
else:
end_fragments_to_remove.append(fragment)
continue # next end fragment
elif len(groups)==1 and terminalAtom.radicalElectrons != 0:
symmetry_broken = True
elif len(groups)==2:
if not groups[0].isIsomorphic(groups[1]):
# this end has broken the symmetry of the axis
symmetry_broken = True
for fragment in end_fragments_to_remove:
end_fragments.remove(fragment)
# If there are end fragments left that can contribute to symmetry,
# and none of them broke it, then double the symmetry number
# NB>> This assumes coordination number of 4 (eg. Carbon).
# And would be wrong if we had /B
# =C=C=C=C=T-B
# \B
# (for some T with coordination number 5).
if end_fragments and not symmetry_broken:
symmetryNumber *= 2
return symmetryNumber
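# Worked example (illustration only): allene, H2C=C=CH2, contains one
# cumulated C=C=C axis; each terminal carbon carries two isomorphic H groups,
# so neither end breaks the symmetry and the axis contributes a factor of 2.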
################################################################################
def calculateCyclicSymmetryNumber(molecule):
"""
Get the symmetry number correction for cyclic regions of a molecule.
For complicated fused rings the smallest set of smallest rings is used.
"""
from rdkit.Chem.rdmolops import SanitizeMol
from rdkit.Chem.rdchem import Mol
mcopy = molecule.toRDKitMol(removeHs=True, returnMapping=False)
SanitizeMol(mcopy)
symmetryNumber = 1
# Get symmetry number for each ring in structure
rings = molecule.getSmallestSetOfSmallestRings()
for ring0 in rings:
# Make copy of structure
structure = molecule.copy(True)
ring = [structure.atoms[molecule.atoms.index(atom)] for atom in ring0]
# Figure out which atoms and bonds are aromatic and reassign appropriately:
for i, atom1 in enumerate(ring0):
for atom2 in ring0[i+1:]:
if molecule.hasBond(atom1, atom2):
if mcopy.GetBondBetweenAtoms(i,i+1) is not None:
if str(mcopy.GetBondBetweenAtoms(i,i+1).GetBondType()) == 'AROMATIC':
bond = molecule.getBond(atom1, atom2)
bond.applyAction(['CHANGE_BOND', atom1, 'B', atom2])
atom1.atomType = atom2.atomType = rmgpy.molecule.atomTypes['Cb']
else:
pass
# Remove bonds of ring from structure
for i, atom1 in enumerate(ring):
for atom2 in ring[i+1:]:
if structure.hasBond(atom1, atom2):
structure.removeBond(atom1.edges[atom2])
structures = structure.split()
groups = []
for struct in structures:
for atom in ring:
if struct.hasAtom(atom): struct.removeAtom(atom)
groups.append(struct.split())
# Find equivalent functional groups on ring
equivalentGroups = []; equivalentGroupCount = []
for group in groups:
found = False
for i, eqGroup in enumerate(equivalentGroups):
if not found and len(group) == len(eqGroup):
for g, eg in zip(group, eqGroup):
if not g.isIsomorphic(eg):
# The groups do not match
break
else:
# The groups match
found = True
if found:
# We've found a matching group, so increment its count
equivalentGroupCount[i] += 1
break
else:
# No matching group found, so add it as a new group
equivalentGroups.append(group)
equivalentGroupCount.append(1)
# Find equivalent bonds on ring
equivalentBonds = []
for i, atom1 in enumerate(ring0):
for atom2 in ring0[i+1:]:
if molecule.hasBond(atom1, atom2):
bond = molecule.getBond(atom1, atom2)
found = False
for eqBond in equivalentBonds:
if not found:
if bond.equivalent(eqBond[0]):
                                eqBond.append(bond)  # track the bond itself; only len(eqBond) is used below
found = True
if not found:
equivalentBonds.append([bond])
# Find maximum number of equivalent groups and bonds
minEquivalentGroups = min(equivalentGroupCount)
maxEquivalentGroups = max(equivalentGroupCount)
minEquivalentBonds = None
maxEquivalentBonds = 0
for bonds in equivalentBonds:
N = len(bonds)
if minEquivalentBonds is None or N < minEquivalentBonds:
minEquivalentBonds = N
if N > maxEquivalentBonds:
maxEquivalentBonds = N
if maxEquivalentGroups == maxEquivalentBonds == len(ring):
symmetryNumber *= len(ring) * 2
else:
symmetryNumber *= min(minEquivalentGroups, minEquivalentBonds)
#print len(ring), minEquivalentGroups, maxEquivalentGroups, minEquivalentBonds, maxEquivalentBonds, symmetryNumber
return symmetryNumber
################################################################################
def calculateSymmetryNumber(molecule):
"""
Return the symmetry number for the structure. The symmetry number
includes both external and internal modes.
"""
symmetryNumber = 1
for atom in molecule.vertices:
if not molecule.isAtomInCycle(atom):
symmetryNumber *= calculateAtomSymmetryNumber(molecule, atom)
for atom1 in molecule.vertices:
for atom2 in atom1.edges:
if molecule.vertices.index(atom1) < molecule.vertices.index(atom2) and not molecule.isBondInCycle(atom1.edges[atom2]):
symmetryNumber *= calculateBondSymmetryNumber(molecule, atom1, atom2)
symmetryNumber *= calculateAxisSymmetryNumber(molecule)
if molecule.isCyclic():
symmetryNumber *= calculateCyclicSymmetryNumber(molecule)
return symmetryNumber
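# Worked example (illustration only): for methane, the central carbon is
# acyclic with four identical H ligands (atom factor 12), each C-H bond joins
# non-equivalent atoms (bond factor 1), and there is no cumulated axis or
# cycle, so calculateSymmetryNumber returns 12 -- the accepted symmetry
# number of CH4.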
|
faribas/RMG-Py
|
rmgpy/molecule/symmetry.py
|
Python
|
mit
| 20,083
|
[
"RDKit"
] |
cc788b55124cb248691f2445c33f4acf85897cf600e0241131206890a13c0c61
|
# -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for LTI xblock
"""
from __future__ import absolute_import
import os
from common.test.acceptance.pages.lms.instructor_dashboard import (
GradeBookPage,
InstructorDashboardPage,
StudentAdminPage
)
from common.test.acceptance.pages.lms.progress import ProgressPage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.lms.courseware import CoursewarePage, LTIContentIframe
from ..helpers import UniqueCourseTest, auto_auth, select_option_by_text
class TestLTIConsumer(UniqueCourseTest):
"""
Base class for tests of LTI xblock in the LMS.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
host = os.environ.get('BOK_CHOY_HOSTNAME', '127.0.0.1')
def setUp(self):
        super(TestLTIConsumer, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.lti_iframe = LTIContentIframe(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
self.grade_book_page = GradeBookPage(self.browser)
# Install a course
display_name = "Test Course" + self.unique_id
self.course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], display_name=display_name
)
def test_lti_no_launch_url_is_not_rendered(self):
"""
Scenario: LTI component in LMS with no launch_url is not rendered
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with an empty launch_url field
        When I view the LTI component
        Then an error message is shown and no iframe is rendered
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'launch_url': '',
'open_in_a_new_page': False
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertTrue(self.courseware_page.is_error_message_present())
self.assertFalse(self.courseware_page.is_iframe_present())
self.assertFalse(self.courseware_page.is_launch_url_present())
def test_incorrect_lti_id_is_rendered_incorrectly(self):
"""
        Scenario: An LTI component in the LMS with an incorrect lti_id is rendered incorrectly
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with an incorrect lti_id field
        Then I view the LTI but a "Wrong LTI signature" warning is rendered
"""
metadata_advance_settings = "test_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'incorrect_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertTrue(self.courseware_page.is_iframe_present())
self.assertFalse(self.courseware_page.is_launch_url_present())
self.assertFalse(self.courseware_page.is_error_message_present())
self.courseware_page.go_to_lti_container()
self.assertEqual("Wrong LTI signature", self.lti_iframe.lti_content)
def test_incorrect_lti_credentials_is_rendered_incorrectly(self):
"""
        Scenario: An LTI component in the LMS with incorrect LTI credentials is rendered incorrectly
        Given the course has incorrect LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I view the LTI but a "Wrong LTI signature" warning is rendered
"""
metadata_advance_settings = "test_lti_id:test_client_key:incorrect_lti_secret_key"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertTrue(self.courseware_page.is_iframe_present())
self.assertFalse(self.courseware_page.is_launch_url_present())
self.assertFalse(self.courseware_page.is_error_message_present())
self.courseware_page.go_to_lti_container()
self.assertEqual("Wrong LTI signature", self.lti_iframe.lti_content)
def test_lti_is_rendered_in_iframe_correctly(self):
"""
        Scenario: An LTI component in the LMS is correctly rendered in an iframe
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I view the LTI and it is rendered in the iframe correctly
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertTrue(self.courseware_page.is_iframe_present())
self.assertFalse(self.courseware_page.is_launch_url_present())
self.assertFalse(self.courseware_page.is_error_message_present())
self.courseware_page.go_to_lti_container()
self.assertEqual("This is LTI tool. Success.", self.lti_iframe.lti_content)
def test_lti_graded_component_for_staff(self):
"""
        Scenario: A graded LTI component in the LMS works correctly for staff
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I submit an answer and verify the scores on the progress and grade book pages
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False,
'weight': 10,
'graded': True,
'has_score': True
}
expected_scores = [(5, 10)]
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.courseware_page.go_to_lti_container()
self.lti_iframe.submit_lti_answer('#submit-button')
self.assertIn("LTI consumer (edX) responded with XML content", self.lti_iframe.lti_content)
self.lti_iframe.switch_to_default()
self.tab_nav.go_to_tab('Progress')
actual_scores = self.progress_page.scores("Test Chapter", "Test Section")
self.assertEqual(actual_scores, expected_scores)
self.assertEqual(['Overall Score', 'Overall Score\n1%'], self.progress_page.graph_overall_score())
self.tab_nav.go_to_tab('Instructor')
student_admin_section = self.instructor_dashboard_page.select_student_admin(StudentAdminPage)
student_admin_section.click_grade_book_link()
self.assertEqual("50", self.grade_book_page.get_value_in_the_grade_book('Homework 1 - Test Section', 1))
self.assertEqual("1", self.grade_book_page.get_value_in_the_grade_book('Total', 1))
def test_lti_switch_role_works_correctly(self):
"""
        Scenario: Role masquerading works correctly for a graded LTI component in the LMS
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I switch the role from Instructor to Learner and verify that it works correctly
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False,
'has_score': True
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertTrue(self.courseware_page.is_iframe_present())
self.assertFalse(self.courseware_page.is_launch_url_present())
self.assertFalse(self.courseware_page.is_error_message_present())
self.courseware_page.go_to_lti_container()
self.assertEqual("This is LTI tool. Success.", self.lti_iframe.lti_content)
self.assertEqual("Role: Instructor", self.lti_iframe.get_user_role)
self.lti_iframe.switch_to_default()
select_option_by_text(self.courseware_page.get_role_selector, 'Learner')
self.courseware_page.wait_for_ajax()
self.assertTrue(self.courseware_page.is_iframe_present())
self.assertFalse(self.courseware_page.is_launch_url_present())
self.assertFalse(self.courseware_page.is_error_message_present())
self.courseware_page.go_to_lti_container()
self.assertEqual("This is LTI tool. Success.", self.lti_iframe.lti_content)
self.assertEqual("Role: Student", self.lti_iframe.get_user_role)
def test_lti_graded_component_for_learner(self):
"""
        Scenario: A graded LTI component in the LMS works correctly for learners
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I submit an answer and verify the scores on the progress page
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False,
'weight': 10,
'graded': True,
'has_score': True
}
expected_scores = [(5, 10)]
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
self.courseware_page.visit()
self.courseware_page.go_to_lti_container()
self.lti_iframe.submit_lti_answer('#submit-button')
self.assertIn("LTI consumer (edX) responded with XML content", self.lti_iframe.lti_content)
self.lti_iframe.switch_to_default()
self.tab_nav.go_to_tab('Progress')
actual_scores = self.progress_page.scores("Test Chapter", "Test Section")
self.assertEqual(actual_scores, expected_scores)
self.assertEqual(['Overall Score', 'Overall Score\n1%'], self.progress_page.graph_overall_score())
def test_lti_v2_callback_graded_component(self):
"""
        Scenario: A graded LTI component in the LMS works correctly with the LTI 2.0 PUT callback
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I verify the scores on the progress and grade book pages
        And I verify the feedback in the LTI component
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False,
'weight': 10,
'graded': True,
'has_score': True
}
expected_scores = [(8, 10)]
problem_score = '(8.0 / 10.0 points)'
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.courseware_page.go_to_lti_container()
self.lti_iframe.submit_lti_answer("#submit-lti2-button")
self.assertIn("LTI consumer (edX) responded with HTTP 200", self.lti_iframe.lti_content)
self.lti_iframe.switch_to_default()
self.tab_nav.go_to_tab('Progress')
actual_scores = self.progress_page.scores("Test Chapter", "Test Section")
self.assertEqual(actual_scores, expected_scores)
self.assertEqual(['Overall Score', 'Overall Score\n1%'], self.progress_page.graph_overall_score())
self.tab_nav.go_to_tab('Instructor')
student_admin_section = self.instructor_dashboard_page.select_student_admin(StudentAdminPage)
student_admin_section.click_grade_book_link()
self.assertEqual("80", self.grade_book_page.get_value_in_the_grade_book('Homework 1 - Test Section', 1))
self.assertEqual("1", self.grade_book_page.get_value_in_the_grade_book('Total', 1))
self.tab_nav.go_to_tab('Course')
self.assertEqual(problem_score, self.courseware_page.get_elem_text('.problem-progress'))
self.assertEqual("This is awesome.", self.courseware_page.get_elem_text('.problem-feedback'))
def test_lti_delete_callback_graded_component(self):
"""
        Scenario: A graded LTI component in the LMS works correctly with the LTI 2.0 delete callback
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I verify the LTI provider deletes my grade on the progress and grade book pages
        And I verify the LTI provider deletes the feedback from the LTI component
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False,
'weight': 10,
'graded': True,
'has_score': True
}
expected_scores = [(0, 10)]
problem_score = '(8.0 / 10.0 points)'
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.courseware_page.go_to_lti_container()
self.lti_iframe.submit_lti_answer("#submit-lti2-button")
self.assertIn("LTI consumer (edX) responded with HTTP 200", self.lti_iframe.lti_content)
self.lti_iframe.switch_to_default()
self.courseware_page.visit()
self.assertEqual(problem_score, self.courseware_page.get_elem_text('.problem-progress'))
self.assertEqual("This is awesome.", self.courseware_page.get_elem_text('.problem-feedback'))
self.courseware_page.go_to_lti_container()
self.lti_iframe.submit_lti_answer("#submit-lti-delete-button")
self.courseware_page.visit()
self.assertEqual("(10.0 points possible)", self.courseware_page.get_elem_text('.problem-progress'))
self.assertFalse(self.courseware_page.is_lti_component_present('.problem-feedback'))
self.tab_nav.go_to_tab('Progress')
actual_scores = self.progress_page.scores("Test Chapter", "Test Section")
self.assertEqual(actual_scores, expected_scores)
self.assertEqual(['Overall Score', 'Overall Score\n0%'], self.progress_page.graph_overall_score())
self.tab_nav.go_to_tab('Instructor')
student_admin_section = self.instructor_dashboard_page.select_student_admin(StudentAdminPage)
student_admin_section.click_grade_book_link()
self.assertEqual("0", self.grade_book_page.get_value_in_the_grade_book('Homework 1 - Test Section', 1))
self.assertEqual("0", self.grade_book_page.get_value_in_the_grade_book('Total', 1))
def test_lti_hide_launch_shows_no_button(self):
"""
        Scenario: An LTI component that is set to hide_launch and open_in_a_new_page shows no button
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I verify the LTI component does not show a launch button, only the header "LTI (External resource)"
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': False,
'hide_launch': True
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertFalse(self.courseware_page.is_lti_component_present('.link_lti_new_window'))
self.assertEqual("LTI (External resource)", self.courseware_page.get_elem_text('.problem-header'))
def test_lti_hide_launch_shows_no_iframe(self):
"""
        Scenario: An LTI component that is set to hide_launch and not open_in_a_new_page shows no iframe
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I verify the LTI component does not show an LTI iframe, only the header "LTI (External resource)"
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'open_in_a_new_page': True,
'hide_launch': True
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertFalse(self.courseware_page.is_lti_component_present('.ltiLaunchFrame'))
self.assertEqual("LTI (External resource)", self.courseware_page.get_elem_text('.problem-header'))
def test_lti_button_text_correctly_displayed(self):
"""
        Scenario: The LTI component button text is correctly displayed
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I verify the LTI component shows a button with the text "Launch Application"
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'button_text': 'Launch Application'
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertEqual("Launch Application", self.courseware_page.get_elem_text('.link_lti_new_window'))
def test_lti_component_description_correctly_displayed(self):
"""
        Scenario: The LTI component description is correctly displayed
        Given the course has correct LTI credentials and a registered Instructor
        And the course has an LTI component with correct fields
        Then I verify the LTI component shows the description "Application description"
"""
metadata_advance_settings = "correct_lti_id:test_client_key:test_client_secret"
metadata_lti_xblock = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/{}'.format(self.host, '8765', 'correct_lti_endpoint'),
'description': 'Application description'
}
self.set_advance_settings(metadata_advance_settings)
self.create_lti_xblock(metadata_lti_xblock)
auto_auth(self.browser, self.USERNAME, self.EMAIL, True, self.course_id)
self.courseware_page.visit()
self.assertEqual("Application description", self.courseware_page.get_elem_text('.lti-description'))
    def set_advance_settings(self, metadata_advance_settings):
        """
        Enable the LTI advanced module and register the given LTI passport
        ("lti_id:client_key:client_secret") in the course advanced settings.
        """
        self.course_fix.add_advanced_settings({
            "advanced_modules": {"value": ["lti_consumer"]},
            'lti_passports': {"value": [metadata_advance_settings]}
        })
    def create_lti_xblock(self, metadata_lti_xblock):
        """
        Install a chapter/sequential fixture containing a single LTI XBlock
        configured with the given metadata.
        """
        self.course_fix.add_children(
            XBlockFixtureDesc(category='chapter', display_name='Test Chapter').add_children(
                XBlockFixtureDesc(
                    category='sequential', display_name='Test Section', grader_type='Homework', graded=True
                ).add_children(
                    XBlockFixtureDesc(category='lti', display_name='LTI', metadata=metadata_lti_xblock)
                )
            )
        ).install()
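# A minimal sketch of running this suite locally (hedged: the paver invocation
# and stub port are assumptions inferred from the fixtures above, not verified):
#
#     paver test_bokchoy -t lms/test_lms_lti.py
#
# The tests assume a stub LTI provider listening on port 8765 that serves the
# 'correct_lti_endpoint' launch URL used in the metadata dictionaries.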
| ESOedX/edx-platform | common/test/acceptance/tests/lms/test_lms_lti.py | Python | agpl-3.0 | 22,185 | ["VisIt"] | e3bc1033dcde5f52f8dcc9dae16831c25862d6277582e49e8d036a33167cdfd6 |