"""Test Axis device."""
from copy import deepcopy
from unittest import mock
import axis as axislib
from axis.api_discovery import URL as API_DISCOVERY_URL
from axis.applications import URL_LIST as APPLICATIONS_URL
from axis.applications.vmd4 import URL as VMD4_URL
from axis.basic_device_info import URL as BASIC_DEVICE_INFO_URL
from axis.event_stream import OPERATION_INITIALIZED
from axis.light_control import URL as LIGHT_CONTROL_URL
from axis.mqtt import URL_CLIENT as MQTT_CLIENT_URL
from axis.param_cgi import (
BRAND as BRAND_URL,
INPUT as INPUT_URL,
IOPORT as IOPORT_URL,
OUTPUT as OUTPUT_URL,
PROPERTIES as PROPERTIES_URL,
STREAM_PROFILES as STREAM_PROFILES_URL,
)
from axis.port_management import URL as PORT_MANAGEMENT_URL
import pytest
from homeassistant import config_entries
from homeassistant.components import axis
from homeassistant.components.axis.const import (
CONF_EVENTS,
CONF_MODEL,
DOMAIN as AXIS_DOMAIN,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.config_entries import SOURCE_ZEROCONF
from homeassistant.const import (
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
STATE_ON,
)
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import MockConfigEntry, async_fire_mqtt_message
MAC = "00408C12345"
MODEL = "model"
NAME = "name"
ENTRY_OPTIONS = {CONF_EVENTS: True}
ENTRY_CONFIG = {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "root",
CONF_PASSWORD: "pass",
CONF_PORT: 80,
CONF_MAC: MAC,
CONF_MODEL: MODEL,
CONF_NAME: NAME,
}
API_DISCOVERY_RESPONSE = {
"method": "getApiList",
"apiVersion": "1.0",
"data": {
"apiList": [
{"id": "api-discovery", "version": "1.0", "name": "API Discovery Service"},
{"id": "param-cgi", "version": "1.0", "name": "Legacy Parameter Handling"},
]
},
}
API_DISCOVERY_BASIC_DEVICE_INFO = {
"id": "basic-device-info",
"version": "1.1",
"name": "Basic Device Information",
}
API_DISCOVERY_MQTT = {"id": "mqtt-client", "version": "1.0", "name": "MQTT Client API"}
API_DISCOVERY_PORT_MANAGEMENT = {
"id": "io-port-management",
"version": "1.0",
"name": "IO Port Management",
}
APPLICATIONS_LIST_RESPONSE = """<reply result="ok">
<application Name="vmd" NiceName="AXIS Video Motion Detection" Vendor="Axis Communications" Version="4.2-0" ApplicationID="143440" License="None" Status="Running" ConfigurationPage="local/vmd/config.html" VendorHomePage="http://www.axis.com" />
</reply>"""
BASIC_DEVICE_INFO_RESPONSE = {
"apiVersion": "1.1",
"data": {
"propertyList": {
"ProdNbr": "M1065-LW",
"ProdType": "Network Camera",
"SerialNumber": "00408C12345",
"Version": "9.80.1",
}
},
}
LIGHT_CONTROL_RESPONSE = {
"apiVersion": "1.1",
"method": "getLightInformation",
"data": {
"items": [
{
"lightID": "led0",
"lightType": "IR",
"enabled": True,
"synchronizeDayNightMode": True,
"lightState": False,
"automaticIntensityMode": False,
"automaticAngleOfIlluminationMode": False,
"nrOfLEDs": 1,
"error": False,
"errorInfo": "",
}
]
},
}
MQTT_CLIENT_RESPONSE = {
"apiVersion": "1.0",
"context": "some context",
"method": "getClientStatus",
"data": {"status": {"state": "active", "connectionStatus": "Connected"}},
}
PORT_MANAGEMENT_RESPONSE = {
"apiVersion": "1.0",
"method": "getPorts",
"data": {
"numberOfPorts": 1,
"items": [
{
"port": "0",
"configurable": False,
"usage": "",
"name": "PIR sensor",
"direction": "input",
"state": "open",
"normalState": "open",
}
],
},
}
VMD4_RESPONSE = {
"apiVersion": "1.4",
"method": "getConfiguration",
"context": "Axis library",
"data": {
"cameras": [{"id": 1, "rotation": 0, "active": True}],
"profiles": [
{"filters": [], "camera": 1, "triggers": [], "name": "Profile 1", "uid": 1}
],
},
}
BRAND_RESPONSE = """root.Brand.Brand=AXIS
root.Brand.ProdFullName=AXIS M1065-LW Network Camera
root.Brand.ProdNbr=M1065-LW
root.Brand.ProdShortName=AXIS M1065-LW
root.Brand.ProdType=Network Camera
root.Brand.ProdVariant=
root.Brand.WebURL=http://www.axis.com
"""
PORTS_RESPONSE = """root.Input.NbrOfInputs=1
root.IOPort.I0.Configurable=no
root.IOPort.I0.Direction=input
root.IOPort.I0.Input.Name=PIR sensor
root.IOPort.I0.Input.Trig=closed
root.Output.NbrOfOutputs=0
"""
PROPERTIES_RESPONSE = """root.Properties.API.HTTP.Version=3
root.Properties.API.Metadata.Metadata=yes
root.Properties.API.Metadata.Version=1.0
root.Properties.EmbeddedDevelopment.Version=2.16
root.Properties.Firmware.BuildDate=Feb 15 2019 09:42
root.Properties.Firmware.BuildNumber=26
root.Properties.Firmware.Version=9.10.1
root.Properties.Image.Format=jpeg,mjpeg,h264
root.Properties.Image.NbrOfViews=2
root.Properties.Image.Resolution=1920x1080,1280x960,1280x720,1024x768,1024x576,800x600,640x480,640x360,352x240,320x240
root.Properties.Image.Rotation=0,180
root.Properties.System.SerialNumber=00408C12345
"""
STREAM_PROFILES_RESPONSE = """root.StreamProfile.MaxGroups=26
root.StreamProfile.S0.Description=profile_1_description
root.StreamProfile.S0.Name=profile_1
root.StreamProfile.S0.Parameters=videocodec=h264
root.StreamProfile.S1.Description=profile_2_description
root.StreamProfile.S1.Name=profile_2
root.StreamProfile.S1.Parameters=videocodec=h265
"""
async def vapix_request(self, session, url, **kwargs):
"""Return data based on url."""
if API_DISCOVERY_URL in url:
return API_DISCOVERY_RESPONSE
if APPLICATIONS_URL in url:
return APPLICATIONS_LIST_RESPONSE
if BASIC_DEVICE_INFO_URL in url:
return BASIC_DEVICE_INFO_RESPONSE
if LIGHT_CONTROL_URL in url:
return LIGHT_CONTROL_RESPONSE
if MQTT_CLIENT_URL in url:
return MQTT_CLIENT_RESPONSE
if PORT_MANAGEMENT_URL in url:
return PORT_MANAGEMENT_RESPONSE
if VMD4_URL in url:
return VMD4_RESPONSE
if BRAND_URL in url:
return BRAND_RESPONSE
if IOPORT_URL in url or INPUT_URL in url or OUTPUT_URL in url:
return PORTS_RESPONSE
if PROPERTIES_URL in url:
return PROPERTIES_RESPONSE
if STREAM_PROFILES_URL in url:
return STREAM_PROFILES_RESPONSE
async def setup_axis_integration(hass, config=ENTRY_CONFIG, options=ENTRY_OPTIONS):
"""Create the Axis device."""
config_entry = MockConfigEntry(
domain=AXIS_DOMAIN,
data=deepcopy(config),
connection_class=config_entries.CONN_CLASS_LOCAL_PUSH,
options=deepcopy(options),
version=2,
)
config_entry.add_to_hass(hass)
with patch("axis.vapix.Vapix.request", new=vapix_request), patch(
"axis.rtsp.RTSPClient.start",
return_value=True,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return config_entry
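# The tests below call setup_axis_integration(hass) and then look up the created device via
# hass.data[AXIS_DOMAIN][config_entry.unique_id].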
async def test_device_setup(hass):
"""Successful setup."""
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
assert device.api.vapix.firmware_version == "9.10.1"
assert device.api.vapix.product_number == "M1065-LW"
assert device.api.vapix.product_type == "Network Camera"
assert device.api.vapix.serial_number == "00408C12345"
entry = device.config_entry
assert len(forward_entry_setup.mock_calls) == 4
assert forward_entry_setup.mock_calls[0][1] == (entry, "binary_sensor")
assert forward_entry_setup.mock_calls[1][1] == (entry, "camera")
assert forward_entry_setup.mock_calls[2][1] == (entry, "light")
assert forward_entry_setup.mock_calls[3][1] == (entry, "switch")
assert device.host == ENTRY_CONFIG[CONF_HOST]
assert device.model == ENTRY_CONFIG[CONF_MODEL]
assert device.name == ENTRY_CONFIG[CONF_NAME]
assert device.serial == ENTRY_CONFIG[CONF_MAC]
async def test_device_info(hass):
"""Verify other path of device information works."""
api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
api_discovery["data"]["apiList"].append(API_DISCOVERY_BASIC_DEVICE_INFO)
with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
assert device.api.vapix.firmware_version == "9.80.1"
assert device.api.vapix.product_number == "M1065-LW"
assert device.api.vapix.product_type == "Network Camera"
assert device.api.vapix.serial_number == "00408C12345"
async def test_device_support_mqtt(hass, mqtt_mock):
"""Successful setup."""
api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
api_discovery["data"]["apiList"].append(API_DISCOVERY_MQTT)
with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
await setup_axis_integration(hass)
mqtt_mock.async_subscribe.assert_called_with(f"{MAC}/#", mock.ANY, 0, "utf-8")
topic = f"{MAC}/event/tns:onvif/Device/tns:axis/Sensor/PIR/$source/sensor/0"
message = b'{"timestamp": 1590258472044, "topic": "onvif:Device/axis:Sensor/PIR", "message": {"source": {"sensor": "0"}, "key": {}, "data": {"state": "1"}}}'
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 0
async_fire_mqtt_message(hass, topic, message)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 1
pir = hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_pir_0")
assert pir.state == STATE_ON
assert pir.name == f"{NAME} PIR 0"
async def test_update_address(hass):
"""Test update address works."""
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
assert device.api.config.host == "1.2.3.4"
with patch("axis.vapix.Vapix.request", new=vapix_request), patch(
"homeassistant.components.axis.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.config_entries.flow.async_init(
AXIS_DOMAIN,
data={
"host": "2.3.4.5",
"port": 80,
"hostname": "name",
"properties": {"macaddress": MAC},
},
context={"source": SOURCE_ZEROCONF},
)
await hass.async_block_till_done()
assert device.api.config.host == "2.3.4.5"
assert len(mock_setup_entry.mock_calls) == 1
async def test_device_unavailable(hass):
"""Successful setup."""
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
device.async_connection_status_callback(status=False)
assert not device.available
async def test_device_reset(hass):
"""Successfully reset device."""
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
result = await device.async_reset()
assert result is True
async def test_device_not_accessible(hass):
"""Failed setup schedules a retry of setup."""
with patch.object(axis.device, "get_device", side_effect=axis.errors.CannotConnect):
await setup_axis_integration(hass)
assert hass.data[AXIS_DOMAIN] == {}
async def test_device_unknown_error(hass):
"""Unknown errors are handled."""
with patch.object(axis.device, "get_device", side_effect=Exception):
await setup_axis_integration(hass)
assert hass.data[AXIS_DOMAIN] == {}
async def test_new_event_sends_signal(hass):
"""Make sure that new event send signal."""
entry = Mock()
entry.data = ENTRY_CONFIG
axis_device = axis.device.AxisNetworkDevice(hass, entry)
with patch.object(axis.device, "async_dispatcher_send") as mock_dispatch_send:
axis_device.async_event_callback(action=OPERATION_INITIALIZED, event_id="event")
await hass.async_block_till_done()
assert len(mock_dispatch_send.mock_calls) == 1
assert len(mock_dispatch_send.mock_calls[0]) == 3
async def test_shutdown():
"""Successful shutdown."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
axis_device = axis.device.AxisNetworkDevice(hass, entry)
axis_device.api = Mock()
axis_device.api.vapix.close = AsyncMock()
await axis_device.shutdown(None)
assert len(axis_device.api.stream.stop.mock_calls) == 1
assert len(axis_device.api.vapix.close.mock_calls) == 1
async def test_get_device_fails(hass):
"""Device unauthorized yields authentication required error."""
with patch(
"axis.vapix.Vapix.request", side_effect=axislib.Unauthorized
), pytest.raises(axis.errors.AuthenticationRequired):
await axis.device.get_device(hass, host="", port="", username="", password="")
async def test_get_device_device_unavailable(hass):
"""Device unavailable yields cannot connect error."""
with patch(
"axis.vapix.Vapix.request", side_effect=axislib.RequestError
), pytest.raises(axis.errors.CannotConnect):
await axis.device.get_device(hass, host="", port="", username="", password="")
async def test_get_device_unknown_error(hass):
"""Device yield unknown error."""
with patch(
"axis.vapix.Vapix.request", side_effect=axislib.AxisException
), pytest.raises(axis.errors.AuthenticationRequired):
await axis.device.get_device(hass, host="", port="", username="", password="")
repo_name: tboyce021/home-assistant | path: tests/components/axis/test_device.py | language: Python | license: apache-2.0 | size: 14,005 | keyword: ["VMD"] | text_hash: d8fefe279d98fd56c471f0fd4df8d21e68c3f45ee7d7c2a0c5b94d56732ae14b
#########
# ps_drone.py
# (w)+(c) J. Philipp de Graaff, www.playsheep.de, drone@playsheep.de, 2012-2014
# Project homepage: www.playsheep.de/drone and https://sourceforge.net/projects/ps-drone/
# Dependencies: a POSIX OS, openCV2 for video-support.
# Base-program of the PS-Drone API: "An open and enhanced API for universal control of the Parrot AR.Drone 2.0 quadcopter."
##########
# Modified and advanced version, based on a part of the master of computer science degree dissertation "Universelle
# Kontrolle und Ueberwachung einer Parrot AR.Drone 2.0 auf Basis eines offenen und erweiterten Toolkits"
# by J. Philipp de Graaff, faculty of computer science, Prof. Dr. Hedrich, at the University of Frankfurt / Germany
# Linked at http://www.em.cs.uni-frankfurt.de/index.php?id=43&L=1
# For further details, information, documentation or tutorials visit: www.playsheep.de/drone
##########
# LICENCE:
# Artistic License 2.0 as seen on http://opensource.org/licenses/artistic-license-2.0 (retrieved December 2014)
# If the terms of this license do not permit the full use that you propose to make of PS-Drone, please contact me for a
# different licensing arrangement.
# Visit www.playsheep.de/drone or see the PS-Drone-API-documentation for an abstract from the Artistic License 2.0.
##########
# Dedicated to my beloved wife.
###########
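# Typical usage (an illustrative sketch only; the timings and flight commands are examples,
# not taken from this file):
#   import time, ps_drone
#   drone = ps_drone.Drone()        # create the API object
#   drone.startup()                 # connect to the drone and start NavData/video processes
#   drone.trim()                    # flat-trim the sensors before takeoff
#   drone.takeoff(); time.sleep(7.5)
#   drone.land()
#   drone.shutdown()                # clean shutdown of all threads and processes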
import threading, select, socket, time, tempfile, multiprocessing, struct, os, sys
import thread, signal, subprocess
if os.name == 'posix': import termios, fcntl # for getKey(), ToDo: Reprogram for Windows
elif os.name == 'nt': import msvcrt # needed by getKey() on Windows
commitsuicideV, showVid, vCruns, lockV, debugV = False, False, False, threading.Lock(), False # Global variables for video-decoding
offsetND, suicideND, commitsuicideND = 0, False, False # Global variables for NavData-decoding
class Drone(object):
######################################=-
### Start and stop using the drone ###=-
######################################=-
###### Bootup and base configuration
def __init__(self):
self.__Version = "2.0.2"
self.__lock = threading.Lock() # To prevent semaphores
self.__startTime = time.time()
		self.__speed = 0.2 # Default drone moving speed as a fraction of full speed (0.0 - 1.0)
self.showCommands = False # Shows all sent commands (but not the keepalives)
self.debug = False # Shows some additional debug information
self.valueCorrection = False
self.selfRotation = 0.0185 # use this value, if not checked by getSelfRotation()
		self.stopOnComLoss = False # if True, the drone shuts down (and lands) when a communication problem occurs
# Drone communication variables
self.DroneIP = "192.168.1.1"
self.NavDataPort = 5554
self.VideoPort = 5555
self.CmdPort = 5556
self.CTLPort = 5559
# NavData variables
self.__NavData = ""
self.__State = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self.__NavDataCount = 0
self.__NavDataTimeStamp = 0.0
self.__NavDataDecodingTime = 0.0
self.__NoNavData = False
# Video variables
self.__VideoImage = None
self.__VideoImageCount = 0
self.__VideoDecodeTimeStamp = 0
self.__VideoDecodeTime = 0
self.__VideoReady = False
self.__vKey = ""
self.__SaveVideo = False
# Config variables
self.__ConfigData = []
self.__ConfigDataCount = 0
self.__ConfigDataTimeStamp = 0
self.__ConfigSending = True
self.__ConfigSessionID = "03016321"
self.__ConfigUserID = "0a100407"
self.__ConfigApplicationID = "03016321"
self.sendConfigSaveMode = False
# Internal variables
self.__NavDataProcess = ""
self.__VideoProcess = ""
self.__vDecodeProcess = ""
self.__ConfigQueue = []
self.__networksuicide = False
self.__receiveDataRunning = False
self.__sendConfigRunning = False
self.__shutdown = False
self.__pDefaultStr = "\033[0m"
self.__pRedStr = "\033[91m"
self.__pGreenStr = "\033[92m"
self.__pYellowStr = "\033[93m"
self.__pBlueStr = "\033[94m"
self.__pPurpleStr = "\033[95m"
self.__pLineUpStr = "\033[1A"
###### Connect to the drone and start all procedures
def startup(self):
# Check for drone in the network and wake it up
try:
socket.socket().connect((self.DroneIP, 21))
socket.socket().close()
except:
self.printRed()
print "Drone is not online"
self.printDefault()
sys.exit(9)
# Internal variables
self.__CmdCounter = 3 # as there are two raw commands, send next steps
self.__calltime = 0 # to get some time-values to debug
#send the first four initial-commands to the drone
self.__sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Open network connection
self.__sock.setblocking(0) # Network should not block
self.__sendrawmsg("\r") # Wakes up command port
time.sleep(0.01)
self.__sendrawmsg("AT*PMODE=1,2\rAT*MISC=2,2,20,2000,3000\r") # Initialising drone as sniffed from datastream demo-tool to AR.Drone
##### Initialising timed thread(s) for drone communication
# Opening NavData- and Video- Processes
self.__VidPipePath = tempfile.gettempdir()+"/dronevid-"+str(threading.enumerate()[0])[-12:-2]+"-"+str(time.time())[-7:].replace(".","")+".h264"
self.__net_pipes = []
self.__NavData_pipe, navdataChild_pipe = multiprocessing.Pipe()
self.__Video_pipe, videoChild_pipe = multiprocessing.Pipe()
self.__vdecode_pipe, self.__vdecodeChild_pipe = multiprocessing.Pipe()
self.__NavDataProcess = multiprocessing.Process( target=mainloopND, args=(self.DroneIP,self.NavDataPort,navdataChild_pipe,os.getpid()))
self.__NavDataProcess.start()
self.__VideoProcess = multiprocessing.Process( target=mainloopV, args=(self.DroneIP,self.VideoPort,self.__VidPipePath,videoChild_pipe,os.getpid()))
self.__VideoProcess.start()
self.__vDecodeProcess = multiprocessing.Process( target=vDecode, args=(self.__VidPipePath,self.__vdecodeChild_pipe,os.getpid()))
# There is a third process called "self.__vDecodeProcess" for decoding video, initiated and started around line 880
# Final settings
self.useDemoMode(True) # This entry is necessary for the drone's firmware, otherwise the NavData contains just header and footer
self.setConfig("custom:session_id","-all")
self.getNDpackage(["demo"])
time.sleep(1)
#setup Network-thread
		while not self.__receiveDataRunning or not self.__sendConfigRunning or len(self.__ConfigQueue): # sometimes these threads do not start, for whatever reason, so double-check here
if not self.__receiveDataRunning:
self.__threadReceiveData=threading.Thread(target=self.__receiveData)
self.__threadReceiveData.start()
time.sleep(0.05)
if not self.__sendConfigRunning:
self.__threadSendConfig=threading.Thread(target=self.__sendConfig)
self.__threadSendConfig.start()
time.sleep(0.05)
time.sleep(0.01)
###### Clean Shutdown
def shutdown(self):
if self.__shutdown: sys.exit()
self.__shutdown = True
if self.debug: print "Shutdown..."
self.land()
self.thrust(0,0,0,0)
try: self.__NavData_pipe.send("die!")
except: pass
self.__Video_pipe.send("uninit")
t=time.time()
while self.__VideoReady and (time.time()-t)<5: time.sleep(0.1)
try: self.__Video_pipe.send("die!")
except: pass
time.sleep(0.5)
try: self.__VideoProcess.terminate()
except: pass
try: self.__vDecodeProcess.terminate()
except: pass
try: self.__NavDataProcess.terminate()
except: pass
self.__stopnetwork()
try: self.__threadSendConfig.join()
except: pass
try: self.__threadReceiveData.join()
except: pass
self.__keepalive.cancel()
sys.exit()
##############################################################=-
### Make internal variables to external read-only variables ###=-
##############################################################=-
@property
def Version(self): return self.__Version
@property
def startTime(self): return self.__startTime
@property
def speed(self): return self.__speed
@property
def NavData(self): return self.__NavData
@property
def State(self): return self.__State
@property
def NavDataCount(self): return self.__NavDataCount
@property
def NavDataTimeStamp(self): return self.__NavDataTimeStamp
@property
def NavDataDecodingTime(self): return self.__NavDataDecodingTime
@property
def NoNavData(self): return self.__NoNavData
@property
def VideoImage(self): return self.__VideoImage
@property
def VideoImageCount(self): return self.__VideoImageCount
@property
def VideoDecodeTimeStamp(self): return self.__VideoDecodeTimeStamp
@property
def VideoDecodeTime(self): return self.__VideoDecodeTime
@property
def VideoReady(self): return self.__VideoReady
@property
def SaveVideo(self): return self.__SaveVideo
@property
def ConfigData(self): return self.__ConfigData
@property
def ConfigDataCount(self): return self.__ConfigDataCount
@property
def ConfigDataTimeStamp(self): return self.__ConfigDataTimeStamp
@property
def ConfigSending(self): return self.__ConfigSending
@property
def ConfigSessionID(self): return self.__ConfigSessionID
@property
def ConfigUserID(self): return self.__ConfigUserID
@property
def ConfigApplicationID(self): return self.__ConfigApplicationID
######################=-
### Drone commands ###=-
######################=-
###### Commands for configuration
# change some value
	def setConfig(self, name, value): # e.g. drone.setConfig("control:altitude_max","5000")
		self.__ConfigQueue.append([str(name), str(value), False]) # Note: changes are not immediate and may take some time
# change some value and send the configuration Identifier (sendConfigIDs) ahead
def setMConfig(self, name, value): # Usage like setConfig
		self.__ConfigQueue.append([str(name), str(value), True]) # Note: changes are not immediate and may take some time
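	# Illustrative example: switching to the ground camera with the IDs sent ahead is
	# drone.setMConfig("video:video_channel","1"), exactly what groundCam() further below does.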
# get actual configuration
def getConfig(self): # Stored in "ConfigData"
self.at("CTRL", [5,0]) # Wow, that is new, was not necessary before
self.at("CTRL", [4,0]) # Note: Actual configuration data will be received after setting...
if self.showCommands: self.__calltime = time.time() # ... automatically. An update will take up to 0.015 sec)
	# Setting IDs to store configurations for later
def setConfigSessionID(self, *args):
try:
value = float(*args[0])
self.__ConfigSessionID = normalLen8(value)
self.setConfig("custom:session_id", self.__ConfigSessionID)
except: return (self.__ConfigSessionID)
def setConfigUserID(self, *args):
try:
value = float(*args[0])
self.__ConfigUserID = normalLen8(value)
self.setConfig("custom:profile_id", self.__ConfigUserID)
except: return (self.__ConfigUserID)
def setConfigApplicationID(self, *args):
try:
value = float(*args[0])
self.__ConfigApplicationID = normalLen8(value)
self.setConfig("custom:application_id", self.__ConfigApplicationID)
except: return (self.__ConfigApplicationID)
def setConfigAllID(self):
self.setConfig("custom:session_id", self.__ConfigSessionID)
self.setConfig("custom:profile_id", self.__ConfigUserID)
self.setConfig("custom:application_id", self.__ConfigApplicationID)
	# Reminds the drone which IDs it has to use (important e.g. for switching cameras)
def sendConfigIDs(self):
self.at("CONFIG_IDS", [self.__ConfigSessionID,self.__ConfigUserID,self.__ConfigApplicationID])
###### Calibration
def trim(self):
self.at("FTRIM", [])
def mtrim(self):
self.at("CALIB", [0])
def mantrim(self, thetaAngle, phiAngle, yawAngle): # manual Trim
if self.valueCorrection:
try: thetaAngle = float(thetaAngle)
except: thetaAngle = 0.0
try: phiAngle = float(phiAngle)
except: phiAngle = 0.0
try: yawAngle = float(yawAngle)
except: yawAngle = 0.0
self.at("MTRIM", [thetaAngle,phiAngle,yawAngle]) # floats
def getSelfRotation(self, wait):
if self.valueCorrection:
try: wait = float(wait)
except: wait = 1.0
reftime = time.time()
oangle = self.__NavData["demo"][2][2] # detects the self-rotation-speed of the yaw-sensor
time.sleep(wait)
self.selfRotation = (self.__NavData["demo"][2][2]-oangle)/(time.time()-reftime)
return self.selfRotation
###### Movement
# Default speed of movement
def setSpeed(self, *speed):
try: self.__speed = self.__checkSpeedValue(*speed)
except: pass
return self.__speed
# Absolute movement in x, y and z-direction and rotation
def move(self, leftright, backwardforward, downup, turnleftright): # Absolute movement in x, y and z-direction and rotation
if self.valueCorrection:
try: leftright = float(leftright)
except: leftright = 0.0
try: backwardforward = float(backwardforward)
except: backwardforward = 0.0
try: downup = float(downup)
except: downup = 0.0
try: turnleftright = float(turnleftright)
except: turnleftright = 0.0
if leftright > 1.0: leftright = 1.0
if leftright < -1.0: leftright = -1.0
if backwardforward > 1.0: backwardforward = 1.0
if backwardforward < -1.0: backwardforward = -1.0
if downup > 1.0: downup = 1.0
if downup < -1.0: downup = -1.0
if turnleftright > 1.0: turnleftright = 1.0
if turnleftright < -1.0: turnleftright = -1.0
self.at("PCMD", [3 ,leftright, -backwardforward, downup, turnleftright])
# Relative movement to controller in x, y and z-direction and rotation
def relMove(self, leftright, backwardforward, downup, turnleftright, eastwest, northturnawayaccuracy):
if self.valueCorrection:
try: leftright = float(leftright)
except: leftright = 0.0
try: backwardforward = float(backwardforward)
except: backwardforward = 0.0
try: downup = float(downup)
except: downup = 0.0
try: turnleftright = float(turnleftright)
except: turnleftright = 0.0
if leftright > 1.0: leftright = 1.0
if leftright < -1.0: leftright = -1.0
if backwardforward > 1.0: backwardforward = 1.0
if backwardforward < -1.0: backwardforward = -1.0
if downup > 1.0: downup = 1.0
if downup < -1.0: downup = -1.0
if turnleftright > 1.0: turnleftright = 1.0
if turnleftright < -1.0: turnleftright = -1.0
self.at("PCMD_MAG", [1 ,leftright, -backwardforward, downup, turnleftright, eastwest, northturnawayaccuracy])
# Stop moving
def hover(self):
self.at("PCMD", [0,0.0,0.0,0.0,0.0])
def stop(self): # Hammertime !
self.hover()
# Basic movements
def moveLeft(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(-self.__checkSpeedValue(speed),0.0,0.0,0.0)
def moveRight(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move( self.__checkSpeedValue(speed),0.0,0.0,0.0)
def moveForward(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0, self.__checkSpeedValue(speed),0.0,0.0)
def moveBackward(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,-self.__checkSpeedValue(speed),0.0,0.0)
def moveUp(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,0.0, self.__checkSpeedValue(speed),0.0)
	def moveDown(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,0.0,-self.__checkSpeedValue(speed),0.0)
def turnLeft(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,0.0,0.0,-self.__checkSpeedValue(speed))
def turnRight(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,0.0,0.0, self.__checkSpeedValue(speed))
	# Lets the drone rotate by a defined angle
# BUG: does not work with 180deg. turns
# ToDo: Should be able to stop in case of failures
def turnAngle(self,ndir,speed,*args):
opos = self.__NavData["demo"][2][2] # get the source/current (original) angle
npos = opos+ndir # calculate the destination (new) angle
minaxis = opos # to make sure, that the jump from -180 to 180 will...
maxaxis = opos # ...be correctly handled
speed = self.__checkSpeedValue(speed)
ospeed = speed # stores the given speed-value
reftime = time.time()
accurateness = 0
try: accurateness = args[0]
except: pass
if accurateness<=0:
accurateness = 0.005 # Destination angle can differ +/- this value (not demo-mode)
if self.__State[10]: accurateness = 0.1 # Destination angle can differ +/- this value in demo-mode
stop = False
while not stop:
ndc = self.__NavDataCount # wait for the next NavData-package
while ndc == self.__NavDataCount: time.sleep(0.001)
			kalib = (time.time()-reftime)*self.selfRotation # tries to recalibrate; the yaw sensor drifts by around 0.0185 deg/sec
cpos = self.__NavData["demo"][2][2] # get the current angle
if minaxis > cpos: minaxis = cpos # set the minimal seen angle
if maxaxis < cpos: maxaxis = cpos # set the maximal seen angle
if cpos-minaxis >= 180: cpos = cpos-360 # correct the angle-value if necessary...
elif maxaxis-cpos >= 180: cpos = cpos+360 # ...for an easier calculation
speed = abs(cpos-npos+kalib) / 10.0 # the closer to the destination the slower the drone turns
if speed > ospeed: speed = ospeed # do not turn faster than recommended
if speed < 0.05: speed = 0.05 # too slow turns causes complications with calibration
self.__speed = speed
if cpos > (npos+kalib): self.turnLeft() # turn left, if destination angle is lower
else: self.turnRight() # turn right if destination angle is higher
if cpos < (npos+kalib+accurateness) and cpos > (npos+kalib-accurateness):# if angle is reached...
self.stop() # ...stop turning
time.sleep(0.01)
stop = True
return(True)
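	# Illustrative example: drone.turnAngle(90, 0.5) turns roughly 90 degrees clockwise at up to
	# half speed (keep the 180-degree limitation noted above in mind).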
def takeoff(self):
self.at("REF", [290718208]) #290718208=10001010101000000001000000000
def land(self):
self.at("REF", [290717696]) #290717696=10001010101000000000000000000
###### NavData commands
# Switches to Demo- or Full-NavData-mode
def useDemoMode(self,value):
if value: self.setConfig("general:navdata_demo", "TRUE")
else: self.setConfig("general:navdata_demo", "FALSE")
def useMDemoMode(self,value):
if value: self.setMConfig("general:navdata_demo", "TRUE")
else: self.setMConfig("general:navdata_demo", "FALSE")
def getNDpackage(self,packets):
self.__NavData_pipe.send(("send",packets))
def addNDpackage(self,packets):
self.__NavData_pipe.send(("add",packets))
def delNDpackage(self,packets):
self.__NavData_pipe.send(("block",packets))
def reconnectNavData(self):
self.__NavData_pipe.send("reconnect")
###### Video & Marker commands
# This makes the drone fly around and follow 2D tags which the camera is able to detect.
def aflight(self, flag):
self.at("AFLIGHT", [flag]) #Integer: 1: start flight, 0: stop flight
def slowVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("slowVideo")
else: self.__Video_pipe.send("fastVideo")
def midVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("midVideo")
else: self.__Video_pipe.send("fastVideo")
def fastVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("fastVideo")
else: self.__Video_pipe.send("slowVideo")
def saveVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("saveVideo")
else: self.__Video_pipe.send("unsaveVideo")
def startVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("init")
else: self.stopVideo()
def stopVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("uninit")
else: self.startVideo()
def showVideo(self, *args):
try: do = args[0]
except: do = True
if do:
self.__Video_pipe.send("init")
self.__Video_pipe.send("show")
else: self.hideVideo()
def hideVideo(self, *args):
try: do = args[0]
except: do = True
if do:
self.__Video_pipe.send("init")
self.__Video_pipe.send("hide")
else: self.showVideo()
# Selects which video stream to send on the video UDP port.
def hdVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_codec","131")
else: self.setMConfig("video:video_codec","129")
def sdVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_codec","129")
else: self.setMConfig("video:video_codec","131")
def mp4Video(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_codec","128")
else: self.setMConfig("video:video_codec","129")
# Selects which video-framerate (in frames per second) to send on the video UDP port.
def videoFPS(self, fps):
try:
int(fps)
if fps>60: fps = 60
elif fps<1: fps = 1
self.setMConfig("video:codec_fps",fps)
except: pass
# Selects which video-bitrate (in kilobit per second) to send on the video UDP port.
def videoBitrate(self, bitrate):
try:
int(bitrate)
if bitrate > 20000: bitrate = 20000
if bitrate < 250: bitrate = 250
self.setMConfig("video:bitrate",bitrate)
except: pass
# Selects which video stream to send on the video UDP port.
def frontCam(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_channel","0")
else: self.setMConfig("video:video_channel","1")
def groundCam(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_channel","1")
else: self.setMConfig("video:video_channel","0")
### Misc commands
def reset(self):
if self.NavDataCount>0 and self.State[31]==1:
self.at("REF", [290717952]) #290717952=10001010101000000000100000000
def thrust(self, fl, fr, rl, rr): # Controls engines directly, overriding control loops.
fl *= 2
if fl > 64000: fl = 64000
elif fl < 0: fl = 0
fr *= 2
if fr > 64000: fr = 64000
elif fr < 0: fr = 0
rl *= 2
if rl > 64000: rl = 64000
elif rl < 0: rl = 0
rr *= 2
if rr > 64000: rr = 64000
elif rr < 0: rr = 0
self.at("PWM", [int(fl), int(fr), int(rr), int(rl)])
		# Integer values seem to range from 0 (stop) to 511 (full); values above 511 appear to have no effect.
		# Beware: using values that are too high (e.g. floats > 64k?) causes side effects like restarting other motors.
		# The drone will shut down if its flight angle exceeds the configured limit.
# Control the drone's LED.
def led(self, animation, frequency, duration):
if animation < 21 and frequency > 0 and duration >= 0:
self.at("LED", [animation, float(frequency), duration])
# Makes the drone execute a predefined movement (animation).
def anim(self, animation, duration):
if animation < 20 and duration >= 0:
self.at("ANIM", [animation, duration])
#########################=-
### Low-level Commands ###=-
#########################=-
	# Upgrading the basic drone commands to low-level drone commands:
# Adding command-number, checking the values, convert 32-bit float to 32-bit integer and put it in quotes
def at(self, command, params):
self.__lock.acquire()
paramLn = ""
if params:
for p in params:
if type(p) == int: paramLn += ","+str(p)
elif type(p) == float: paramLn += ","+str(struct.unpack("i", struct.pack("f", p))[0])
elif type(p) == str: paramLn += ",\""+p+"\""
msg = "AT*"+command+"="+str(self.__CmdCounter)+paramLn+"\r"
self.__CmdCounter += 1
self.__sendrawmsg(msg)
self.__lock.release()
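	# For example, at("CONFIG", ["general:navdata_demo", "TRUE"]) sends a line like
	# 'AT*CONFIG=<counter>,"general:navdata_demo","TRUE"\r', with floats converted to their
	# 32-bit integer representation first.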
	# Sends the low-level, drone-readable commands to the drone... better not to use this directly
def __sendrawmsg(self, msg):
try: self.__keepalive.cancel()
except: pass
if self.showCommands:
if msg.count("COMWDG") < 1: print msg
self.__sock.sendto(msg, (self.DroneIP, self.CmdPort))
self.__keepalive = threading.Timer(0.1, self.__heartbeat)
self.__keepalive.start()
#############################=-
### Convenient Commands ###=-
#############################=-
# Just add water
# Checks the battery-status
def getBattery(self):
batStatus = "OK"
batValue = 0
if self.__State[15] == 1: batStatus = "empty"
try: batValue = self.__NavData['demo'][1]
except: batValue = -1
return (batValue,batStatus) # Percent & status ("OK", "empty")
# Calculates the minor difference between two angles as the drone gives values from -180 to 180...
# ...so e.g. 170 and -160 are +30 difference and drone will turn to the correct direction
def angleDiff(self, base, value):
adiff = ((base+180)-(value+180)) %360
if adiff>180: adiff-=360
return adiff
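	# Worked example: angleDiff(-160, 170) returns 30, i.e. the drone turns 30 degrees the short
	# way round instead of 330 degrees.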
# Grabs the pressed key (not yet for Windows)
# ToDo: Reprogram for Windows
def getKey(self):
key = ""
fd = sys.stdin.fileno()
if os.name == 'posix':
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
try:
try: key = sys.stdin.read(1)
except IOError: pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
if os.name == 'nt':
if msvcrt.kbhit(): key = msvcrt.getch()
key += self.__vKey
# self.__vKey = ""
return key
# Drone hops like an excited dog
def doggyHop(self):
ospeed = self.__speed
self.__speed = 1
for i in range (0,4,1):
self.moveUp()
time.sleep(0.20)
self.moveDown()
time.sleep(0.20)
self.hover()
self.__speed = ospeed
# Drone wags like a happy dog
def doggyWag(self):
ospeed = self.__speed
self.__speed = 1
for i in range (0,4,1):
self.moveLeft()
time.sleep(0.25)
self.moveRight()
time.sleep(0.25)
self.hover()
self.__speed = ospeed
# Drone nods
def doggyNod(self):
ospeed = self.__speed
self.__speed = 1
for i in range (0,4,1):
self.moveForward()
time.sleep(0.25)
self.moveBackward()
time.sleep(0.25)
self.hover()
self.__speed = ospeed
def printDefault(self, *args):
if os.name == 'posix':
print self.__pDefaultStr,
try:
if len(*args) > 0:
for i in args: print i,
print self.__pDefaultStr
except: pass
def printRed(self, *args):
if os.name == 'posix':
print self.__pRedStr,
try:
if len(*args) > 0:
for i in args: print i,
print self.__pDefaultStr
except: pass
def printGreen(self, *args):
if os.name == 'posix':
print self.__pGreenStr,
try:
if len(*args) > 0:
for i in args: print i,
print self.__pDefaultStr
except: pass
def printYellow(self, *args):
if os.name == 'posix':
print self.__pYellowStr,
try:
if len(*args) > 0:
for i in args: print i,
print self.__pDefaultStr
except: pass
def printBlue(self, *args):
if os.name == 'posix':
print self.__pBlueStr,
try:
if len(*args) > 0:
for i in args: print i,
print self.__pDefaultStr
except: pass
def printPurple(self, *args):
if os.name == 'posix':
print self.__pPurpleStr,
try:
if len(*args) > 0:
for i in args: print i,
print self.__pDefaultStr
except: pass
def printLineUp(self):
if os.name == 'posix': print self.__pLineUpStr,
##################################=-
### Threads & Thread-Sidekicks ###=-
##################################=-
# Idea: the network thread listens to the given network-stream and communication-pipes of other processes, such as for video or navdata-decoding.
# In case the connection to the drone is cut off for more than 2 seconds (so no keep-alive-command has been sent) the network
# needs to reconnect. In order to do so the (private) function "__netrecon" starts after 0.1 seconds of no incoming navdata-datapacket to
# reconnect all given network-sockets.
def __heartbeat(self):
# If the drone does not get a command, it will mutter after 50ms (CTRL watchdog / state[28] will set to 1)
# and panic after 2 seconds and abort data-communication on port 5554 (then you have to initialize the network again).
# Heartbeat will reset the watchdog and, by the way, the ACK_BIT (state[6], to accept any other AT*CONFIG command)
# If mainthread isn't alive anymore (because program crashed or whatever), heartbeat will initiate the shutdown.
if str(threading.enumerate()).count("MainThread, stopped") or str(threading.enumerate()).count("MainThread")==0: self.shutdown()
else: self.at("COMWDG",[])
	# checkAndReact is called periodically by the receiveData-thread to check mainly for critical
	# status errors and for changed debug modes.
def __checkAndReact(self, debug, showCommands):
# Automatic process-commands, used for syncing debugging-bits to child-processes
if debug != self.debug:
debug = self.debug
if debug:
self.__NavData_pipe.send("debug")
self.__Video_pipe.send("debug")
else:
self.__NavData_pipe.send("undebug")
self.__Video_pipe.send("undebug")
if showCommands != self.showCommands:
showCommands = self.showCommands
if showCommands:
self.__NavData_pipe.send("showCommands")
self.__Video_pipe.send("showCommands")
else:
self.__NavData_pipe.send("hideCommands")
self.__Video_pipe.send("hideCommands")
# Communication problem, shutting down
if self.stopOnComLoss and self.__State[30]:
self.shutdown()
sys.exit()
return (debug,showCommands)
	# Thread for sending the configuration. It is asynchronous but safe.
# The configuration-requests are in a queue, the first entry is sent. NavData will contain a "Control command ACK" status-bit,...
# ...that configuration is ready to be set. This will be confirmed and the procedure waits until this bit is 0 again; then the next entry will be processed.
# In savemode, there is a check whether the configuration has been changed correctly by requesting the current/latest configuration and double-checking this value.
def __sendConfig(self):
sleeptime, getconfigtag, self.__sendConfigRunning = 0.001, False, True
while not self.__networksuicide:
if len(self.__ConfigQueue): # If there is something in the queue...
if self.__ConfigQueue[0][-1]: self.sendConfigIDs() # ...check for multiuserconfig-request (and send it)
self.__ConfigSending = True # Set tag, to show sending is in process
qlen = len(self.__ConfigQueue)
if qlen > 1: # Testing for double entries, preventing a ping-pong in save-mode
i = 1
while True:
if i >= qlen: break
if self.__ConfigQueue[0][0].lower() == self.__ConfigQueue[i][0].lower():
self.__ConfigQueue.remove(self.__ConfigQueue[0])# Delete double entries
qlen = len(self.__ConfigQueue)
else: i+=1
self.at("CONFIG",self.__ConfigQueue[0][:-1]) # Send the first entry in queue
getconfigtag, configconfirmed, configreconfirmed = False, False, False
while not configconfirmed and not self.__networksuicide: # Wait for confirmation-bit from drone...
if self.__State[6] and not configreconfirmed and not self.__networksuicide:
self.at("CTRL",[5,0]) # ...and send reset the confirmation-bit
configreconfirmed = True
if not self.__State[6] and configreconfirmed and not self.__networksuicide:
configconfirmed = True # Wait for the reset of the confirmation-bit
time.sleep(sleeptime)
				# It seems that the drone does not always store configurations correctly; therefore, here is a save-mode:
if self.sendConfigSaveMode and not self.__networksuicide:
lastConfigDataCount = self.__ConfigDataCount # Wait for the next configuration-list
self.getConfig()
while lastConfigDataCount == self.__ConfigDataCount and not self.__networksuicide: time.sleep(sleeptime)
# New & Optimized
for i in range (0,len(self.__ConfigData),1):
if self.__ConfigData[i][0].find(self.__ConfigQueue[0][0]) > -1:
if self.__ConfigData[i][1] != self.__ConfigQueue[0][1]:
if self.debug:
print " Configuration missmatched, resending !"
print " "+self.__ConfigData[i][0]+" should be \""+self.__ConfigQueue[0][1]+"\" is \""+self.__ConfigData[i][1]+"\""
self.__ConfigQueue.append(self.__ConfigQueue[0]) # If value is not correctly set, requeue !
self.__ConfigQueue.remove(self.__ConfigQueue[0]) # Configuration has been (correctly) set, delete request from queue and go on
if self.__networksuicide: self.__ConfigQueue=[]
if not len(self.__ConfigQueue):
if not getconfigtag:
self.getConfig()
getconfigtag = True
self.__ConfigSending = False
else: time.sleep(sleeptime)
if self.debug: print "sendConfig-Tread : committed suicide"
def __receiveData(self):
self.__net_pipes=[]
self.__net_pipes.append(self.__NavData_pipe)
self.__net_pipes.append(self.__Video_pipe)
self.__Config_pipe = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #TCP
self.__Config_pipe.setblocking(0)
self.__Config_pipe.connect_ex((self.DroneIP, self.CTLPort))
self.__net_pipes.append(self.__Config_pipe)
VideoIsDead, configdata, cfgdata, cmd = False, [], "", ""
self.__vDecodeRunning, debug, showCommands, self.__receiveDataRunning = False, False, False, True
while not self.__networksuicide:
in_pipe, dummy1, dummy2 = select.select(self.__net_pipes, [], [], 0.1) # When something is in a pipe...
for ip in in_pipe: # ...go and get it
if ip == self.__NavData_pipe: ### Receiving sensor-values from NavData-process
self.__NavData, self.__State, self.__NavDataCount, self.__NavDataTimeStamp, self.__NavDataDecodingTime, self.__NoNavData = self.__NavData_pipe.recv()
if ip == self.__vdecode_pipe: ### Receiving imagedata and feedback from videodecode-process
cmd, VideoImageCount, VideoImage, VideoDecodeTime = self.__vdecode_pipe.recv() # Imagedata
if self.showCommands and cmd!="Image" : print "** vDec -> Com :",cmd
if cmd == "suicided": self.__Video_pipe.send("vd died") # videodecode-process died
if cmd == "foundCodec": self.__Video_pipe.send("foundCodec") # the codec of the videostream has been found, do not flood anymore
if cmd == "VideoUp": self.__VideoReady = True # Imagedata is available
if cmd == "keypressed": self.__vKey = VideoImage # Pressed key on window
if cmd == "reset": self.__Video_pipe.send(cmd) # proxy to videodecode-process
if cmd == "Image": # Imagedata !
self.__VideoImageCount = VideoImageCount
self.__VideoImage = VideoImage
self.__VideoDecodeTime = VideoDecodeTime
self.__VideoDecodeTimeStamp = time.time()-self.__startTime
if ip == self.__Video_pipe: ### Receiving feedback from videostream-process
cmd = self.__Video_pipe.recv()
if self.showCommands and cmd != "": print "** Vid -> Com : ",cmd
if cmd == "vDecProc": # videodecode-process should start
if not self.__vDecodeRunning:
self.__vDecodeProcess = multiprocessing.Process( target=vDecode, args=(self.__VidPipePath,self.__vdecodeChild_pipe,os.getpid()))
							self.__vDecodeProcess.start()
self.__net_pipes.append(self.__vdecode_pipe)
self.__vDecodeRunning = True
self.__Video_pipe.send("vDecProcON")
# else: self.__vdecode_pipe.send(cmd) # If / elif / else is somehow not working here...whyever
if cmd == "VideoDown": self.__VideoReady=False # videodecode-process stopped
if cmd == "saveVideo": self.__SaveVideo=True # no preprocessing of the video
if cmd == "unsaveVideo": self.__SaveVideo=False # preprocessing activated again
if cmd == "debug": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "showCommands": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "hideCommands": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "show": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "hide": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "vDecProcKill":
self.__vdecode_pipe.send("die!") # videodecode-process should switch off
						self.__vDecodeRunning = False
if ip==self.__Config_pipe and not self.__networksuicide: ### Receiving drone-configuration
try:
if self.__networksuicide: break # Does not stop sometimes, so the loop will be forced to stop
cfgdata = cfgdata+self.__Config_pipe.recv(65535) # Data comes in two or three packages
if cfgdata.count("\x00"): # Last byte of sent config-file, everything was received
if self.__networksuicide: break
configdata = (cfgdata.split("\n")) # Split the huge package into a configuration-list
for i in range(0, len(configdata), 1):
configdata[i] = configdata[i].split(" = ") # Split the single configuration-lines into configuration and value
self.__ConfigData = configdata[:-1] # Last value is "\x00"
self.__ConfigDataTimeStamp = time.time()-self.__startTime # Set a timestamp for a better coordination
self.__ConfigDataCount+=1 # Alters the count of received Configdata for a better coordination
configdata, cfgdata = [], ""
if self.showCommands: print "Got "+str(len(self.__ConfigData))+" Configdata "+str(time.time()-self.__calltime)
self.__calltime=0
except IOError: pass
debug, showCommands = self.__checkAndReact(debug, showCommands) # Check for errors and things to change
if self.debug: print "receiveData-Thread : committed suicide"
def __stopnetwork(self):
self.__networksuicide = True
#############################=-
### Compatibility Commands ###=-
#############################=-
# While programming this API I changed some command-names
# This section converts the old commands into the new ones
def pwm(self, fl, fr, rl, rr): # Controls engines directly, overriding control loops.
if fl > 64000: fl = 64000
if fr > 64000: fr = 64000
if rl > 64000: rl = 64000
if rr > 64000: rr = 64000
self.at("PWM", [int(fl), int(fr), int(rr), int(rl)])
def groundVideo(self, *args): self.groundCam(*args)
def frontVideo(self, *args): self.frontCam(*args)
###############################################################################
### Internal Subfunctions
###############################################################################
def __checkSpeedValue(self,value):
try:
speed = float(value)
if self.valueCorrection:
speed = max(-1.0,speed)
speed = min( 1.0,speed)
except: speed = self.__speed
return speed
# Checks the inputs for the right length
def normalLen8(value):
value, zero = str(value), "00000000"
vlen = min(len(value),8)
normal = zero[0:8-vlen] + value[0:8]
return normal[0:8].lower()
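# Worked example: normalLen8("3016321") returns "03016321" (zero-padded or truncated to eight characters, lower-case).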
##################################################################################################
###### Receive and Decode Video ######
##################################################################################################
# If the ps_drone-process has crashed, recognize it and kill yourself
def watchdogV(parentPID, ownPID):
global commitsuicideV
while not commitsuicideV:
time.sleep(1)
try : os.getpgid(parentPID)
except:
try: subprocess.Popen(["kill",str(os.getpid())],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
except: pass
# Thread to capture, decode and display the video-stream
def vCapture(VidPipePath, parent_pipe):
import cv2
global vCruns, commitsuicideV, showVid, lockV, debugV
# cv2.startWindowThread()
show = False
hide = True
vCruns = True
t = time.time()
parent_pipe.send(("VideoUp",0,0,0))
capture = cv2.VideoCapture(VidPipePath)
ImgCount = 0
if debugV: print "CAPTURE: "+str(time.time()-t)
time.sleep(0.1)
parent_pipe.send(("foundCodec",0,0,0))
declag = time.time()
count = -3
imageXsize = 0
imageYsize = 0
windowName = "PS-Drone"
codecOK = False
lastKey = ""
cc=0
while not commitsuicideV:
decTimeRev = time.time()
receiveWatchdog = threading.Timer(2.0, VideoReceiveWatchdog, [parent_pipe,"vCapture", debugV]) # Resets video if something hangs
receiveWatchdog.start()
success, image = capture.read()
cc+=1
receiveWatchdog.cancel()
decTime = decTimeRev-time.time()
tlag = time.time()-declag
if not codecOK and success:
if image.shape[:2]==(360,640) or image.shape[:2]==(368,640) or image.shape[:2]==(720,1280) or image.shape[:2]==(1080,1920):
codecOK = True
if debugV: print "Codec seems OK"
else:
if debugV: print "Codec failure"
parent_pipe.send(("reset",0,0,0))
commitsuicideV = True
if codecOK:
if not (imageXsize == image.shape[1]) or not (imageYsize == image.shape[0]):
cv2.destroyAllWindows()
imageYsize, imageXsize = image.shape[:2]
windowName = "PS-Drone - "+str(imageXsize)+"x"+str(imageYsize)
if success:
if tlag > 0.02: count+=1
if count > 0:
ImgCount+=1
if not show and not hide:
cv2.destroyAllWindows()
hide = True
if show:
cv2.imshow(windowName, image)
key=cv2.waitKey(1)
if key>-1: parent_pipe.send(("keypressed",0,chr(key%256),0))
parent_pipe.send(("Image",ImgCount,image,decTime))
else: time.sleep(0.01)
declag = time.time()
if showVid:
if not show:
show=True
cv2.destroyAllWindows()
else:
if show:
show=False
cv2.destroyAllWindows()
vCruns = False
cv2.destroyAllWindows()
capture.release()
if debugV: print "vCapture-Thread : committed suicide"
### Process to decode the videostream in the FIFO-Pipe, stored there from main-loop.
# Storing and decoding must not happen in the same process; that's why decoding is external.
# vDecode controls the vCapture-thread which captures and decodes finally the videostream.
def vDecode(VidPipePath, parent_pipe, parentPID):
global vCruns, commitsuicideV, showVid, lockV, debugV
showCommands = False
Thread_vCapture = threading.Thread(target=vCapture, args=(VidPipePath,parent_pipe))
Thread_vCapture.start()
Thread_watchdogV = threading.Thread(target=watchdogV, args=[parentPID,os.getpid()])
Thread_watchdogV.start()
while not commitsuicideV:
in_pipe, out_pipe, dummy2 = select.select([parent_pipe], [], [], 0.1) # When something is in a pipe...
cmd = parent_pipe.recv()
if showCommands: print "** Com -> vDec : ",cmd
if cmd == "die!": commitsuicideV = True
elif cmd == "reset": commitsuicideV = True
elif cmd == "show": showVid = True
elif cmd == "hide": showVid = False
elif cmd == "debug":
debugV = True
print "vDecode-Process : running"
if vCruns: print "vCapture-Thread : running"
elif cmd == "undebug": debugV = False
elif cmd == "showCommands": showCommands = True
elif cmd == "hideCommands": showCommands = False
Thread_vCapture.join()
parent_pipe.send(("suicided",0,0,0))
time.sleep(0.1)
if debugV: print "vDecode-Process : committed suicide"
#####################################################
def VideoReceiveWatchdog(parent_pipe,name, debugV):
if debugV: print "WHATCHDOG reset von",name
parent_pipe.send(("reset",0,0,0))
def mainloopV(DroneIP, VideoPort, VidPipePath, parent_pipe, parentPID):
inited, preinited, suicide, debugV, showCommands, slowVideo = False, False, 0, False, False, False
rawVideoFrame, VidStreamSnippet, VidStreamSnippetAvalible, iFrame, FrameCount = "", "", False, False, 0
saveVideo, unsureMode, searchCodecTime, frameRepeat, burstFrameCount = False, True, 0, 1, 0
reset, resetCount, commitsuicideV, foundCodec = False, 0, False, False
vstream_pipe, pipes = None, [parent_pipe]
vdecode_pipe, vdecode_childpipe = multiprocessing.Pipe()
pipes.append(vdecode_pipe)
Thread_watchdogV = threading.Thread(target=watchdogV, args=[parentPID,os.getpid()])
Thread_watchdogV.start()
while not commitsuicideV:
in_pipe, out_pipe, dummy2 = select.select(pipes, [], [], 0.1) # When something is in a pipe...
for ip in in_pipe:
if ip == parent_pipe:
cmd = parent_pipe.recv()
if showCommands: print "** Com -> Vid : ",cmd
if cmd == "die!":
if inited:
suicide = True
parent_pipe.send("vDecProcKill")
dummy = 0
else: commitsuicideV = True
elif cmd == "foundCodec": foundCodec = True
elif cmd == "reset" and not reset:# and resetCount<3:
inited, preinited, foundCodec = False, False, False
rawVideoFrame, VidStreamSnippet = "", ""
VidStreamSnippetAvalible = False
iFrame, FrameCount, reset = False, 0, True
unsureMode, searchCodecTime = True, 0
burstFrameCount = 0
resetCount += 1
parent_pipe.send("vDecProcKill")
elif cmd == "slowVideo":
slowVideo = True
frameRepeat = 1
elif cmd == "midVideo":
slowVideo = True
frameRepeat = 4
elif cmd == "fastVideo":
slowVideo = False
frameRepeat = 1
elif cmd == "saveVideo":
saveVideo = True
parent_pipe.send("saveVideo")
elif cmd == "unsaveVideo":
saveVideo = False
parent_pipe.send("unsaveVideo")
elif cmd == "showCommands":
showCommands = True
parent_pipe.send("showCommands")
elif cmd == "hideCommands":
showCommands = False
parent_pipe.send("hideCommands")
elif cmd == "debug":
debugV = True
print "Video-Process : running"
parent_pipe.send("debug")
elif cmd == "undebug":
debugV = False
parent_pipe.send("undebug")
elif cmd == "init" and not inited and not preinited:
preinited = True
try: os.mkfifo(VidPipePath)
except: pass
parent_pipe.send("vDecProc")
elif cmd == "vDecProcON":
rawVideoFrame = ""
VidStreamSnippet = ""
iFrame = False
FrameCount = 0
foundCodec = False
searchCodecTime = 0
if not vstream_pipe:
vstream_pipe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
vstream_pipe.setblocking(0)
vstream_pipe.connect_ex((DroneIP,VideoPort))
pipes.append(vstream_pipe)
write2pipe = open(VidPipePath,"w+")
suicide = False
inited = True
preinited = False
unsureMode = True
elif cmd == "uninit" and inited:
parent_pipe.send("vDecProcKill")
elif cmd == "vd died":
if inited and not reset:
pipes.remove(vstream_pipe)
vstream_pipe.shutdown(socket.SHUT_RDWR)
vstream_pipe.close()
write2pipe.close()
inited = False
if suicide: commitsuicideV = True
parent_pipe.send("VideoDown")
try: os.remove(VidPipePath)
except: pass
if not inited and reset:
try: os.mkfifo(VidPipePath)
except: pass
parent_pipe.send("VideoDown")
parent_pipe.send("vDecProc")
parent_pipe.send("debug")
reset = False
burstFrameCount = 0
else:
parent_pipe.send(cmd)
			### Grabs the video stream and stores it in a FIFO pipe for decoding.
			# The decoder has to guess the videostream-format, which takes around 266 video-frames.
			# So the stream is preprocessed: I-frames are cut out during initiation and a flood of copies
			# is sent to the decoder until the proper decoder for the video stream is found.
# In case of a slow or midspeed-video, only a single or a few copied I-frames are sent to the decoder.
if ip == vstream_pipe:
receiveWatchdog = threading.Timer(2.0, VideoReceiveWatchdog, [parent_pipe,"Video Mainloop", debugV,]) # Resets video if something hangs
receiveWatchdog.start()
videoPackage = vstream_pipe.recv(65535)
receiveWatchdog.cancel()
if len(videoPackage) == 0: commitsuicideV = True
else:
if inited and not reset:
if unsureMode: ### An MPEG4-Stream is not confirmed, fallback to savemode ?
if not searchCodecTime and not len(VidStreamSnippet): # Video is freshly initiated
searchCodecTime = time.time()
							if (time.time()-searchCodecTime) < 0.15: # Collecting VidStreamSnippet for later use
VidStreamSnippet+=videoPackage
if (time.time()-searchCodecTime) > 2.0: # Waited too long for an MPEG4 stream confirmation...
saveVideo = True # ... fall back to savemode
parent_pipe.send("saveVideo") # Inform the main process
unsureMode = False
foundCodec = True # switch off codec guess speed-up
if not saveVideo:
# if len(videoPackage) == 0: commitsuicideV = True
# else:
if videoPackage[31:40].find("\x00\x00\x00")>3: # Found a new MPEG4-Frame
FrameCount+=1
### Processing the last frame
if iFrame: # If the last frame was an I-frame
VidStreamSnippet = rawVideoFrame # ... save it as VideoStreamSnippet for later use
if foundCodec: # OpenCV guessed the used Codec
if slowVideo: # Send just the iFrame (openCV stores about 5 in its queue),
for i in range(0,frameRepeat,1): # ... so repeat for less delay in midVideo()-mode
write2pipe.write(VidStreamSnippet)
iFrame = False
else: pass
if not slowVideo: # For all last Frames
if foundCodec:
try: write2pipe.write(rawVideoFrame)
except: pass
if not foundCodec: # Flood the pipe with the last iFrames, so that openCV can guess the codec faster
for i in range(0,5):
try:
write2pipe.write(rawVideoFrame)
burstFrameCount+=1
except: pass
### Processing new Frames
if ord(videoPackage[30]) == 1: #### Found an I-Frame
rawVideoFrame = "" # Delete the data previous to the first iFrame
unsureMode,iFrame = False, True
elif ord(videoPackage[30]) == 3: #### Found a P-Frame
unsureMode = False
else: #### Found an odd h264-frametype
if debugV:
print "*** Odd h264 Frametype: ",FrameCount,
for i in range(31,43,1): print ord(videoPackage[i]),
print " - ",videoPackage[31:40].find("\x00\x00\x00"),ord(videoPackage[30])
rawVideoFrame = ""
### Collecting data for the next frame from stream
rawVideoFrame+=videoPackage
else: #(saveVideo-Mode)
if foundCodec: write2pipe.write(videoPackage)
else:
for i in range(0,2):
write2pipe.write(VidStreamSnippet)
burstFrameCount+=1
if not foundCodec and burstFrameCount>350:
parent_pipe.send(("reset",0,0,0))
burstFrameCount=0
if debugV: print "To many pictures send while guessing the codec. Resetting."
try:
vstream_pipe.shutdown(socket.SHUT_RDWR)
vstream_pipe.close()
except: pass
try: write2pipe.close()
except: pass
try: vstream_pipe.close()
except: pass
try:
VidPipe=open(VidPipePath,"r")
r = "1"
while len(r): r=VidPipe.read()
		VidPipe.close()
except: pass
try: os.remove(VidPipePath)
except: pass
if debugV: print "Video-Process : committed suicide"
##################################################################################################
###### Receive and Decode NavData ######
##################################################################################################
### Description:
### What follows is lousy code for the sake of better documentation! Later there will be lousy code because of laziness; I will correct it later... maybe.
### You will (normally) find the names from the official AR.Drone SDK 2.0, some comments and the official data type of each value.
### A lot of entries are reverse-engineered; for some, I have no idea what they are doing or what their meaning is.
### It would be nice if you could give me a hint if you have some further information.
##### Header ##################################################################
def decode_Header(data):
#Bit 00-07: FLY_MASK, VIDEO_MASK, VISION_MASK, CONTROL_MASK, ALTITUDE_MASK, USER_FEEDBACK_START, COMMAND_MASK, CAMERA_MASK
#Bit 08-15: TRAVELLING_MASK, USB_MASK, NAVDATA_DEMO_MASK, NAVDATA_BOOTSTRAP, MOTORS_MASK, COM_LOST_MASK, SOFTWARE_FAULT, VBAT_LOW
#Bit 16-23: USER_EL, TIMER_ELAPSED, MAGNETO_NEEDS_CALIB, ANGLES_OUT_OF_RANGE, WIND_MASK, ULTRASOUND_MASK, CUTOUT_MASK, PIC_VERSION_MASK
#Bit 24-31: ATCODEC_THREAD_ON, NAVDATA_THREAD_ON, VIDEO_THREAD_ON, ACQ_THREAD_ON, CTRL_WATCHDOG_MASK, ADC_WATCHDOG_MASK, COM_WATCHDOG_MASK, EMERGENCY_MASK
stateBit = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
stateBit[ 0] = data[1] &1 # 0: FLY MASK : (0) ardrone is landed, (1) ardrone is flying
stateBit[ 1] = data[1]>> 1&1 # 1: VIDEO MASK : (0) video disable, (1) video enable
stateBit[ 2] = data[1]>> 2&1 # 2: VISION MASK : (0) vision disable, (1) vision enable
stateBit[ 3] = data[1]>> 3&1 # 3: CONTROL ALGO : (0) euler angles control, (1) angular speed control
stateBit[ 4] = data[1]>> 4&1 # 4: ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active
stateBit[ 5] = data[1]>> 5&1 # 5: USER feedback : Start button state
stateBit[ 6] = data[1]>> 6&1 # 6: Control command ACK : (0) None, (1) one received
stateBit[ 7] = data[1]>> 7&1 # 7: CAMERA MASK : (0) camera not ready, (1) Camera ready
stateBit[ 8] = data[1]>> 8&1 # 8: Travelling mask : (0) disable, (1) enable
stateBit[ 9] = data[1]>> 9&1 # 9: USB key : (0) usb key not ready, (1) usb key ready
stateBit[10] = data[1]>>10&1 # 10: Navdata demo : (0) All navdata, (1) only navdata demo
stateBit[11] = data[1]>>11&1 # 11: Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent
stateBit[12] = data[1]>>12&1 # 12: Motors status : (0) Ok, (1) Motors problem
stateBit[13] = data[1]>>13&1 # 13: Communication Lost : (0) Com is ok, (1) com problem
stateBit[14] = data[1]>>14&1 # 14: Software fault detected - user should land as quick as possible (1)
stateBit[15] = data[1]>>15&1 # 15: VBat low : (0) Ok, (1) too low
stateBit[16] = data[1]>>16&1 # 16: User Emergency Landing : (0) User EL is OFF, (1) User EL is ON
stateBit[17] = data[1]>>17&1 # 17: Timer elapsed : (0) not elapsed, (1) elapsed
stateBit[18] = data[1]>>18&1 # 18: Magnetometer calib state : (0) Ok, no calibration needed, (1) not ok, calibration needed
stateBit[19] = data[1]>>19&1 # 19: Angles : (0) Ok, (1) out of range
stateBit[20] = data[1]>>20&1 # 20: WIND MASK: (0) Ok, (1) Too much wind
stateBit[21] = data[1]>>21&1 # 21: Ultrasonic sensor : (0) Ok, (1) deaf
stateBit[22] = data[1]>>22&1 # 22: Cutout system detection : (0) Not detected, (1) detected
stateBit[23] = data[1]>>23&1 # 23: PIC Version number OK : (0) a bad version number, (1) version number is OK
stateBit[24] = data[1]>>24&1 # 24: ATCodec thread ON : (0) thread OFF, (1) thread ON
stateBit[25] = data[1]>>25&1 # 25: Navdata thread ON : (0) thread OFF, (1) thread ON
stateBit[26] = data[1]>>26&1 # 26: Video thread ON : (0) thread OFF, (1) thread ON
stateBit[27] = data[1]>>27&1 # 27: Acquisition thread ON : (0) thread OFF, (1) thread ON
stateBit[28] = data[1]>>28&1 # 28: CTRL watchdog : (0) control is well scheduled, (1) delay in control execution (> 5ms)
stateBit[29] = data[1]>>29&1 # 29: ADC Watchdog : (0) uart2 is good, (1) delay in uart2 dsr (> 5ms)
stateBit[30] = data[1]>>30&1 # 30: Communication Watchdog : (0) Com is ok, (1) com problem
stateBit[31] = data[1]>>31&1 # 31: Emergency landing : (0) no emergency, (1) emergency
stateBit[32] = data[2]
stateBit[33] = data[3]
# Alternative code:
# for i in range (0,32,1): arState[i]=data>>i&1
return (stateBit)
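# Hedged helper sketch (not part of the original ps_drone API): a single status flag
# can also be read straight from the 32-bit drone-state word, e.g. bit 0 = FLY_MASK,
# bit 15 = VBAT_LOW, bit 31 = EMERGENCY_MASK.
def exampleStateBit(stateWord, bit):
	# Returns 0 or 1 for the requested bit of the drone-state bitfield (data[1] above).
	return (stateWord >> bit) & 1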
##### ID = 0 ### "demo" #######################################################
def decode_ID0(packet): # NAVDATA_DEMO_TAG
dataset = struct.unpack_from("HHIIfffifffIffffffffffffIIffffffffffff", packet, 0)
if dataset[1] != 148: print "*** ERROR : Navdata-Demo-Options-Package (ID=0) has the wrong size !!!"
demo=[[0,0,0,0,0,0,0,0,0,0,0,0],0,[0,0,0],0,[0,0,0],0,[0,0,0,0,0,0,0,0,0],[0,0,0],0,0,[0,0,0,0,0,0,0,0,0],[0,0,0]]
demo[0][ 0] = dataset[2]>>15&1 # DEFAULT (bool)
demo[0][ 1] = dataset[2]>>16&1 # INIT (bool)
demo[0][ 2] = dataset[2]>>17&1 # LANDED (bool)
demo[0][ 3] = dataset[2]>>18&1 # FLYING (bool)
demo[0][ 4] = dataset[2]>>19&1 # HOVERING (bool) (Seems like landing)
demo[0][ 5] = dataset[2]>>20&1 # TEST (bool)
demo[0][ 6] = dataset[2]>>21&1 # TRANS_TAKEOFF (bool)
demo[0][ 7] = dataset[2]>>22&1 # TRANS_GOFIX (bool)
demo[0][ 8] = dataset[2]>>23&1 # TRANS_LANDING (bool)
demo[0][ 9] = dataset[2]>>24&1 # TRANS_LOOPING (bool)
demo[0][10] = dataset[2]>>25&1 # TRANS_NO_VISION (bool)
demo[0][11] = dataset[2]>>26&1 # NUM_STATE (bool)
demo[1] =dataset[3] # vbat_flying_percentage battery voltage (filtered) in percent (uint32)
demo[2][0] =dataset[4]/1000.0 # theta pitch in degrees (float)
demo[2][1] =dataset[5]/1000.0 # phi roll in degrees (float)
demo[2][2] =dataset[6]/1000.0 # psi yaw in degrees (float)
demo[3] =dataset[7]/10.0 # altitude altitude in centimetres (int32)
demo[4][0] =dataset[8] # vx estimated speed in X in mm/s (float)
demo[4][1] =dataset[9] # vy estimated speed in Y in mm/s (float)
demo[4][2] =dataset[10] # vz estimated speed in Z in mm/s (float)
demo[5] =dataset[11] # num_frames streamed frame index (uint32) (Not used to integrate in video stage)
for i in range (0,9,1): demo[6][i] = dataset[12+i] # detection_camera_rot Camera parameters compute by detection (float matrix33)
for i in range (0,3,1): demo[7][i] = dataset[21+i] # detection_camera_trans Deprecated ! Don't use ! (float vector31)
demo[8] = dataset[24] # detection_tag_index Deprecated ! Don't use ! (uint32)
demo[9] = dataset[25] # detection_camera_type Type of tag (uint32)
for i in range (0,9,1): demo[10][i] = dataset[26+i] # drone_camera_rot Camera parameters computed by drone (float matrix33)
for i in range (0,3,1): demo[11][i] = dataset[35+i] # drone_camera_trans Deprecated ! Don't use ! (float vector31)
return(demo)
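# Hedged usage sketch (assumes a NavData dictionary as produced by getNavdata below,
# with the "demo" option enabled): pulling the most common values out of the demo block.
def exampleReadDemo(navdata):
	demo = navdata["demo"]
	batteryPercent = demo[1]         # vbat_flying_percentage
	pitch, roll, yaw = demo[2]       # theta/phi/psi in degrees
	altitudeCm = demo[3]
	return batteryPercent, (pitch, roll, yaw), altitudeCm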
##### ID = 1 ### "time" #######################################################
def decode_ID1(packet): #NAVDATA_TIME_TAG
dataset = struct.unpack_from("HHI", packet, 0)
if dataset[1] != 8: print "*** ERROR : navdata-time-Options-Package (ID=1) has the wrong size !!!"
time=[0.0]
# Value: 11 most significant bits represent the seconds, and the 21 least significant bits represent the microseconds.
	for i in range(0,21,1): time[0] += ((dataset[2]>>i&1)*(2**i)) # Calculating the microsecond part
	time[0] /= 1000000
	for i in range(21,32,1): time[0] += (dataset[2]>>i&1)*(2**(i-21)) # Calculating the seconds part
return(time)
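# Hedged equivalent of the loops in decode_ID1 (illustrative only): the 32-bit
# timestamp can be split with plain bit arithmetic instead (11 most significant
# bits = seconds, 21 least significant bits = microseconds).
def exampleDecodeTimestamp(raw):
	seconds = raw >> 21              # upper 11 bits
	micros = raw & 0x1FFFFF          # lower 21 bits
	return seconds + micros / 1000000.0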
##### ID = 2 ### "raw_measures" ################################################
def decode_ID2(packet): #NAVDATA_RAW_MEASURES_TAG
dataset = struct.unpack_from("HHHHHhhhhhIHHHHHHHHHHHHhh", packet, 0)
if dataset[1] != 52: print "*** ERROR : navdata-raw_measures-Options-Package (ID=2) has the wrong size !!!"
raw_measures = [[0,0,0],[0,0,0],[0,0],0,0,0,0,0,0,0,0,0,0,0,0,0]
for i in range(0,3,1): raw_measures[0][i] = dataset[2+i] # raw_accs[xyz] filtered accelerometer-datas [LSB] (uint16)
for i in range(0,3,1): raw_measures[1][i] = dataset[5+i] # raw_gyros[xyz] filtered gyrometer-datas [LSB] (int16)
for i in range(0,2,1): raw_measures[2][i] = dataset[8+i] # raw_gyros_110[xy] gyrometers x/y 110 deg/s [LSB] (int16)
raw_measures[ 3] = dataset[10] # vbat_raw battery voltage raw (mV) (uint)
raw_measures[ 4] = dataset[11] # us_debut_echo [LSB] (uint16)
raw_measures[ 5] = dataset[12] # us_fin_echo [LSB] (uint16)
raw_measures[ 6] = dataset[13] # us_association_echo [LSB] (uint16)
raw_measures[ 7] = dataset[14] # us_distance_echo [LSB] (uint16)
raw_measures[ 8] = dataset[15] # us_courbe_temps [LSB] (uint16)
raw_measures[ 9] = dataset[16] # us_courbe_valeur [LSB] (uint16)
raw_measures[10] = dataset[17] # us_courbe_ref [LSB] (uint16)
raw_measures[11] = dataset[18] # flag_echo_ini [LSB] (uint16)
raw_measures[12] = dataset[19] # nb_echo [LSB] (uint16)
raw_measures[13] = dataset[21] # sum_echo juRef_st lower 16Bit, upper 16Bit=tags? (uint32)
	raw_measures[14] = dataset[23] # alt_temp_raw in millimetres (just lower 16Bit) (int32)
raw_measures[15] = dataset[24] # gradient [LSB] (int16)
return(raw_measures)
##### ID = 3 ### "phys_measures" ##############################################
def decode_ID3(packet): #NAVDATA_PHYS_MEASURES_TAG
dataset = struct.unpack_from("HHfHffffffIII", packet, 0)
if dataset[1] != 46: print "*** ERROR : navdata-phys_measures-Options-Package (ID=3) has the wrong size !!!"
phys_measures = [0,0,[0,0,0],[0,0,0],0,0,0]
phys_measures[0] = dataset[2] #float32 accs_temp
phys_measures[1] = dataset[3] #uint16 gyro_temp
phys_measures[4] = dataset[10] #uint32 alim3V3 3.3volt alim [LSB]
phys_measures[5] = dataset[11] #uint32 vrefEpson ref volt Epson gyro [LSB]
phys_measures[6] = dataset[12] #uint32 vrefIDG ref volt IDG gyro [LSB]
dataset = struct.unpack_from(">HHfHffffffIII", packet, 0) #switch from little to big-endian
for i in range(0,3,1): phys_measures[2][i] = dataset[4+i] #float32 phys_accs[xyz]
for i in range(0,3,1): phys_measures[3][i] = dataset[7+i] #float32 phys_gyros[xyz]
return(phys_measures)
##### ID = 4 ### "gyros_offsets" ##############################################
def decode_ID4(packet): #NNAVDATA_GYROS_OFFSETS_TAG
dataset = struct.unpack_from("HHfff", packet, 0)
if dataset[1] != 16: print "*** ERROR : navdata-gyros_offsets-Options-Package (ID=4) has the wrong size !!!"
gyros_offsets = [0,0,0]
for i in range (0,3,1): gyros_offsets[i]=dataset[i+2] # offset_g[xyz] in deg/s (float)
return(gyros_offsets)
##### ID = 5 ### "euler_angles" ###############################################
def decode_ID5(packet): #NAVDATA_EULER_ANGLES_TAG
dataset = struct.unpack_from("HHff", packet, 0)
if dataset[1] != 12: print "*** ERROR : navdata-euler_angles-Options-Package (ID=5) has the wrong size !!!"
euler_angles = [0,0]
euler_angles[0] = dataset[2] #float32 theta_a (head/back)
euler_angles[1] = dataset[3] #float32 phi_a (sides)
return(euler_angles)
##### ID = 6 ### "references" #################################################
def decode_ID6(packet): #NAVDATA_REFERENCES_TAG
dataset = struct.unpack_from("HHiiiiiiiiffffffIfffffI", packet, 0)
if dataset[1] != 88: print "*** ERROR : navdata-references-Options-Package (ID=6) has the wrong size !!!"
references = [[0,0,0],[0,0],[0,0,0],[0.0,0.0],[0.0,0.0],[0.0,0.0],0,[0.0,0.0,0.0,0.0,0.0,0]]
references[0][0] = dataset[2] #ref_theta Theta_ref_embedded [milli-deg] (int32)
references[0][1] = dataset[3] #ref_phi Phi_ref_embedded [milli-deg] (int32)
references[0][2] = dataset[9] #ref_psi Psi_ref_embedded [milli-deg] (int32)
references[1][0] = dataset[4] #ref_theta_I Theta_ref_int [milli-deg] (int32)
references[1][1] = dataset[5] #ref_phi_I Phi_ref_int [milli-deg] (int32)
references[2][0] = dataset[6] #ref_pitch Pitch_ref_embedded [milli-deg] (int32)
references[2][1] = dataset[7] #ref_roll Roll_ref_embedded [milli-deg] (int32)
references[2][2] = dataset[8] #ref_yaw Yaw_ref_embedded [milli-deg/s] (int32)
references[3][0] = dataset[10] #vx_ref Vx_Ref_[mm/s] (float)
references[3][1] = dataset[11] #vy_ref Vy_Ref_[mm/s] (float)
references[4][0] = dataset[12] #theta_mod Theta_modele [radian] (float)
references[4][1] = dataset[13] #phi_mod Phi_modele [radian] (float)
references[5][0] = dataset[14] #k_v_x (float)
references[5][1] = dataset[15] #k_v_y (float)
references[6] = dataset[16] #k_mode (uint32)
references[7][0] = dataset[17] #ui_time (float)
references[7][1] = dataset[18] #ui_theta (float)
references[7][2] = dataset[19] #ui_phi (float)
references[7][3] = dataset[20] #ui_psi (float)
references[7][4] = dataset[21] #ui_psi_accuracy (float)
references[7][5] = dataset[22] #ui_seq (int32)
return(references)
##### ID = 7 ### "trims" ######################################################
def decode_ID7(packet): #NAVDATA_TRIMS_TAG
dataset = struct.unpack_from("HHfff", packet, 0)
if dataset[1] != 16: print "*** ERROR : navdata-trims-Options-Package (ID=7) has the wrong size !!!"
trims = [0,0,0]
trims[0] = dataset[2] # angular_rates_trim (float)
trims[1] = dataset[3] # euler_angles_trim_theta [milli-deg] (float)
trims[2] = dataset[4] # euler_angles_trim_phi [milli-deg] (float)
return(trims)
##### ID = 8 ### "rc_references" ##############################################
def decode_ID8(packet): #NAVDATA_RC_REFERENCES_TAG
dataset = struct.unpack_from("HHiiiii", packet, 0)
if dataset[1] != 24: print "*** ERROR : navdata-rc_references-Options-Package (ID=8) has the wrong size !!!"
rc_references = [0,0,0,0,0]
rc_references[0] = dataset[2] # rc_ref_pitch Pitch_rc_embedded (int32)
rc_references[1] = dataset[3] # rc_ref_roll Roll_rc_embedded (int32)
rc_references[2] = dataset[4] # rc_ref_yaw Yaw_rc_embedded (int32)
rc_references[3] = dataset[5] # rc_ref_gaz Gaz_rc_embedded (int32)
rc_references[4] = dataset[6] # rc_ref_ag Ag_rc_embedded (int32)
return(rc_references)
##### ID = 9 ### "pwm" ########################################################
def decode_ID9(packet): #NAVDATA_PWM_TAG
dataset = struct.unpack_from("HHBBBBBBBBffffiiifiiifHHHHff", packet, 0)
if dataset[1] != 76 and dataset[1] != 92: #92 since firmware 2.4.8 ?
print "*** ERROR : navdata-navdata_pwm-Options-Package (ID=9) has the wrong size !!!"
#print "Soll: 76 Ist:",dataset[1]
pwm = [[0,0,0,0],[0,0,0,0],0.0,0.0,0.0,0.0,[0,0,0],0.0,[0,0,0,0.0],[0,0,0,0],0.0,0.0]
for i in range(0,4,1): pwm[0][i] = dataset[2+i] # motor1/2/3/4 [Pulse-width mod] (uint8)
for i in range(0,4,1): pwm[1][i] = dataset[6+i] # sat_motor1/2/3/4 [Pulse-width mod] (uint8)
pwm[2] = dataset[10] # gaz_feed_forward [Pulse-width mod] (float)
pwm[3] = dataset[11] # gaz_altitud [Pulse-width mod] (float)
pwm[4] = dataset[12] # altitude_integral [mm/s] (float)
pwm[5] = dataset[13] # vz_ref [mm/s] (float)
pwm[6][0] = dataset[14] # u_pitch [Pulse-width mod] (int32)
pwm[6][1] = dataset[15] # u_roll [Pulse-width mod] (int32)
pwm[6][2] = dataset[16] # u_yaw [Pulse-width mod] (int32)
pwm[7] = dataset[17] # yaw_u_I [Pulse-width mod] (float)
pwm[8][0] = dataset[18] # u_pitch_planif [Pulse-width mod] (int32)
pwm[8][1] = dataset[19] # u_roll_planif [Pulse-width mod] (int32)
pwm[8][2] = dataset[20] # u_yaw_planif [Pulse-width mod] (int32)
pwm[8][3] = dataset[21] # u_gaz_planif [Pulse-width mod] (float)
for i in range(0,4,1):
pwm[9][i] = dataset[22+i] # current_motor1/2/3/4 [mA] (uint16)
pwm[10] = dataset[26] # altitude_prop [Pulse-width mod] (float)
pwm[11] = dataset[27] # altitude_der [Pulse-width mod] (float)
return(pwm)
##### ID = 10 ### "altitude" ###################################################
def decode_ID10(packet): #NAVDATA_ALTITUDE_TAG
dataset = struct.unpack_from("HHifiiffiiiIffI", packet, 0)
if dataset[1] != 56: print "*** ERROR : navdata-navdata_altitude-Options-Package (ID=10) has the wrong size !!!"
altitude = [0,0.0,0,0,0.0,0.0,[0,0,0],0,[0,0],0]
altitude[0] = dataset[2] # altitude_vision [mm] (int32)
altitude[1] = dataset[3] # altitude_vz [mm/s] (float)
altitude[2] = dataset[4] # altitude_ref [mm] (int32)
altitude[3] = dataset[5] # altitude_raw [mm] (int32)
altitude[4] = dataset[6] # obs_accZ Observer AccZ [m/s2] (float)
altitude[5] = dataset[7] # obs_alt Observer altitude US [m](float)
for i in range (0,3,1):
altitude[6][i] = dataset[8+i] # obs_x 3-Vector (int32)
altitude[7] = dataset[11] # obs_state Observer state [-] (uint32)
for i in range (0,2,1):
altitude[8][i] = dataset[12+i] # est_vb 2-Vector (float)
altitude[9] = dataset[14] # est_state Observer flight state (uint32)
return(altitude)
##### ID = 11 ### "vision_raw" #################################################
def decode_ID11(packet): #NAVDATA_VISION_RAW_TAG
dataset = struct.unpack_from("HHfff", packet, 0)
if dataset[1] != 16: print "*** ERROR : navdata-vision_raw-Options-Package (ID=11) has the wrong size !!!"
vision_raw = [0,0,0]
for i in range (0,3,1): vision_raw[i] = dataset[2+i] # vision_tx_raw (xyz) (float)
return(vision_raw)
##### ID = 12 ### "vision_of" #################################################
def decode_ID12(packet): #NAVDATA_VISION_OF_TAG
dataset = struct.unpack_from("HHffffffffff", packet, 0)
if dataset[1] != 44: print "*** ERROR : navdata-vision_of-Options-Package (ID=12) has the wrong size !!!"
vision_of = [[0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0]]
for i in range (0,5,1): vision_of[0][i] = dataset[2+i] # of_dx[5] (float)
for i in range (0,5,1): vision_of[1][i] = dataset[7+i] # of_dy[5] (float)
return(vision_of)
##### ID = 13 ### "vision" #####################################################
def decode_ID13(packet): #NAVDATA_VISION_TAG
dataset = struct.unpack_from("HHIiffffifffiIffffffIIff", packet, 0)
if dataset[1] != 92: print "*** ERROR : navdata-vision-Options-Package (ID=13) has the wrong size !!!"
vision=[0,0,0.0,0.0,0.0,0.0,0,[0.0,0.0,0.0],0,0.0,[0.0,0.0,0.0],[0.0,0.0,0.0],0,0,[0.0,0.0]]
vision[0] = dataset[2] # vision_state FIXME: What are the meanings of the tags ?
vision[1] = dataset[3] # vision_misc (int32)
vision[2] = dataset[4] # vision_phi_trim (float)
vision[3] = dataset[5] # vision_phi_ref_prop (float)
vision[4] = dataset[6] # vision_theta_trim (float)
vision[5] = dataset[7] # vision_theta_ref_prop (float)
vision[6] = dataset[8] # new_raw_picture (int32)
for i in range (0,3,1):
vision[7][i] = dataset[9+i] # theta/phi/psi_capture (float)
vision[8] = dataset[12] # altitude_capture (int32)
	for i in range (0,21,1): # Calculating the microsecond part
		vision[9] += ((dataset[13]>>i&1)*(2**i))
	vision[9] /= 1000000
	for i in range (21,32,1): # Calculating the seconds part
		vision[9] += (dataset[13]>>i&1)*(2**(i-21)) # time_capture (float)
for i in range (0,3,1):
vision[10][i] = dataset[14+i] # velocities[xyz] (float)
for i in range (0,3,1):
vision[11][i] = dataset[17+i] # delta_phi/theta/psi (float)
vision[12] = dataset[20] # gold_defined (uint32)
vision[13] = dataset[21] # gold_reset (uint32)
vision[14][0] = dataset[22] # gold_x (float)
vision[14][1] = dataset[23] # gold_y (float)
return(vision)
##### ID = 14 ### "vision_perf" ###############################################
def decode_ID14(packet): #NAVDATA_VISION_PERF_TAG
dataset = struct.unpack_from("HHffffffffffffffffffffffffff", packet, 0)
if dataset[1] != 108: print "*** ERROR : navdata-vision_of-Options-Package (ID=14) has the wrong size !!!"
vision_perf=[0.0,0.0,0.0,0.0,0.0,0.0,[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]]
vision_perf[0] = dataset[2] # time_szo (float)
vision_perf[1] = dataset[3] # time_corners (float)
vision_perf[2] = dataset[4] # time_compute (float)
vision_perf[3] = dataset[5] # time_tracking (float)
vision_perf[4] = dataset[6] # time_trans (float)
vision_perf[5] = dataset[7] # time_update (float)
for i in range (0,20,1):
vision_perf[6][i] = dataset[8+i] # time_custom[20] (float)
return(vision_perf)
##### ID = 15 ### "trackers_send" #############################################
def decode_ID15(packet): #NAVDATA_TRACKERS_SEND_TAG
dataset = struct.unpack_from("HHiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii", packet, 0)
if dataset[1] != 364: print "*** ERROR : navdata-trackers_send-Options-Package (ID=15) has the wrong size !!!"
DEFAULT_NB_TRACKERS_WIDTH = 6
DEFAULT_NB_TRACKERS_HEIGHT = 5
limit = DEFAULT_NB_TRACKERS_WIDTH*DEFAULT_NB_TRACKERS_HEIGHT
trackers_send = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]]]
for i in range (0, limit, 1):
trackers_send[0][i] = dataset[2+i] # locked[limit] (int32)
for i in range (0, limit, 1):
trackers_send[1][i][0] = dataset[32+(i*2)] # point[x[limit],y[limit]] (int32)
trackers_send[1][i][1] = dataset[33+(i*2)]
return(trackers_send)
##### ID = 16 ### "vision_detect" #############################################
def decode_ID16(packet): #NAVDATA_VISION_DETECT_TAG
dataset = struct.unpack_from("HHIIIIIIIIIIIIIIIIIIIIIIIIIffffIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII", packet, offsetND)
if dataset[1] != 328: print "*** ERROR : navdata-vision_detect-Package (ID=16) has the wrong size !!!"
vision_detect = [0,[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0.0,0.0,0.0,0.0],[[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]],[[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0]],[0,0,0,0]]
#Max marker detection in one picture: 4
vision_detect[0] = dataset[2] # nb_detected (uint32)
for i in range (0,4,1): vision_detect[1][i] = dataset[3+i] # type[4] (uint32)
for i in range (0,4,1): vision_detect[2][i] = dataset[7+i] # xc[4] (uint32)
for i in range (0,4,1): vision_detect[3][i] = dataset[11+i] # yc[4] (uint32)
for i in range (0,4,1): vision_detect[4][i] = dataset[15+i] # width[4] (uint32)
for i in range (0,4,1): vision_detect[5][i] = dataset[19+i] # height[4] (uint32)
for i in range (0,4,1): vision_detect[6][i] = dataset[23+i] # dist[4] (uint32)
for i in range (0,4,1): vision_detect[7][i] = dataset[27+i] # orientation_angle[4] (float)
for i in range (0,4,1):
for j in range (0,9,1): vision_detect[8][i][j] = dataset[31+i+j] # rotation[4] (float 3x3 matrix (11,12,13,21,...)
for i in range (0,4,1):
		for j in range (0,3,1): vision_detect[9][i][j] = dataset[67+i+j] # translation[4] (float 3-vector)
for i in range (0,4,1): vision_detect[10][i] = dataset[79+i] # camera_source[4] (uint32)
return(vision_detect)
##### ID = 17 ### "watchdog" ###################################################
def decode_ID17(packet): #NAVDATA_WATCHDOG_TAG
dataset = struct.unpack_from("HHI", packet, offsetND)
if dataset[1] != 8: print "*** ERROR : navdata-watchdog-Package (ID=17) has the wrong size !!!"
watchdog = dataset[2] # watchdog Watchdog controll [-] (uint32)
return(watchdog)
##### ID = 18 ### "adc_data_frame" #############################################
def decode_ID18(packet): #NAVDATA_ADC_DATA_FRAME_TAG
dataset = struct.unpack_from("HHIBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB", packet, offsetND)
if dataset[1] != 40: print "*** ERROR : navdata-adc_data_frame-Package (ID=18) has the wrong size !!!"
adc_data_frame = [0,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
adc_data_frame[0] = dataset[2] # version (uint32)
for i in range (0,32,1): adc_data_frame[1][i] = dataset[3+i] # data_frame[32] (uint8)
return(adc_data_frame)
##### ID = 19 ### "video_stream" ###############################################
def decode_ID19(packet): #NAVDATA_VIDEO_STREAM_TAG
dataset = struct.unpack_from("HHBIIIIfIIIiiiiiII", packet, offsetND)
if dataset[1] != 65: print "*** ERROR : navdata-video_stream-Package (ID=19) has the wrong size !!!"
video_stream = [0,0,0,0,0,0.0,0,0,0,[0,0,0,0,0],0,0]
video_stream[0] = dataset[2] # quant quantizer reference used to encode [1:31] (uint8)
video_stream[1] = dataset[3] # frame_size frame size in bytes (uint32)
video_stream[2] = dataset[4] # frame_number frame index (uint32)
video_stream[3] = dataset[5] # atcmd_ref_seq atmcd ref sequence number (uint32)
video_stream[4] = dataset[6] # atcmd_mean_ref_gap mean time between two consecutive atcmd_ref (ms) (uint32)
video_stream[5] = dataset[7] # atcmd_var_ref_gap (float)
video_stream[6] = dataset[8] # atcmd_ref_quality estimator of atcmd link quality (uint32)
#Drone 2.0:
video_stream[7] = dataset[9] # out_bitrate measured out throughput from the video tcp socket (uint32)
video_stream[8] = dataset[10] # desired_bitrate last frame size generated by the video encoder (uint32)
for i in range (0,5,1): video_stream[9][i] = dataset[11+i] # data misc temporary data (int32)
video_stream[10] = dataset[16] # tcp_queue_level queue usage (uint32)
video_stream[11] = dataset[17] # fifo_queue_level queue usage (uint32)
return(video_stream)
##### ID = 20 ### "games" ######################################################
def decode_ID20(packet): #NAVDATA_GAMES_TAG
dataset = struct.unpack_from("HHII", packet, offsetND)
if dataset[1] != 12: print "*** ERROR : navdata-games-Package (ID=20) has the wrong size !!!"
games = [0,0]
games[0] = dataset[2] # double_tap_counter (uint32)
games[1] = dataset[3] # finish_line_counter (uint32)
return(games)
##### ID = 21 ### "pressure_raw" ###############################################
def decode_ID21(packet): #NAVDATA_PRESSURE_RAW_TAG
dataset = struct.unpack_from("HHihii", packet, offsetND)
if dataset[1] != 18: print "*** ERROR : navdata-pressure_raw-Package (ID=21) has the wrong size !!!"
pressure_raw = [0,0,0,0]
pressure_raw[0] = dataset[2] # up (int32)
pressure_raw[1] = dataset[3] # ut (int16)
pressure_raw[2] = dataset[4] # Temperature_meas (int32)
pressure_raw[3] = dataset[5] # Pression_meas (int32)
return(pressure_raw)
##### ID = 22 ### "magneto" ####################################################
def decode_ID22(packet): #NAVDATA_MAGNETO_TAG
dataset = struct.unpack_from("HHhhhffffffffffffBifff", packet, offsetND)
if dataset[1] != 83: print "*** ERROR : navdata-magneto-Package (ID=22) has the wrong size !!!"
magneto = [[0,0,0],[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0],0.0,0.0,0.0,0,0,0.0,0.0,0.0]
for i in range (0,3,1): magneto[0][i]=dataset[2+i] # mx/my/mz (int16)
for i in range (0,3,1): magneto[1][i]=dataset[5+i] # magneto_raw magneto in the body frame [mG] (vector float)
for i in range (0,3,1): magneto[2][i]=dataset[8+i] # magneto_rectified (vector float)
for i in range (0,3,1): magneto[3][i]=dataset[11+i] # magneto_offset (vector float)
magneto[ 4] = dataset[14] # heading_unwrapped (float)
magneto[ 5] = dataset[15] # heading_gyro_unwrapped (float)
magneto[ 6] = dataset[16] # heading_fusion_unwrapped (float)
magneto[ 7] = dataset[17] # magneto_calibration_ok (char)
magneto[ 8] = dataset[18] # magneto_state (uint32)
magneto[ 9] = dataset[19] # magneto_radius (float)
magneto[10] = dataset[20] # error_mean (float)
magneto[11] = dataset[21] # error_var (float)
return(magneto)
##### ID = 23 ### "wind_speed" ################################################
def decode_ID23(packet): #NAVDATA_WIND_TAG
dataset = struct.unpack_from("HHfffffffffffff", packet, offsetND)
if dataset[1] != 56 and dataset[1] != 64:
print "*** ERROR : navdata-wind_speed-Package (ID=23) has the wrong size !!!"
wind_speed = [0.0,0.0,[0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0]]
wind_speed[0] = dataset[2] # wind_speed (float)
wind_speed[1] = dataset[3] # wind_angle (float)
wind_speed[2][0] = dataset[4] # wind_compensation_theta (float)
wind_speed[2][1] = dataset[5] # wind_compensation_phi (float)
for i in range (0,6,1): wind_speed[3][i]=dataset[6+i] # state_x[1-6] (float)
for i in range (0,3,1): wind_speed[4][i]=dataset[7+i] # magneto_debug[1-3] (float)
return(wind_speed)
##### ID = 24 ### "kalman_pressure" ###########################################
def decode_ID24(packet): #NAVDATA_KALMAN_PRESSURE_TAG
dataset = struct.unpack_from("HHffffffffff?f?ff??", packet, offsetND)
if dataset[1] != 72: print "*** ERROR : navdata-wind_speed-Package (ID=24) has the wrong size !!!"
kalman_pressure = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0,0.0,False,0.0,0.0,False,False]
kalman_pressure[ 0] = dataset[2] # offset_pressure (float)
kalman_pressure[ 1] = dataset[3] # est_z (float)
kalman_pressure[ 2] = dataset[4] # est_zdot (float)
kalman_pressure[ 3] = dataset[5] # est_bias_PWM (float)
kalman_pressure[ 4] = dataset[6] # est_biais_pression (float)
kalman_pressure[ 5] = dataset[7] # offset_US (float)
kalman_pressure[ 6] = dataset[8] # prediction_US (float)
kalman_pressure[ 7] = dataset[9] # cov_alt (float)
kalman_pressure[ 8] = dataset[10] # cov_PWM (float)
kalman_pressure[ 9] = dataset[11] # cov_vitesse (float)
kalman_pressure[10] = dataset[12] # bool_effet_sol (bool)
kalman_pressure[11] = dataset[13] # somme_inno (float)
kalman_pressure[12] = dataset[14] # flag_rejet_US (bool)
kalman_pressure[13] = dataset[15] # u_multisinus (float)
kalman_pressure[14] = dataset[16] # gaz_altitude (float)
kalman_pressure[15] = dataset[17] # Flag_multisinus (bool)
kalman_pressure[16] = dataset[18] # Flag_multisinus_debut (bool)
return(kalman_pressure)
##### ID = 25 ### "hdvideo_stream" ############################################
def decode_ID25(packet): #NAVDATA_HDVIDEO-TAG
dataset = struct.unpack_from("HHfffffff", packet, offsetND)
if dataset[1] != 32: print "*** ERROR : navdata-hdvideo_stream-Package (ID=25) has the wrong size !!!"
hdvideo_stream = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]
hdvideo_stream[0] = dataset[2] # hdvideo_state (float)
hdvideo_stream[1] = dataset[3] # storage_fifo_nb_packets (float)
hdvideo_stream[2] = dataset[4] # storage_fifo_size (float)
hdvideo_stream[3] = dataset[5] # usbkey_size USB key in kb (no key=0)(float)
hdvideo_stream[4] = dataset[6] # usbkey_freespace USB key in kb (no key=0)(float)
hdvideo_stream[5] = dataset[7] # frame_number PaVE field of the frame starting to be encoded for the HD stream (float)
hdvideo_stream[6] = dataset[8] # usbkey_remaining_time [sec] (float)
return(hdvideo_stream)
##### ID = 26 ### "wifi" ######################################################
def decode_ID26(packet): #NAVDATA_WIFI_TAG
dataset = struct.unpack_from("HHI", packet, offsetND)
if dataset[1] != 8: print "*** ERROR : navdata-wifi-Package (ID=26) has the wrong size !!!"
wifi = dataset[2] # link_quality (uint32)
return(wifi)
##### ID = 27 ### "zimmu_3000" ################################################
def decode_ID27(packet): #NAVDATA_ZIMU_3000_TAG
dataset = struct.unpack_from("HHif", packet, offsetND)
if dataset[1] != 12 and dataset[1] != 216: # 216 since firmware 2.4.8 ?
print "*** ERROR : navdata-zimmu_3000-Package (ID=27) has the wrong size !!!"
zimmu_3000 = [0,0.0]
zimmu_3000[0] = dataset[2] # vzimmuLSB (int32)
zimmu_3000[1] = dataset[3] # vzfind (float)
return(zimmu_3000)
##### Footer ### "chksum" #####################################################
def decode_Footer(packet,allpacket): ### Decode Checksum options-package ID=65535
dataset = struct.unpack_from("HHI", packet, offsetND)
if dataset[1] != 8: print "*** ERROR : Checksum-Options-Package (ID=65535) has the wrong size !!!"
chksum = [0,False]
chksum[0] = dataset[2]
sum, plen = 0, len(allpacket)-8
	for i in range (0,plen,1): sum += ord(allpacket[i]) # Slows down this Navdata-subprocess massively
if sum == chksum[0]: chksum[1] = True
return(chksum)
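# Hedged optimisation sketch (an assumption, not part of the original code): the
# per-byte loop in decode_Footer can be replaced by a built-in sum over the byte
# values, which yields the same checksum considerably faster.
def exampleFastChecksum(allpacket):
	# Sum of all byte values except the trailing 8-byte checksum option-package.
	return sum(bytearray(allpacket[:-8]))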
###############################################################################
### Navdata-Decoding
###############################################################################
def getDroneStatus(packet):
arState = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
checksum = (0,False)
length = len(packet)
dataset = struct.unpack_from("IIII", packet, 0) # Reading (Header, State, Sequence, Vision)
offsetND = struct.calcsize("IIII")
###############################=-
### Decode Options-Packages ###=-
###############################=-
def getNavdata(packet,choice):
navdata = {}
length = len(packet)
dataset = struct.unpack_from("IIII", packet, 0) # Reading (Header, State, Sequence, Vision)
navdata["state"] = decode_Header(dataset)
offsetND = struct.calcsize("IIII")
	# Demo mode normally contains the option packages ID=0 (_navdata_demo_t), ID=16 (seems empty) and ID=65535 (checksum)
	# Full mode can contain any of the option packages decoded below (IDs 0-27 plus the checksum)
while offsetND < length:
dataset = struct.unpack_from("HH", packet, offsetND) # Reading (Header, Length)
if dataset[0]== 0 and choice[ 0]: navdata["demo"] = decode_ID0(packet[offsetND:])
if dataset[0]== 1 and choice[ 1]: navdata["time"] = decode_ID1(packet[offsetND:])
if dataset[0]== 2 and choice[ 2]: navdata["raw_measures"] = decode_ID2(packet[offsetND:])
if dataset[0]== 3 and choice[ 3]: navdata["phys_measures"] = decode_ID3(packet[offsetND:])
if dataset[0]== 4 and choice[ 4]: navdata["gyros_offsets"] = decode_ID4(packet[offsetND:])
if dataset[0]== 5 and choice[ 5]: navdata["euler_angles"] = decode_ID5(packet[offsetND:])
if dataset[0]== 6 and choice[ 6]: navdata["references"] = decode_ID6(packet[offsetND:])
if dataset[0]== 7 and choice[ 7]: navdata["trims"] = decode_ID7(packet[offsetND:])
if dataset[0]== 8 and choice[ 8]: navdata["rc_references"] = decode_ID8(packet[offsetND:])
if dataset[0]== 9 and choice[ 9]: navdata["pwm"] = decode_ID9(packet[offsetND:])
if dataset[0]==10 and choice[10]: navdata["altitude"] = decode_ID10(packet[offsetND:])
if dataset[0]==11 and choice[11]: navdata["vision_raw"] = decode_ID11(packet[offsetND:])
if dataset[0]==12 and choice[12]: navdata["vision_of"] = decode_ID12(packet[offsetND:])
if dataset[0]==13 and choice[13]: navdata["vision"] = decode_ID13(packet[offsetND:])
if dataset[0]==14 and choice[14]: navdata["vision_perf"] = decode_ID14(packet[offsetND:])
if dataset[0]==15 and choice[15]: navdata["trackers_send"] = decode_ID15(packet[offsetND:])
if dataset[0]==16 and choice[16]: navdata["vision_detect"] = decode_ID16(packet[offsetND:])
if dataset[0]==17 and choice[17]: navdata["watchdog"] = decode_ID17(packet[offsetND:])
if dataset[0]==18 and choice[18]: navdata["adc_data_frame"] = decode_ID18(packet[offsetND:])
if dataset[0]==19 and choice[19]: navdata["video_stream"] = decode_ID19(packet[offsetND:])
if dataset[0]==20 and choice[20]: navdata["games"] = decode_ID20(packet[offsetND:])
if dataset[0]==21 and choice[21]: navdata["pressure_raw"] = decode_ID21(packet[offsetND:])
if dataset[0]==22 and choice[22]: navdata["magneto"] = decode_ID22(packet[offsetND:])
if dataset[0]==23 and choice[23]: navdata["wind_speed"] = decode_ID23(packet[offsetND:])
if dataset[0]==24 and choice[24]: navdata["kalman_pressure"] = decode_ID24(packet[offsetND:])
if dataset[0]==25 and choice[25]: navdata["hdvideo_stream"] = decode_ID25(packet[offsetND:])
if dataset[0]==26 and choice[26]: navdata["wifi"] = decode_ID26(packet[offsetND:])
if dataset[0]==27 and choice[27]: navdata["zimmu_3000"] = decode_ID27(packet[offsetND:])
if dataset[0]==65535 and choice[28]: navdata["chksum"] = decode_Footer(packet[offsetND:],packet)
offsetND += dataset[1]
return(navdata)
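# Hedged usage sketch (illustrative only): with a 'choice' flag list in the index
# order of mainloopND's 'packetlist' below, decoding just the "demo" block plus the
# always-decoded state could look like this.
def exampleDecodeDemoOnly(rawPacket):
	choice = [True] + [False]*28 + [True]    # "demo" on, everything else off
	navdata = getNavdata(rawPacket, choice)
	return navdata["state"], navdata.get("demo")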
###############################=-
### Threads ###=-
###############################=-
def reconnect(navdata_pipe, commitsuicideND, DroneIP,NavDataPort):
if not commitsuicideND: navdata_pipe.sendto("\x01\x00\x00\x00", (DroneIP, NavDataPort))
def watchdogND(parentPID):
global commitsuicideND
while not commitsuicideND:
time.sleep(1)
try : os.getpgid(parentPID)
except: commitsuicideND=True
# It seems you only have to reinitialize the network connection once; after that the drone keeps on sending forever.
def mainloopND(DroneIP,NavDataPort,parent_pipe,parentPID):
global commitsuicideND
something2send, MinimalPacketLength, timetag = False, 30, 0
packetlist = ["demo","time","raw_measures","phys_measures","gyros_offsets","euler_angles","references","trims","rc_references","pwm","altitude","vision_raw","vision_of","vision","vision_perf","trackers_send","vision_detect","watchdog","adc_data_frame","video_stream","games","pressure_raw","magneto","wind_speed","kalman_pressure","hdvideo_stream","wifi","zimmu_3000","chksum","state"]
choice = [False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,True]
	overallchoice = False # This and oneTimeFailOver are necessary because of a (probable) bug of the AR.Drone when sending NavData in demo mode...
	oneTimeFailOver = True # ...while a configuration is being set, the drone sends the next demo-mode package containing just its status.
debug = False
showCommands = False
	# Watchdog thread: checks whether the main process is still running and shuts this subprocess down otherwise
ThreadWatchdogND = threading.Thread(target=watchdogND,args=[parentPID])
ThreadWatchdogND.start()
# Prepare communication-pipes
pipes = []
pipes.append(parent_pipe)
navdata_pipe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
navdata_pipe.setblocking(0)
navdata_pipe.bind(('', NavDataPort))
pipes.append(navdata_pipe)
# start connection
reconnect(navdata_pipe, commitsuicideND, DroneIP, NavDataPort)
netHeartbeat = threading.Timer(2.0, reconnect, [navdata_pipe,commitsuicideND,DroneIP,NavDataPort,]) # Inits the first Network-Heartbeat (2 secs after disconnection the drone stops sending)
netHeartbeat.start()
if choice.count(True) > 0: overallchoice = True
while not commitsuicideND:
in_pipe, out_pipe, dummy2 = select.select(pipes, [], [], 0.5) # When something is in a pipe...
for ip in in_pipe:
if ip == parent_pipe:
cmd = parent_pipe.recv()
if showCommands: print "** Com -> Nav : ",cmd
# Signal to stop this process and all its threads
if cmd == "die!": commitsuicideND = True
# Enables/disables Debug-bit
elif cmd == "debug":
debug = True
print "NavData-Process : running"
elif cmd == "undebug":
debug = False
				# Enables/disables echoing of received commands
elif cmd == "showCommands": showCommands = True
elif cmd == "hideCommands": showCommands = False
elif cmd == "reconnect": reconnect(navdata_pipe, commitsuicideND, DroneIP, NavDataPort)
# Sets explicitly the value-packages which shall be decoded
elif cmd[0] == "send":
if cmd[1].count("all"):
for i in range (0,len(choice),1): choice[i] = True
else:
for i in range (0,len(packetlist),1):
if cmd[1].count(packetlist[i]): choice[i] = True
else: choice[i] = False
if choice.count(True) > 0: overallchoice = True
else: overallchoice = False
# Adds value-packages to the other which shall be decoded
elif cmd[0] == "add":
for i in range (0,len(packetlist),1):
if cmd[1].count(packetlist[i]): choice[i] = True
if cmd[1].count("all"):
for i in range (0,len(choice),1): choice[i] = True
if choice.count(True)>0: overallchoice = True
else: overallchoice = False
# Deletes packages from the value-package-list which shall not be decoded anymore
elif cmd[0] == "block":
if cmd[1].count("all"):
for i in range (0,len(packetlist),1): choice[i] = False
else:
for i in range (0,len(packetlist),1):
if cmd.count(packetlist[i]): choice[i] = False
if choice.count(True) > 0: overallchoice = True
else: overallchoice = False
if ip == navdata_pipe:
try:
netHeartbeat.cancel() # Connection is alive, Network-Heartbeat not necessary for a moment
Packet = navdata_pipe.recv(65535) # Receiving raw NavData-Package
netHeartbeat = threading.Timer(2.1,reconnect,[navdata_pipe,commitsuicideND,DroneIP,NavDataPort])
netHeartbeat.start() # Network-Heartbeat is set here, because the drone keeps on sending NavData (vid, etc you have to switch on)
timestamp = timetag # Setting up decoding-time calculation
timetag = time.time()
if overallchoice:
try: lastdecodedNavData=decodedNavData
except: lastdecodedNavData={}
decodedNavData = getNavdata(Packet,choice)
state = decodedNavData["state"]
					# If there is an abnormally small NavData packet, the last NavData packet will be sent out with an error-tag
NoNavData = False
if len(Packet)<MinimalPacketLength and overallchoice: decodedNavData, NoNavData = lastdecodedNavData, True
dectime = time.time()-timetag
# Sends all the data to the mainprocess
parent_pipe.send((decodedNavData, state[0:32], state[32], timestamp, dectime, NoNavData))
except IOError: pass
suicideND = True
netHeartbeat.cancel()
if debug: print "NavData-Process : committed suicide"
##################################################################################################
###### Playground ######
##################################################################################################
if __name__ == "__main__":
###
	### Here you can write your first test code and play around with it
###
import time
import ps_drone
drone = ps_drone.Drone() # Start using drone
drone.printBlue("Battery: ")
drone.startup() # Connects to drone and starts subprocesses
drone.reset() # Always good, at start
while drone.getBattery()[0] == -1: time.sleep(0.1) # Waits until the drone has done its reset
time.sleep(0.5) # Give it some time to fully awake
drone.printBlue("Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1])) # Gives a battery-status
stop = False
while not stop:
key = drone.getKey()
if key == " ":
if drone.NavData["demo"][0][2] and not drone.NavData["demo"][0][3]: drone.takeoff()
else: drone.land()
elif key == "0": drone.hover()
elif key == "w": drone.moveForward()
elif key == "s": drone.moveBackward()
elif key == "a": drone.moveLeft()
elif key == "d": drone.moveRight()
elif key == "q": drone.turnLeft()
elif key == "e": drone.turnRight()
elif key == "7": drone.turnAngle(-10,1)
elif key == "9": drone.turnAngle( 10,1)
elif key == "4": drone.turnAngle(-45,1)
elif key == "6": drone.turnAngle( 45,1)
elif key == "1": drone.turnAngle(-90,1)
elif key == "3": drone.turnAngle( 90,1)
elif key == "8": drone.moveUp()
elif key == "2": drone.moveDown()
elif key == "*": drone.doggyHop()
elif key == "+": drone.doggyNod()
elif key == "-": drone.doggyWag()
elif key != "": stop = True
	print "Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1]) # Gives a battery-status
| PatrickChrist/CDTM-Deep-Learning-Drones | ps_drone.py | Python | mit | 95509 | ["VisIt"] | 88b0e8cc0b44a357ca5691e6a370854b17701c2c6eff6ce40f94261d33b3c643 |
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Michael Eickenberg <michael.eickenberg@inria.fr>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
#
# License: BSD 3 clause
from math import log, sqrt
import numbers
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from scipy.sparse import issparse
from scipy.sparse.linalg import svds
from ._base import _BasePCA
from ..utils import check_random_state
from ..utils import check_array
from ..utils.extmath import fast_logdet, randomized_svd, svd_flip
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
def _assess_dimension(spectrum, rank, n_samples):
"""Compute the log-likelihood of a rank ``rank`` dataset.
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum : array of shape (n_features)
Data spectrum.
rank : int
Tested rank value. It should be strictly lower than n_features,
otherwise the method isn't specified (division by zero in equation
(31) from the paper).
n_samples : int
Number of samples.
Returns
-------
ll : float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
n_features = spectrum.shape[0]
if not 1 <= rank < n_features:
raise ValueError("the tested rank should be in [1, n_features - 1]")
eps = 1e-15
if spectrum[rank - 1] < eps:
# When the tested rank is associated with a small eigenvalue, there's
# no point in computing the log-likelihood: it's going to be very
# small and won't be the max anyway. Also, it can lead to numerical
# issues below when computing pa, in particular in log((spectrum[i] -
# spectrum[j]) because this will take the log of something very small.
return -np.inf
pu = -rank * log(2.)
for i in range(1, rank + 1):
pu += (gammaln((n_features - i + 1) / 2.) -
log(np.pi) * (n_features - i + 1) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
v = max(eps, np.sum(spectrum[rank:]) / (n_features - rank))
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension(spectrum, n_samples):
"""Infers the dimension of a dataset with a given spectrum.
The returned value will be in [1, n_features - 1].
"""
ll = np.empty_like(spectrum)
ll[0] = -np.inf # we don't want to return n_components = 0
for rank in range(1, spectrum.shape[0]):
ll[rank] = _assess_dimension(spectrum, rank, n_samples)
return ll.argmax()
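# Hedged illustration (not part of scikit-learn): for a spectrum with a sharp drop
# after the first two eigenvalues, e.g.
#     spectrum = np.array([10.0, 8.0, 0.1, 0.09, 0.08])
#     _infer_dimension(spectrum, n_samples=100)
# the returned value is the rank with the highest log-likelihood (2 would be
# expected here).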
class PCA(_BasePCA):
"""Principal component analysis (PCA).
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
It can also use the scipy.sparse.linalg ARPACK implementation of the
truncated SVD.
Notice that this class does not support sparse input. See
:class:`TruncatedSVD` for an alternative with sparse data.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float, None or str
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
MLE is used to guess the dimension. Use of ``n_components == 'mle'``
will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
number of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components.
If ``svd_solver == 'arpack'``, the number of components must be
strictly less than the minimum of n_features and n_samples.
Hence, the None case results in::
n_components == min(n_samples, n_features) - 1
copy : bool, default=True
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional (default False)
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : str {'auto', 'full', 'arpack', 'randomized'}
If auto :
The solver is selected by a default policy based on `X.shape` and
`n_components`: if the input data is larger than 500x500 and the
number of components to extract is lower than 80% of the smallest
dimension of the data, then the more efficient 'randomized'
method is enabled. Otherwise the exact full SVD is computed and
optionally truncated afterwards.
If full :
run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
If arpack :
run SVD truncated to n_components calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
0 < n_components < min(X.shape)
If randomized :
run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
tol : float >= 0, optional (default .0)
Tolerance for singular values computed by svd_solver == 'arpack'.
.. versionadded:: 0.18.0
iterated_power : int >= 0, or 'auto', (default 'auto')
Number of iterations for the power method computed by
svd_solver == 'randomized'.
.. versionadded:: 0.18.0
random_state : int, RandomState instance, default=None
Used when the 'arpack' or 'randomized' solvers are used. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 0.18.0
Attributes
----------
components_ : array, shape (n_components, n_features)
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
``explained_variance_``.
explained_variance_ : array, shape (n_components,)
The amount of variance explained by each of the selected components.
Equal to n_components largest eigenvalues
of the covariance matrix of X.
.. versionadded:: 0.18
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of the ratios is equal to 1.0.
singular_values_ : array, shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
.. versionadded:: 0.19
mean_ : array, shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or the lesser value of n_features and n_samples
if n_components is None.
n_features_ : int
Number of features in the training data.
n_samples_ : int
Number of samples in the training data.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Equal to the average of (min(n_features, n_samples) - n_components)
smallest eigenvalues of the covariance matrix of X.
See Also
--------
KernelPCA : Kernel Principal Component Analysis.
SparsePCA : Sparse Principal Component Analysis.
TruncatedSVD : Dimensionality reduction using truncated SVD.
IncrementalPCA : Incremental Principal Component Analysis.
References
----------
For n_components == 'mle', this class uses the method of *Minka, T. P.
"Automatic choice of dimensionality for PCA". In NIPS, pp. 598-604*
Implements the probabilistic PCA model from:
Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
component analysis". Journal of the Royal Statistical Society:
Series B (Statistical Methodology), 61(3), 611-622.
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
*Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
"Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions".
SIAM review, 53(2), 217-288.* and also
*Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
"A randomized algorithm for the decomposition of matrices".
Applied and Computational Harmonic Analysis, 30(1), 47-68.*
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(n_components=2)
>>> print(pca.explained_variance_ratio_)
[0.9924... 0.0075...]
>>> print(pca.singular_values_)
[6.30061... 0.54980...]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X)
PCA(n_components=2, svd_solver='full')
>>> print(pca.explained_variance_ratio_)
[0.9924... 0.00755...]
>>> print(pca.singular_values_)
[6.30061... 0.54980...]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(n_components=1, svd_solver='arpack')
>>> print(pca.explained_variance_ratio_)
[0.99244...]
>>> print(pca.singular_values_)
[6.30061...]
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, copy=True, whiten=False,
svd_solver='auto', tol=0.0, iterated_power='auto',
random_state=None):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
self.svd_solver = svd_solver
self.tol = tol
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : None
Ignored variable.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : None
Ignored variable.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Transformed values.
Notes
-----
This method returns a Fortran-ordered array. To convert it to a
C-ordered array, use 'np.ascontiguousarray'.
"""
U, S, Vt = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * Vt * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
# Raise an error for sparse input.
# This is more informative than the generic one raised by check_array.
if issparse(X):
raise TypeError('PCA does not support sparse input. See '
'TruncatedSVD for a possible alternative.')
X = self._validate_data(X, dtype=[np.float64, np.float32],
ensure_2d=True, copy=self.copy)
# Handle n_components==None
if self.n_components is None:
if self.svd_solver != 'arpack':
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
# Handle svd_solver
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == 'auto':
# Small problem or n_components == 'mle', just call full PCA
if max(X.shape) <= 500 or n_components == 'mle':
self._fit_svd_solver = 'full'
elif n_components >= 1 and n_components < .8 * min(X.shape):
self._fit_svd_solver = 'randomized'
# This is also the case of n_components in (0,1)
else:
self._fit_svd_solver = 'full'
# Call different fits for either full or truncated SVD
if self._fit_svd_solver == 'full':
return self._fit_full(X, n_components)
elif self._fit_svd_solver in ['arpack', 'randomized']:
return self._fit_truncated(X, n_components, self._fit_svd_solver)
else:
raise ValueError("Unrecognized svd_solver='{0}'"
"".format(self._fit_svd_solver))
def _fit_full(self, X, n_components):
"""Fit the model by computing full SVD on X"""
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
elif n_components >= 1:
if not isinstance(n_components, numbers.Integral):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, "
"was of type=%r"
% (n_components, type(n_components)))
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, Vt = linalg.svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, Vt = svd_flip(U, Vt)
components_ = Vt
# Get variance explained by singular values
explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = S.copy() # Store the singular values.
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension(explained_variance_, n_samples)
elif 0 < n_components < 1.0:
# number of components for which the cumulative explained
# variance percentage exceeds the desired threshold.
# side='right' ensures that the cumulative explained variance of
# the selected components is strictly greater than the n_components
# float passed. More discussion in issue: #15669
ratio_cumsum = stable_cumsum(explained_variance_ratio_)
n_components = np.searchsorted(ratio_cumsum, n_components,
side='right') + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
self.singular_values_ = singular_values_[:n_components]
return U, S, Vt
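# Illustrative note (editor's sketch, not upstream code): if
# explained_variance_ratio_ were [0.7, 0.2, 0.08, 0.02] and the caller
# passed n_components=0.9, stable_cumsum gives [0.7, 0.9, 0.98, 1.0] and
# np.searchsorted(..., 0.9, side='right') + 1 == 3, so three components
# are kept -- the smallest number whose cumulative ratio strictly exceeds
# the requested threshold.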
def _fit_truncated(self, X, n_components, svd_solver):
"""Fit the model by computing truncated SVD (by ARPACK or randomized)
on X
"""
n_samples, n_features = X.shape
if isinstance(n_components, str):
raise ValueError("n_components=%r cannot be a string "
"with svd_solver='%s'"
% (n_components, svd_solver))
elif not 1 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 1 and "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features),
svd_solver))
elif not isinstance(n_components, numbers.Integral):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, was of type=%r"
% (n_components, type(n_components)))
elif svd_solver == 'arpack' and n_components == min(n_samples,
n_features):
raise ValueError("n_components=%r must be strictly less than "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features),
svd_solver))
random_state = check_random_state(self.random_state)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if svd_solver == 'arpack':
# random init solution, as ARPACK does it internally
v0 = random_state.uniform(-1, 1, size=min(X.shape))
U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
S = S[::-1]
# flip eigenvectors' sign to enforce deterministic output
U, Vt = svd_flip(U[:, ::-1], Vt[::-1])
elif svd_solver == 'randomized':
# sign flipping is done inside
U, S, Vt = randomized_svd(X, n_components=n_components,
n_iter=self.iterated_power,
flip_sign=True,
random_state=random_state)
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = Vt
self.n_components_ = n_components
# Get variance explained by singular values
self.explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = np.var(X, ddof=1, axis=0)
self.explained_variance_ratio_ = \
self.explained_variance_ / total_var.sum()
self.singular_values_ = S.copy() # Store the singular values.
if self.n_components_ < min(n_features, n_samples):
self.noise_variance_ = (total_var.sum() -
self.explained_variance_.sum())
self.noise_variance_ /= min(n_features, n_samples) - n_components
else:
self.noise_variance_ = 0.
return U, S, Vt
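# Illustrative note (editor's sketch, not upstream code): the truncated
# noise_variance_ above estimates the mean of the discarded eigenvalues as
# (total variance - retained explained variance) / (number of discarded
# dimensions), playing the same role as the mean of the trailing
# explained_variance_ entries used in _fit_full (Bishop, eq. 12.46).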
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array, shape(n_samples, n_features)
The data.
Returns
-------
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) -
fast_logdet(precision))
return log_like
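# Illustrative note (editor's sketch, not upstream code): under the
# probabilistic PCA model the per-sample value computed above is the
# Gaussian log-density
#   ll_i = -0.5 * (x_i - mean_)^T precision (x_i - mean_)
#          - 0.5 * (n_features * log(2 * pi) - logdet(precision)),
# where precision is the inverse model covariance from get_precision().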
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array, shape(n_samples, n_features)
The data.
y : None
Ignored variable.
Returns
-------
ll : float
Average log-likelihood of the samples under the current model.
"""
return np.mean(self.score_samples(X))
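# Illustrative usage sketch (editor's addition, not part of the upstream
# file): for a fitted estimator `pca`, pca.score(X) is simply
# np.mean(pca.score_samples(X)), i.e. the average per-sample
# log-likelihood under the fitted probabilistic PCA model.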
|
bnaul/scikit-learn
|
sklearn/decomposition/_pca.py
|
Python
|
bsd-3-clause
| 23,545
|
[
"Gaussian"
] |
3b4206e0952dac0f4fd4ba7c5f11921fe8857f51e9481c69e6cf9eee50dedea7
|
"""
unittest_motifs.py
Jose Guzman, sjm.guzman@gmail.com
Claudia Espinoza, claudia.espinoza@ist.ac.at
Created: Wed Aug 9 17:49:33 CEST 2017
Unittest environment to test the counting of motifs
"""
import unittest
import numpy as np
from motifs import iicounter, eicounter, iecounter, eecounter
class TestIIMotifCounter(unittest.TestCase):
"""
A major unittest class to test IIMotifCounter
"""
A1 = np.array(([0,3],[0,0]))
A2 = np.array(([0,0],[3,0]))
B1 = np.array(([0,3],[1,0]))
B2 = np.array(([0,1],[3,0]))
C1 = np.array(([0,2],[0,0]))
C2 = np.array(([0,0],[2,0]))
D1 = np.array(([0,1],[0,0]))
D2 = np.array(([0,0],[1,0]))
E = np.array(([0,1],[1,0]))
Z = np.zeros((3,3))
set56 = np.array([[0, 0, 0],[1, 0, 3],[3, 1, 0]])
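# Editor's note (inferred from the expected counts in the tests below, not
# stated in the original file): the connectivity matrices appear to encode
# a chemical synapse as 1, an electrical synapse as 2, and a chemical plus
# an electrical synapse as 3.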
def setUp(self):
"""
Create IIMotifCounter objects from global matrices with
known connections
"""
self.a1 = iicounter(self.A1)
self.a2 = iicounter(self.A2)
self.b1 = iicounter(self.B1)
self.b2 = iicounter(self.B2)
self.c1 = iicounter(self.C1)
self.c2 = iicounter(self.C2)
self.d1 = iicounter(self.D1)
self.d2 = iicounter(self.D2)
self.e = iicounter(self.E)
self.z = iicounter(self.Z)
self.set56 = iicounter(self.set56)
def test_found_electrical_and_one_chemical(self):
"""
Test 'ii_c1e' : an electrical synapse together with ONE chemical synapse
"""
self.assertEquals(1, self.a1.ii_c1e_found)
self.assertEquals(1, self.a2.ii_c1e_found)
self.assertEquals(2, self.b1.ii_c1e_found)
self.assertEquals(2, self.b2.ii_c1e_found)
self.assertEquals(0, self.c1.ii_c1e_found)
self.assertEquals(0, self.c2.ii_c1e_found)
self.assertEquals(0, self.d1.ii_c1e_found)
self.assertEquals(0, self.d2.ii_c1e_found)
self.assertEquals(0, self.e.ii_c1e_found)
self.assertEquals(0, self.z.ii_c1e_found)
self.assertEquals(3, self.set56.ii_c1e_found)
def test_found_electrical_and_two_chemical(self):
"""
Test 'ii_c2e' : an electrical synapse together with TWO chemical synapses
"""
self.assertEquals(0, self.a1.ii_c2e_found)
self.assertEquals(0, self.a2.ii_c2e_found)
self.assertEquals(1, self.b1.ii_c2e_found)
self.assertEquals(1, self.b2.ii_c2e_found)
self.assertEquals(0, self.c1.ii_c2e_found)
self.assertEquals(0, self.c2.ii_c2e_found)
self.assertEquals(0, self.d1.ii_c2e_found)
self.assertEquals(0, self.d2.ii_c2e_found)
self.assertEquals(0, self.e.ii_c2e_found)
self.assertEquals(0, self.z.ii_c2e_found)
self.assertEquals(1, self.set56.ii_c2e_found)
def test_found_electrical_syn(self):
"""
Test 'ii_elec' : electrical synapses between interneurons
"""
self.assertEquals(1, self.a1.ii_elec_found)
self.assertEquals(1, self.a2.ii_elec_found)
self.assertEquals(1, self.b1.ii_elec_found)
self.assertEquals(1, self.b2.ii_elec_found)
self.assertEquals(1, self.c1.ii_elec_found)
self.assertEquals(1, self.c2.ii_elec_found)
self.assertEquals(0, self.d1.ii_elec_found)
self.assertEquals(0, self.d2.ii_elec_found)
self.assertEquals(0, self.e.ii_elec_found)
self.assertEquals(0, self.z.ii_elec_found)
self.assertEquals(2, self.set56.ii_elec_found)
def test_found_chemical_syn(self):
"""
Test 'ii_chem' : a chemical synapse between interneurons
"""
self.assertEquals(1, self.a1.ii_chem_found)
self.assertEquals(1, self.a2.ii_chem_found)
self.assertEquals(2, self.b1.ii_chem_found)
self.assertEquals(2, self.b2.ii_chem_found)
self.assertEquals(0, self.c1.ii_chem_found)
self.assertEquals(0, self.c2.ii_chem_found)
self.assertEquals(1, self.d1.ii_chem_found)
self.assertEquals(1, self.d2.ii_chem_found)
self.assertEquals(2, self.e.ii_chem_found)
self.assertEquals(0, self.z.ii_chem_found)
self.assertEquals(4, self.set56.ii_chem_found)
def test_found_bidirectional_chemical(self):
"""
Test 'ii_c2' : a bidirectional chemical synapse
"""
self.assertEquals(0, self.a1.ii_c2_found)
self.assertEquals(0, self.a2.ii_c2_found)
self.assertEquals(1, self.b1.ii_c2_found)
self.assertEquals(1, self.b2.ii_c2_found)
self.assertEquals(0, self.c1.ii_c2_found)
self.assertEquals(0, self.c2.ii_c2_found)
self.assertEquals(0, self.d1.ii_c2_found)
self.assertEquals(0, self.d2.ii_c2_found)
self.assertEquals(1, self.e.ii_c2_found)
self.assertEquals(0, self.z.ii_c2_found)
self.assertEquals(1, self.set56.ii_c2_found)
def test_found_convergent_motifs(self):
"""
Test 'ii_con' : convergent inhibitory chemical synapse
"""
self.assertEquals(1, self.set56.ii_con_found)
def test_found_divergent_motifs(self):
"""
Test 'ii_div' : divergent inhibitory chemical synapse
"""
self.assertEquals(2, self.set56.ii_div_found)
def test_found_linear_motifs(self):
"""
Test 'ii_lin' : linear inhibitory chemical synapse
"""
self.assertEquals(2, self.set56.ii_lin_found)
def test_add_objects(self):
"""
Test that summing two objects gives the correct result
"""
mysuma = self.a1 + self.a2
self.assertEquals(2, mysuma.ii_chem_found)
self.assertEquals(2, mysuma.ii_elec_found)
self.assertEquals(2, mysuma.ii_c1e_found)
self.assertEquals(0, mysuma.ii_c2e_found)
self.assertEquals(4, mysuma.ii_chem_tested)
self.assertEquals(2, mysuma.ii_elec_tested)
self.assertEquals(4, mysuma.ii_c1e_tested)
self.assertEquals(2, mysuma.ii_c2e_tested)
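# Editor's note (inferred from test_add_objects above, not stated in the
# original file): adding two counter objects appears to sum both the
# *_found and the *_tested counters element-wise, so aggregate statistics
# over several recordings can be built with the + operator.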
class TestEIMotifCounter(unittest.TestCase):
"""
A major unittest class to test EIMotifCounter
"""
A = np.ones((2,2))
B = np.array(([1,1],[0,0]))
C = np.array(([1,1],[1,0],[0,0]))
D = np.array(([1,1],[1,0],[1,0]))
def setUp(self):
"""
Create EIMotifCounter objects from global matrices with
known connections
"""
self.a = eicounter(self.A)
self.b = eicounter(self.B)
self.c = eicounter(self.C)
self.d = eicounter(self.D)
def test_found_chemical_syn(self):
"""
Test 'ei' : a chemical synapse from an excitatory to an inhibitory
neuron.
"""
self.assertEquals(4, self.a.ei_found)
self.assertEquals(2, self.b.ei_found)
def test_convergent2(self):
"""
Test convergent motifs from 2 excitatory cells
"""
self.assertEquals(2, self.a.e2i_found)
self.assertEquals(0, self.b.e2i_found)
self.assertEquals(1, self.c.e2i_found)
def test_convergent3(self):
"""
Test convergent motifs from 3 excitatory cells
"""
self.assertEquals(1, self.d.e3i_found)
def test_add_objects(self):
"""
Test that summing two objects gives the correct result
"""
mysum = self.a + self.b
self.assertEquals(6, mysum.ei_found)
self.assertEquals(8, mysum.ei_tested)
class TestIEMotifCounter(unittest.TestCase):
"""
A major unittest class to test IEMotifCounter
"""
A = np.ones((2,2))
B = np.array(([1,1],[0,0]))
def setUp(self):
"""
Create IEMotifCounter objects from global matrices with
known connections
"""
self.a = iecounter(self.A)
self.b = iecounter(self.B)
def test_found_chemical_syn(self):
"""
Test 'ie' : a chemical synapse from an inhibitory to an excitatory
neuron.
"""
self.assertEquals(4, self.a.ie_found)
self.assertEquals(2, self.b.ie_found)
def test_add_objects(self):
"""
Test that summing two objects gives the correct result
"""
mysum = self.a + self.b
self.assertEquals(6, mysum.ie_found)
self.assertEquals(8, mysum.ie_tested)
class TestAddingObjects(unittest.TestCase):
"""
Unittesting for adding two different MotifObject types
"""
def setUp(self):
ii = iicounter(np.array(([0,3],[1,0])))
ie = iecounter(np.ones((2,2)))
ei = eicounter(np.ones((2,2)))
self.ii_sum = ii + ii
self.ie_sum = ie + ie
self.ei_sum = ei + ei
self.ie_ii = ie + ii
self.ii_ie = ii + ei
self.ei_ie = ei + ie
self.ie_ei = ie + ei
def test_add_same_objects(self):
"""
Test the result of summing two MotifCounter-derived objects of the same type
"""
# test found
self.assertEquals(4, self.ii_sum.ii_chem_found)
self.assertEquals(2, self.ii_sum.ii_elec_found)
self.assertEquals(4, self.ii_sum.ii_c1e_found)
self.assertEquals(2, self.ii_sum.ii_c2e_found)
self.assertEquals(8, self.ei_sum.ei_found)
self.assertEquals(8, self.ie_sum.ie_found)
# test tested
self.assertEquals(4, self.ii_sum.ii_chem_tested)
self.assertEquals(2, self.ii_sum.ii_elec_tested)
self.assertEquals(4, self.ii_sum.ii_c1e_tested)
self.assertEquals(2, self.ii_sum.ii_c2e_tested)
self.assertEquals(8, self.ei_sum.ei_tested)
self.assertEquals(8, self.ie_sum.ie_tested)
def test_add_diff_objects(self):
"""
Test that adding two different MotifCounter objects
returns a MotifCounter object type
"""
# test found
self.assertEquals(2, self.ie_ii.ii_chem_found)
self.assertEquals(1, self.ie_ii.ii_elec_found)
self.assertEquals(2, self.ie_ii.ii_c1e_found)
self.assertEquals(1, self.ie_ii.ii_c2e_found)
self.assertEquals(4, self.ei_ie.ei_found)
self.assertEquals(4, self.ei_ie.ie_found)
self.assertEquals(4, self.ie_ei.ei_found)
self.assertEquals(4, self.ie_ei.ie_found)
# test tested
self.assertEquals(2, self.ie_ii.ii_chem_tested)
self.assertEquals(1, self.ie_ii.ii_elec_tested)
self.assertEquals(2, self.ie_ii.ii_c1e_tested)
self.assertEquals(1, self.ie_ii.ii_c2e_tested)
self.assertEquals(4, self.ei_ie.ei_tested)
self.assertEquals(4, self.ei_ie.ie_tested)
self.assertEquals(4, self.ie_ei.ei_tested)
self.assertEquals(4, self.ie_ei.ie_tested)
class TestCA3MotifCounter(unittest.TestCase):
"""
Test the number of motifs found in CA3 neurons
according to the data in Guzman et al., 2016
"""
def setUp(self):
"""
Load all CA3 connectivity motifs
"""
self.a = eecounter(np.loadtxt('../data/CA3/0_100218_1.syn'))
self.b = eecounter(np.loadtxt('../data/CA3/0_110113_0.syn'))
self.c = eecounter(np.loadtxt('../data/CA3/0_110127_1.syn'))
self.d = eecounter(np.loadtxt('../data/CA3/0_120305_1.syn'))
self.e = eecounter(np.loadtxt('../data/CA3/0_130424_0.syn'))
self.f = eecounter(np.loadtxt('../data/CA3/0_130621_0.syn'))
self.g = eecounter(np.loadtxt('../data/CA3/0_130705_0.syn'))
self.h = eecounter(np.loadtxt('../data/CA3/0_130722_3.syn'))
self.i = eecounter(np.loadtxt('../data/CA3/0_140205_3.syn'))
self.j = eecounter(np.loadtxt('../data/CA3/0_140218_0.syn'))
self.k = eecounter(np.loadtxt('../data/CA3/0_140519_2.syn'))
self.l = eecounter(np.loadtxt('../data/CA3/0_141006_0.syn'))
self.m = eecounter(np.loadtxt('../data/CA3/0_141202_0.syn'))
self.gap = eecounter(np.loadtxt('../data/CA3/0_140129_0.syn'))
def test_CA3bidirectional_connections(self):
"""
Test for correct number found in bidirectional connections
"""
self.assertEquals(1, self.d.ee_c2_found)
self.assertEquals(1, self.h.ee_c2_found)
self.assertEquals(2, self.i.ee_c2_found)
self.assertEquals(2, self.l.ee_c2_found)
def test_CA3convergent_connections(self):
"""
Test for correct number found in convergent connections
"""
self.assertEquals(1, self.e.ee_con_found)
self.assertEquals(1, self.f.ee_con_found)
self.assertEquals(1, self.h.ee_con_found)
self.assertEquals(2, self.i.ee_con_found)
self.assertEquals(5, self.l.ee_con_found)
def test_CA3divergent_connections(self):
"""
Test for correct number found in divergent connections
"""
self.assertEquals(1, self.c.ee_div_found)
self.assertEquals(1, self.d.ee_div_found)
self.assertEquals(1, self.g.ee_div_found)
self.assertEquals(6, self.i.ee_div_found)
self.assertEquals(1, self.k.ee_div_found)
self.assertEquals(10, self.l.ee_div_found)
self.assertEquals(3, self.m.ee_div_found)
def test_CA3linear_connections(self):
"""
Test for correct number found in linear connections
"""
self.assertEquals(1, self.a.ee_lin_found)
self.assertEquals(1, self.b.ee_lin_found)
self.assertEquals(1, self.d.ee_lin_found)
self.assertEquals(1, self.h.ee_lin_found)
self.assertEquals(7, self.i.ee_lin_found)
self.assertEquals(1, self.j.ee_lin_found)
self.assertEquals(12, self.l.ee_lin_found)
self.assertEquals(1, self.m.ee_lin_found)
def test_CA3GAP_junction(self):
"""
Test for correct number of gap junctions (electrical connections) found
"""
self.assertEquals(1, self.gap.ee_elec_found)
if __name__ == '__main__':
unittest.main()
|
ClaudiaEsp/inet
|
inet/unittest_motifs.py
|
Python
|
gpl-2.0
| 13,861
|
[
"NEURON"
] |
c6c8069f7344baae8422d4d486836dff2731a40ff0a1249361c9bcc502064df7
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Displays a GUI for the user to set Orca preferences."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import os
import sys
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import Pango
import locale
import pyatspi
import time
from . import acss
from . import debug
from . import guilabels
from . import messages
from . import orca
from . import orca_gtkbuilder
from . import orca_gui_profile
from . import orca_state
from . import orca_platform
from . import script_manager
from . import settings
from . import settings_manager
from . import input_event
from . import keybindings
from . import pronunciation_dict
from . import braille
from . import speech
from . import speechserver
from . import text_attribute_names
_scriptManager = script_manager.getManager()
_settingsManager = settings_manager.getManager()
try:
import louis
except ImportError:
louis = None
from .orca_platform import tablesdir
if louis and not tablesdir:
louis = None
(HANDLER, DESCRIP, MOD_MASK1, MOD_USED1, KEY1, CLICK_COUNT1, OLDTEXT1, \
TEXT1, MODIF, EDITABLE) = list(range(10))
(NAME, IS_SPOKEN, IS_BRAILLED, VALUE) = list(range(4))
(ACTUAL, REPLACEMENT) = list(range(2))
# Must match the order of voice types in the GtkBuilder file.
#
(DEFAULT, UPPERCASE, HYPERLINK, SYSTEM) = list(range(4))
# Must match the order that the timeFormatCombo is populated.
#
(TIME_FORMAT_LOCALE, TIME_FORMAT_24_HMS,
TIME_FORMAT_24_HMS_WITH_WORDS, TIME_FORMAT_24_HM,
TIME_FORMAT_24_HM_WITH_WORDS) = list(range(5))
# Must match the order that the dateFormatCombo is populated.
#
(DATE_FORMAT_LOCALE, DATE_FORMAT_NUMBERS_DM, DATE_FORMAT_NUMBERS_MD,
DATE_FORMAT_NUMBERS_DMY, DATE_FORMAT_NUMBERS_MDY, DATE_FORMAT_NUMBERS_YMD,
DATE_FORMAT_FULL_DM, DATE_FORMAT_FULL_MD, DATE_FORMAT_FULL_DMY,
DATE_FORMAT_FULL_MDY, DATE_FORMAT_FULL_YMD, DATE_FORMAT_ABBREVIATED_DM,
DATE_FORMAT_ABBREVIATED_MD, DATE_FORMAT_ABBREVIATED_DMY,
DATE_FORMAT_ABBREVIATED_MDY, DATE_FORMAT_ABBREVIATED_YMD) = list(range(16))
class OrcaSetupGUI(orca_gtkbuilder.GtkBuilderWrapper):
def __init__(self, fileName, windowName, prefsDict = None):
"""Initialize the Orca configuration GUI.
Arguments:
- fileName: name of the GtkBuilder file.
- windowName: name of the component to get from the GtkBuilder
file.
"""
orca_gtkbuilder.GtkBuilderWrapper.__init__(self, fileName, windowName)
self.prefsDict = self._getGeneralSettings(prefsDict)
# Initialize variables to None to keep pylint happy.
#
self.bbindings = None
self.cellRendererText = None
self.defaultVoice = None
self.defKeyBindings = None
self.disableKeyGrabPref = None
self.getTextAttributesView = None
self.hyperlinkVoice = None
self.initializingSpeech = None
self.kbindings = None
self.keyBindingsModel = None
self.keyBindView = None
self.newBinding = None
self.pendingKeyBindings = None
self.planeCellRendererText = None
self.pronunciationModel = None
self.pronunciationView = None
self.screenHeight = None
self.screenWidth = None
self.speechFamiliesChoice = None
self.speechFamiliesChoices = None
self.speechFamiliesModel = None
self.speechServersChoice = None
self.speechServersChoices = None
self.speechServersModel = None
self.speechSystemsChoice = None
self.speechSystemsChoices = None
self.speechSystemsModel = None
self.systemVoice = None
self.uppercaseVoice = None
self.window = None
self.workingFactories = None
self.savedGain = None
self.savedPitch = None
self.savedRate = None
self._isInitialSetup = False
self.selectedFamilyChoices = {}
self.profilesCombo = None
self.profilesComboModel = None
self.startingProfileCombo = None
self._capturedKey = []
def _getGeneralSettings(self, prefsDict):
if prefsDict is None:
generalSettings = _settingsManager.getGeneralSettings()
activeProfile = generalSettings.get('startingProfile')
else:
activeProfile = prefsDict['activeProfile']
return _settingsManager.getGeneralSettings(activeProfile[1])
def init(self):
"""Initialize the Orca configuration GUI. Read the users current
set of preferences and set the GUI state to match. Setup speech
support and populate the combo box lists on the Speech Tab pane
accordingly.
"""
# Restore the default rate/pitch/gain,
# in case the user played with the sliders.
#
try:
voices = _settingsManager.getSetting('voices')
defaultVoice = voices[settings.DEFAULT_VOICE]
except KeyError:
defaultVoice = {}
try:
self.savedGain = defaultVoice[acss.ACSS.GAIN]
except KeyError:
self.savedGain = 10.0
try:
self.savedPitch = defaultVoice[acss.ACSS.AVERAGE_PITCH]
except KeyError:
self.savedPitch = 5.0
try:
self.savedRate = defaultVoice[acss.ACSS.RATE]
except KeyError:
self.savedRate = 50.0
# ***** Key Bindings treeview initialization *****
self.keyBindView = self.get_widget("keyBindingsTreeview")
if self.keyBindView.get_columns():
for column in self.keyBindView.get_columns():
self.keyBindView.remove_column(column)
self.keyBindingsModel = Gtk.TreeStore(
GObject.TYPE_STRING, # Handler name
GObject.TYPE_STRING, # Human Readable Description
GObject.TYPE_STRING, # Modifier mask 1
GObject.TYPE_STRING, # Used Modifiers 1
GObject.TYPE_STRING, # Modifier key name 1
GObject.TYPE_STRING, # Click count 1
GObject.TYPE_STRING, # Original Text of the Key Binding Shown 1
GObject.TYPE_STRING, # Text of the Key Binding Shown 1
GObject.TYPE_BOOLEAN, # Key Modified by User
GObject.TYPE_BOOLEAN) # Row with fields editable or not
self.planeCellRendererText = Gtk.CellRendererText()
self.cellRendererText = Gtk.CellRendererText()
self.cellRendererText.set_property("ellipsize", Pango.EllipsizeMode.END)
# HANDLER - invisible column
#
column = Gtk.TreeViewColumn("Handler",
self.planeCellRendererText,
text=HANDLER)
column.set_resizable(True)
column.set_visible(False)
column.set_sort_column_id(HANDLER)
self.keyBindView.append_column(column)
# DESCRIP
#
column = Gtk.TreeViewColumn(guilabels.KB_HEADER_FUNCTION,
self.cellRendererText,
text=DESCRIP)
column.set_resizable(True)
column.set_min_width(380)
column.set_sort_column_id(DESCRIP)
self.keyBindView.append_column(column)
# MOD_MASK1 - invisible column
#
column = Gtk.TreeViewColumn("Mod.Mask 1",
self.planeCellRendererText,
text=MOD_MASK1)
column.set_visible(False)
column.set_resizable(True)
column.set_sort_column_id(MOD_MASK1)
self.keyBindView.append_column(column)
# MOD_USED1 - invisible column
#
column = Gtk.TreeViewColumn("Use Mod.1",
self.planeCellRendererText,
text=MOD_USED1)
column.set_visible(False)
column.set_resizable(True)
column.set_sort_column_id(MOD_USED1)
self.keyBindView.append_column(column)
# KEY1 - invisible column
#
column = Gtk.TreeViewColumn("Key1",
self.planeCellRendererText,
text=KEY1)
column.set_resizable(True)
column.set_visible(False)
column.set_sort_column_id(KEY1)
self.keyBindView.append_column(column)
# CLICK_COUNT1 - invisible column
#
column = Gtk.TreeViewColumn("ClickCount1",
self.planeCellRendererText,
text=CLICK_COUNT1)
column.set_resizable(True)
column.set_visible(False)
column.set_sort_column_id(CLICK_COUNT1)
self.keyBindView.append_column(column)
# OLDTEXT1 - invisible column which will store a copy of the
# original keybinding in TEXT1 prior to the Apply or OK
# buttons being pressed. This will prevent automatic
# resorting each time a cell is edited.
#
column = Gtk.TreeViewColumn("OldText1",
self.planeCellRendererText,
text=OLDTEXT1)
column.set_resizable(True)
column.set_visible(False)
column.set_sort_column_id(OLDTEXT1)
self.keyBindView.append_column(column)
# TEXT1
#
rendererText = Gtk.CellRendererText()
rendererText.connect("editing-started",
self.editingKey,
self.keyBindingsModel)
rendererText.connect("editing-canceled",
self.editingCanceledKey)
rendererText.connect('edited',
self.editedKey,
self.keyBindingsModel,
MOD_MASK1, MOD_USED1, KEY1, CLICK_COUNT1, TEXT1)
column = Gtk.TreeViewColumn(guilabels.KB_HEADER_KEY_BINDING,
rendererText,
text=TEXT1,
editable=EDITABLE)
column.set_resizable(True)
column.set_sort_column_id(OLDTEXT1)
self.keyBindView.append_column(column)
# MODIF
#
rendererToggle = Gtk.CellRendererToggle()
rendererToggle.connect('toggled',
self.keyModifiedToggle,
self.keyBindingsModel,
MODIF)
column = Gtk.TreeViewColumn(guilabels.KB_MODIFIED,
rendererToggle,
active=MODIF,
activatable=EDITABLE)
#column.set_visible(False)
column.set_resizable(True)
column.set_sort_column_id(MODIF)
self.keyBindView.append_column(column)
# EDITABLE - invisible column
#
rendererToggle = Gtk.CellRendererToggle()
rendererToggle.set_property('activatable', False)
column = Gtk.TreeViewColumn("Modified",
rendererToggle,
active=EDITABLE)
column.set_visible(False)
column.set_resizable(True)
column.set_sort_column_id(EDITABLE)
self.keyBindView.append_column(column)
# Populates the treeview with all the keybindings:
#
self._populateKeyBindings()
self.window = self.get_widget("orcaSetupWindow")
self._setKeyEchoItems()
self.speechSystemsModel = \
self._initComboBox(self.get_widget("speechSystems"))
self.speechServersModel = \
self._initComboBox(self.get_widget("speechServers"))
self.speechFamiliesModel = \
self._initComboBox(self.get_widget("speechFamilies"))
self._initSpeechState()
# TODO - JD: Will this ever be the case??
self._isInitialSetup = \
not os.path.exists(_settingsManager.getPrefsDir())
self._initGUIState()
def _getACSSForVoiceType(self, voiceType):
"""Return the ACSS value for the the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
Returns the voice dictionary for the given voice type.
"""
if voiceType == DEFAULT:
voiceACSS = self.defaultVoice
elif voiceType == UPPERCASE:
voiceACSS = self.uppercaseVoice
elif voiceType == HYPERLINK:
voiceACSS = self.hyperlinkVoice
elif voiceType == SYSTEM:
voiceACSS = self.systemVoice
else:
voiceACSS = self.defaultVoice
return voiceACSS
def writeUserPreferences(self):
"""Write out the user's generic Orca preferences.
"""
pronunciationDict = self.getModelDict(self.pronunciationModel)
keyBindingsDict = self.getKeyBindingsModelDict(self.keyBindingsModel)
_settingsManager.saveSettings(self.prefsDict,
pronunciationDict,
keyBindingsDict)
def _getKeyValueForVoiceType(self, voiceType, key, useDefault=True):
"""Look for the value of the given key in the voice dictionary
for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
- key: the key to look for in the voice dictionary.
- useDefault: if True, and the key isn't found for the given voice
type, then look for it in the default voice dictionary
as well.
Returns the value of the given key, or None if it's not set.
"""
if voiceType == DEFAULT:
voice = self.defaultVoice
elif voiceType == UPPERCASE:
voice = self.uppercaseVoice
if key not in voice:
if not useDefault:
return None
voice = self.defaultVoice
elif voiceType == HYPERLINK:
voice = self.hyperlinkVoice
if key not in voice:
if not useDefault:
return None
voice = self.defaultVoice
elif voiceType == SYSTEM:
voice = self.systemVoice
if key not in voice:
if not useDefault:
return None
voice = self.defaultVoice
else:
voice = self.defaultVoice
if key in voice:
return voice[key]
else:
return None
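# Illustrative note (editor's sketch, not part of the original file): with
# useDefault=True, a lookup such as
# self._getKeyValueForVoiceType(UPPERCASE, acss.ACSS.RATE) falls back to
# the default voice's rate when the uppercase voice does not override it,
# and returns None only if neither dictionary defines the key.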
def _getFamilyNameForVoiceType(self, voiceType):
"""Gets the name of the voice family for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
Returns the name of the voice family for the given voice type,
or None if not set.
"""
familyName = None
family = self._getKeyValueForVoiceType(voiceType, acss.ACSS.FAMILY)
if family and speechserver.VoiceFamily.NAME in family:
familyName = family[speechserver.VoiceFamily.NAME]
return familyName
def _setFamilyNameForVoiceType(self, voiceType, name, language, dialect):
"""Sets the name of the voice family for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
- name: the name of the voice family to set.
- language: the locale of the voice family to set.
- dialect: the dialect of the voice family to set.
"""
family = self._getKeyValueForVoiceType(voiceType,
acss.ACSS.FAMILY,
False)
if family:
family[speechserver.VoiceFamily.NAME] = name
family[speechserver.VoiceFamily.LOCALE] = language
family[speechserver.VoiceFamily.DIALECT] = dialect
else:
voiceACSS = self._getACSSForVoiceType(voiceType)
voiceACSS[acss.ACSS.FAMILY] = {}
voiceACSS[acss.ACSS.FAMILY][speechserver.VoiceFamily.NAME] = name
voiceACSS[acss.ACSS.FAMILY][speechserver.VoiceFamily.LOCALE] = \
language
voiceACSS[acss.ACSS.FAMILY][speechserver.VoiceFamily.DIALECT] = dialect
#voiceACSS = self._getACSSForVoiceType(voiceType)
#settings.voices[voiceType] = voiceACSS
def _getRateForVoiceType(self, voiceType):
"""Gets the speaking rate value for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
Returns the rate value for the given voice type, or None if
not set.
"""
return self._getKeyValueForVoiceType(voiceType, acss.ACSS.RATE)
def _setRateForVoiceType(self, voiceType, value):
"""Sets the speaking rate value for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
- value: the rate value to set.
"""
voiceACSS = self._getACSSForVoiceType(voiceType)
voiceACSS[acss.ACSS.RATE] = value
#settings.voices[voiceType] = voiceACSS
def _getPitchForVoiceType(self, voiceType):
"""Gets the pitch value for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
Returns the pitch value for the given voice type, or None if
not set.
"""
return self._getKeyValueForVoiceType(voiceType,
acss.ACSS.AVERAGE_PITCH)
def _setPitchForVoiceType(self, voiceType, value):
"""Sets the pitch value for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
- value: the pitch value to set.
"""
voiceACSS = self._getACSSForVoiceType(voiceType)
voiceACSS[acss.ACSS.AVERAGE_PITCH] = value
#settings.voices[voiceType] = voiceACSS
def _getVolumeForVoiceType(self, voiceType):
"""Gets the volume (gain) value for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
Returns the volume (gain) value for the given voice type, or
None if not set.
"""
return self._getKeyValueForVoiceType(voiceType, acss.ACSS.GAIN)
def _setVolumeForVoiceType(self, voiceType, value):
"""Sets the volume (gain) value for the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
- value: the volume (gain) value to set.
"""
voiceACSS = self._getACSSForVoiceType(voiceType)
voiceACSS[acss.ACSS.GAIN] = value
#settings.voices[voiceType] = voiceACSS
def _setVoiceSettingsForVoiceType(self, voiceType):
"""Sets the family, rate, pitch and volume GUI components based
on the given voice type.
Arguments:
- voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
"""
familyName = self._getFamilyNameForVoiceType(voiceType)
self._setSpeechFamiliesChoice(familyName)
rate = self._getRateForVoiceType(voiceType)
if rate is not None:
self.get_widget("rateScale").set_value(rate)
else:
self.get_widget("rateScale").set_value(50.0)
pitch = self._getPitchForVoiceType(voiceType)
if pitch is not None:
self.get_widget("pitchScale").set_value(pitch)
else:
self.get_widget("pitchScale").set_value(5.0)
volume = self._getVolumeForVoiceType(voiceType)
if volume is not None:
self.get_widget("volumeScale").set_value(volume)
else:
self.get_widget("volumeScale").set_value(10.0)
def _setSpeechFamiliesChoice(self, familyName):
"""Sets the active item in the families ("Person:") combo box
to the given family name.
Arguments:
- familyName: the family name to use to set the active combo box item.
"""
if len(self.speechFamiliesChoices) == 0:
return
valueSet = False
i = 0
for family in self.speechFamiliesChoices:
name = family[speechserver.VoiceFamily.NAME]
if name == familyName:
self.get_widget("speechFamilies").set_active(i)
self.speechFamiliesChoice = self.speechFamiliesChoices[i]
valueSet = True
break
i += 1
if not valueSet:
debug.println(debug.LEVEL_FINEST,
"Could not find speech family match for %s" \
% familyName)
self.get_widget("speechFamilies").set_active(0)
self.speechFamiliesChoice = self.speechFamiliesChoices[0]
if valueSet:
self.selectedFamilyChoices[self.speechServersChoice] = i
def _setupFamilies(self):
"""Gets the list of voice families for the current speech server.
If there are families, get the information associated with
each voice family and add an entry for it to the families
GtkComboBox list.
"""
self.speechFamiliesModel.clear()
families = self.speechServersChoice.getVoiceFamilies()
self.speechFamiliesChoices = []
if len(families) == 0:
debug.println(debug.LEVEL_SEVERE, "Speech not available.")
debug.printStack(debug.LEVEL_FINEST)
self.speechFamiliesChoice = None
return
i = 0
for family in families:
name = family[speechserver.VoiceFamily.NAME] \
+ " (%s)" % family[speechserver.VoiceFamily.LOCALE]
self.speechFamiliesChoices.append(family)
self.speechFamiliesModel.append((i, name))
i += 1
# If the user manually selected a family for the current speech server,
# that choice is restored. Otherwise the first family
# (usually the default one) is selected.
#
selectedIndex = 0
if self.speechServersChoice in self.selectedFamilyChoices:
selectedIndex = self.selectedFamilyChoices[self.speechServersChoice]
self.get_widget("speechFamilies").set_active(selectedIndex)
# The family name will be selected as part of selecting the
# voice type. Whenever the families change, we'll reset the
# voice type selection to the first one ("Default").
#
comboBox = self.get_widget("voiceTypesCombo")
types = [guilabels.SPEECH_VOICE_TYPE_DEFAULT,
guilabels.SPEECH_VOICE_TYPE_UPPERCASE,
guilabels.SPEECH_VOICE_TYPE_HYPERLINK,
guilabels.SPEECH_VOICE_TYPE_SYSTEM]
self.populateComboBox(comboBox, types)
comboBox.set_active(DEFAULT)
voiceType = comboBox.get_active()
self._setVoiceSettingsForVoiceType(voiceType)
def _setSpeechServersChoice(self, serverInfo):
"""Sets the active item in the speech servers combo box to the
given server.
Arguments:
- serverInfo: the speech server to use to set the active combo
box item.
"""
if len(self.speechServersChoices) == 0:
return
# We'll fallback to whatever we happen to be using in the event
# that this preference has never been set.
#
if not serverInfo:
serverInfo = speech.getInfo()
valueSet = False
i = 0
for server in self.speechServersChoices:
if serverInfo == server.getInfo():
self.get_widget("speechServers").set_active(i)
self.speechServersChoice = server
valueSet = True
break
i += 1
if not valueSet:
debug.println(debug.LEVEL_FINEST,
"Could not find speech server match for %s" \
% repr(serverInfo))
self.get_widget("speechServers").set_active(0)
self.speechServersChoice = self.speechServersChoices[0]
self._setupFamilies()
def _setupSpeechServers(self):
"""Gets the list of speech servers for the current speech factory.
If there are servers, get the information associated with each
speech server and add an entry for it to the speechServers
GtkComboBox list. Set the current choice to be the first item.
"""
self.speechServersModel.clear()
self.speechServersChoices = \
self.speechSystemsChoice.SpeechServer.getSpeechServers()
if len(self.speechServersChoices) == 0:
debug.println(debug.LEVEL_SEVERE, "Speech not available.")
debug.printStack(debug.LEVEL_FINEST)
self.speechServersChoice = None
self.speechFamiliesChoices = []
self.speechFamiliesChoice = None
return
i = 0
for server in self.speechServersChoices:
name = server.getInfo()[0]
self.speechServersModel.append((i, name))
i += 1
self._setSpeechServersChoice(self.prefsDict["speechServerInfo"])
debug.println(
debug.LEVEL_FINEST,
"orca_gui_prefs._setupSpeechServers: speechServersChoice: %s" \
% self.speechServersChoice.getInfo())
def _setSpeechSystemsChoice(self, systemName):
"""Set the active item in the speech systems combo box to the
given system name.
Arguments:
- systemName: the speech system name to use to set the active combo
box item.
"""
systemName = systemName.strip("'")
if len(self.speechSystemsChoices) == 0:
self.speechSystemsChoice = None
return
valueSet = False
i = 0
for speechSystem in self.speechSystemsChoices:
name = speechSystem.__name__
if name.endswith(systemName):
self.get_widget("speechSystems").set_active(i)
self.speechSystemsChoice = self.speechSystemsChoices[i]
valueSet = True
break
i += 1
if not valueSet:
debug.println(debug.LEVEL_FINEST,
"Could not find speech system match for %s" \
% systemName)
self.get_widget("speechSystems").set_active(0)
self.speechSystemsChoice = self.speechSystemsChoices[0]
self._setupSpeechServers()
def _setupSpeechSystems(self, factories):
"""Sets up the speech systems combo box and sets the selection
to the preferred speech system.
Arguments:
- factories: the list of known speech factories (working or not)
"""
self.speechSystemsModel.clear()
self.workingFactories = []
for factory in factories:
try:
servers = factory.SpeechServer.getSpeechServers()
if len(servers):
self.workingFactories.append(factory)
except:
debug.printException(debug.LEVEL_FINEST)
self.speechSystemsChoices = []
if len(self.workingFactories) == 0:
debug.println(debug.LEVEL_SEVERE, "Speech not available.")
debug.printStack(debug.LEVEL_FINEST)
self.speechSystemsChoice = None
self.speechServersChoices = []
self.speechServersChoice = None
self.speechFamiliesChoices = []
self.speechFamiliesChoice = None
return
i = 0
for workingFactory in self.workingFactories:
self.speechSystemsChoices.append(workingFactory)
name = workingFactory.SpeechServer.getFactoryName()
self.speechSystemsModel.append((i, name))
i += 1
if self.prefsDict["speechServerFactory"]:
self._setSpeechSystemsChoice(self.prefsDict["speechServerFactory"])
else:
self.speechSystemsChoice = None
debug.println(
debug.LEVEL_FINEST,
"orca_gui_prefs._setupSpeechSystems: speechSystemsChoice: %s" \
% self.speechSystemsChoice)
def _initSpeechState(self):
"""Initialize the various speech components.
"""
voices = self.prefsDict["voices"]
self.defaultVoice = acss.ACSS(voices.get(settings.DEFAULT_VOICE))
self.uppercaseVoice = acss.ACSS(voices.get(settings.UPPERCASE_VOICE))
self.hyperlinkVoice = acss.ACSS(voices.get(settings.HYPERLINK_VOICE))
self.systemVoice = acss.ACSS(voices.get(settings.SYSTEM_VOICE))
# Just a note on general naming pattern:
#
# * = The name of the combobox
# *Model = the name of the combobox model
# *Choices = the Orca/speech python objects
# *Choice = a value from *Choices
#
# Where * = speechSystems, speechServers, speechFamilies
#
factories = speech.getSpeechServerFactories()
if len(factories) == 0 or not self.prefsDict.get('enableSpeech', True):
self.workingFactories = []
self.speechSystemsChoice = None
self.speechServersChoices = []
self.speechServersChoice = None
self.speechFamiliesChoices = []
self.speechFamiliesChoice = None
return
try:
speech.init()
except:
self.workingFactories = []
self.speechSystemsChoice = None
self.speechServersChoices = []
self.speechServersChoice = None
self.speechFamiliesChoices = []
self.speechFamiliesChoice = None
return
# This cascades into systems->servers->voice_type->families...
#
self.initializingSpeech = True
self._setupSpeechSystems(factories)
self.initializingSpeech = False
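# Editor's note (derived from the methods above, not stated in the original
# file): the "cascade" mentioned above is the call chain
# _setupSpeechSystems -> _setSpeechSystemsChoice -> _setupSpeechServers ->
# _setSpeechServersChoice -> _setupFamilies -> _setVoiceSettingsForVoiceType,
# so selecting a speech system repopulates the server, family and voice
# type widgets in turn.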
def _setSpokenTextAttributes(self, view, setAttributes,
state, moveToTop=False):
"""Given a set of spoken text attributes, update the model used by the
text attribute tree view.
Arguments:
- view: the text attribute tree view.
- setAttributes: the list of spoken text attributes to update.
- state: the state (True or False) that they all should be set to.
- moveToTop: if True, move these attributes to the top of the list.
"""
model = view.get_model()
view.set_model(None)
defScript = _scriptManager.getDefaultScript()
[attrList, attrDict] = \
defScript.utilities.stringToKeysAndDict(setAttributes)
[allAttrList, allAttrDict] = defScript.utilities.stringToKeysAndDict(
_settingsManager.getSetting('allTextAttributes'))
for i in range(0, len(attrList)):
for path in range(0, len(allAttrList)):
localizedKey = \
text_attribute_names.getTextAttributeName(attrList[i])
localizedValue = \
text_attribute_names.getTextAttributeName( \
attrDict[attrList[i]])
if localizedKey == model[path][NAME]:
thisIter = model.get_iter(path)
model.set_value(thisIter, NAME, localizedKey)
model.set_value(thisIter, IS_SPOKEN, state)
model.set_value(thisIter, VALUE, localizedValue)
if moveToTop:
thisIter = model.get_iter(path)
otherIter = model.get_iter(i)
model.move_before(thisIter, otherIter)
break
view.set_model(model)
def _setBrailledTextAttributes(self, view, setAttributes, state):
"""Given a set of brailled text attributes, update the model used
by the text attribute tree view.
Arguments:
- view: the text attribute tree view.
- setAttributes: the list of brailled text attributes to update.
- state: the state (True or False) that they all should be set to.
"""
model = view.get_model()
view.set_model(None)
defScript = _scriptManager.getDefaultScript()
[attrList, attrDict] = \
defScript.utilities.stringToKeysAndDict(setAttributes)
[allAttrList, allAttrDict] = defScript.utilities.stringToKeysAndDict(
_settingsManager.getSetting('allTextAttributes'))
for i in range(0, len(attrList)):
for path in range(0, len(allAttrList)):
localizedKey = \
text_attribute_names.getTextAttributeName(attrList[i])
if localizedKey == model[path][NAME]:
thisIter = model.get_iter(path)
model.set_value(thisIter, IS_BRAILLED, state)
break
view.set_model(model)
def _getAppNameForAttribute(self, attributeName):
"""Converts the given Atk attribute name into the application's
equivalent. This is necessary because an application or toolkit
(e.g. Gecko) might invent entirely new names for the same text
attributes.
Arguments:
- attributeName: the name of the text attribute
Returns the application's equivalent name if found or attributeName
otherwise.
"""
return attributeName
def _updateTextDictEntry(self):
"""The user has updated the text attribute list in some way. Update
the "enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
preference strings to reflect the current state of the corresponding
text attribute lists.
"""
model = self.getTextAttributesView.get_model()
spokenAttrStr = ""
brailledAttrStr = ""
noRows = model.iter_n_children(None)
for path in range(0, noRows):
localizedKey = model[path][NAME]
key = text_attribute_names.getTextAttributeKey(localizedKey)
# Convert the normalized, Atk attribute name back into what
# the app/toolkit uses.
#
key = self._getAppNameForAttribute(key)
localizedValue = model[path][VALUE]
value = text_attribute_names.getTextAttributeKey(localizedValue)
if model[path][IS_SPOKEN]:
spokenAttrStr += key + ":" + value + "; "
if model[path][IS_BRAILLED]:
brailledAttrStr += key + ":" + value + "; "
self.prefsDict["enabledSpokenTextAttributes"] = spokenAttrStr
self.prefsDict["enabledBrailledTextAttributes"] = brailledAttrStr
def contractedBrailleToggled(self, checkbox):
grid = self.get_widget('contractionTableGrid')
grid.set_sensitive(checkbox.get_active())
self.prefsDict["enableContractedBraille"] = checkbox.get_active()
def contractionTableComboChanged(self, combobox):
model = combobox.get_model()
myIter = combobox.get_active_iter()
self.prefsDict["brailleContractionTable"] = model[myIter][1]
def textAttributeSpokenToggled(self, cell, path, model):
"""The user has toggled the state of one of the text attribute
checkboxes to be spoken. Update our model to reflect this, then
update the "enabledSpokenTextAttributes" preference string.
Arguments:
- cell: the cell that changed.
- path: the path of that cell.
- model: the model that the cell is part of.
"""
thisIter = model.get_iter(path)
model.set(thisIter, IS_SPOKEN, not model[path][IS_SPOKEN])
self._updateTextDictEntry()
def textAttributeBrailledToggled(self, cell, path, model):
"""The user has toggled the state of one of the text attribute
checkboxes to be brailled. Update our model to reflect this,
then update the "enabledBrailledTextAttributes" preference string.
Arguments:
- cell: the cell that changed.
- path: the path of that cell.
- model: the model that the cell is part of.
"""
thisIter = model.get_iter(path)
model.set(thisIter, IS_BRAILLED, not model[path][IS_BRAILLED])
self._updateTextDictEntry()
def textAttrValueEdited(self, cell, path, new_text, model):
"""The user has edited the value of one of the text attributes.
Update our model to reflect this, then update the
"enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
preference strings.
Arguments:
- cell: the cell that changed.
- path: the path of that cell.
- new_text: the new text attribute value string.
- model: the model that the cell is part of.
"""
thisIter = model.get_iter(path)
model.set(thisIter, VALUE, new_text)
self._updateTextDictEntry()
def textAttrCursorChanged(self, widget):
"""Set the search column in the text attribute tree view
depending upon which column the user currently has the cursor in.
"""
[path, focusColumn] = self.getTextAttributesView.get_cursor()
if focusColumn:
noColumns = len(self.getTextAttributesView.get_columns())
for i in range(0, noColumns):
col = self.getTextAttributesView.get_column(i)
if focusColumn == col:
self.getTextAttributesView.set_search_column(i)
break
def _createTextAttributesTreeView(self):
"""Create the text attributes tree view. The view is the
textAttributesTreeView GtkTreeView widget. The view will consist
of a list containing three columns:
IS_SPOKEN - a checkbox whose state indicates whether this text
attribute will be spoken or not.
NAME - the text attribute name.
VALUE - if set (and this attribute is enabled for speaking),
then this attribute will be spoken unless it equals
this value.
"""
self.getTextAttributesView = self.get_widget("textAttributesTreeView")
if self.getTextAttributesView.get_columns():
for column in self.getTextAttributesView.get_columns():
self.getTextAttributesView.remove_column(column)
model = Gtk.ListStore(GObject.TYPE_STRING,
GObject.TYPE_BOOLEAN,
GObject.TYPE_BOOLEAN,
GObject.TYPE_STRING)
# Initially setup the list store model based on the values of all
# the known text attributes.
#
defScript = _scriptManager.getDefaultScript()
[allAttrList, allAttrDict] = defScript.utilities.stringToKeysAndDict(
_settingsManager.getSetting('allTextAttributes'))
for i in range(0, len(allAttrList)):
thisIter = model.append()
localizedKey = \
text_attribute_names.getTextAttributeName(allAttrList[i])
localizedValue = \
text_attribute_names.getTextAttributeName( \
allAttrDict[allAttrList[i]])
model.set_value(thisIter, NAME, localizedKey)
model.set_value(thisIter, IS_SPOKEN, False)
model.set_value(thisIter, IS_BRAILLED, False)
model.set_value(thisIter, VALUE, localizedValue)
self.getTextAttributesView.set_model(model)
# Attribute Name column (NAME).
column = Gtk.TreeViewColumn(guilabels.TEXT_ATTRIBUTE_NAME)
column.set_min_width(250)
column.set_resizable(True)
renderer = Gtk.CellRendererText()
column.pack_end(renderer, True)
column.add_attribute(renderer, 'text', NAME)
self.getTextAttributesView.insert_column(column, 0)
# Attribute Speak column (IS_SPOKEN).
speakAttrColumnLabel = guilabels.PRESENTATION_SPEAK
column = Gtk.TreeViewColumn(speakAttrColumnLabel)
renderer = Gtk.CellRendererToggle()
column.pack_start(renderer, False)
column.add_attribute(renderer, 'active', IS_SPOKEN)
renderer.connect("toggled",
self.textAttributeSpokenToggled,
model)
self.getTextAttributesView.insert_column(column, 1)
column.clicked()
# Attribute Mark in Braille column (IS_BRAILLED).
markAttrColumnLabel = guilabels.PRESENTATION_MARK_IN_BRAILLE
column = Gtk.TreeViewColumn(markAttrColumnLabel)
renderer = Gtk.CellRendererToggle()
column.pack_start(renderer, False)
column.add_attribute(renderer, 'active', IS_BRAILLED)
renderer.connect("toggled",
self.textAttributeBrailledToggled,
model)
self.getTextAttributesView.insert_column(column, 2)
column.clicked()
# Attribute Value column (VALUE)
column = Gtk.TreeViewColumn(guilabels.PRESENTATION_PRESENT_UNLESS)
renderer = Gtk.CellRendererText()
renderer.set_property('editable', True)
column.pack_end(renderer, True)
column.add_attribute(renderer, 'text', VALUE)
renderer.connect("edited", self.textAttrValueEdited, model)
self.getTextAttributesView.insert_column(column, 4)
# Check all the enabled (spoken) text attributes.
#
self._setSpokenTextAttributes(
self.getTextAttributesView,
_settingsManager.getSetting('enabledSpokenTextAttributes'),
True, True)
# Check all the enabled (brailled) text attributes.
#
self._setBrailledTextAttributes(
self.getTextAttributesView,
_settingsManager.getSetting('enabledBrailledTextAttributes'),
True)
# Connect a handler for when the user changes columns within the
# view, so that we can adjust the search column for item lookups.
#
self.getTextAttributesView.connect("cursor_changed",
self.textAttrCursorChanged)
def pronActualValueEdited(self, cell, path, new_text, model):
"""The user has edited the value of one of the actual strings in
the pronunciation dictionary. Update our model to reflect this.
Arguments:
- cell: the cell that changed.
- path: the path of that cell.
- new_text: the new pronunciation dictionary actual string.
- model: the model that the cell is part of.
"""
thisIter = model.get_iter(path)
model.set(thisIter, ACTUAL, new_text)
def pronReplacementValueEdited(self, cell, path, new_text, model):
"""The user has edited the value of one of the replacement strings
in the pronunciation dictionary. Update our model to reflect this.
Arguments:
- cell: the cell that changed.
- path: the path of that cell.
- new_text: the new pronunciation dictionary replacement string.
- model: the model that the cell is part of.
"""
thisIter = model.get_iter(path)
model.set(thisIter, REPLACEMENT, new_text)
def pronunciationFocusChange(self, widget, event, isFocused):
"""Callback for the pronunciation tree's focus-{in,out}-event signal."""
orca_state.usePronunciationDictionary = not isFocused
def pronunciationCursorChanged(self, widget):
"""Set the search column in the pronunciation dictionary tree view
depending upon which column the user currently has the cursor in.
"""
[path, focusColumn] = self.pronunciationView.get_cursor()
if focusColumn:
noColumns = len(self.pronunciationView.get_columns())
for i in range(0, noColumns):
col = self.pronunciationView.get_column(i)
if focusColumn == col:
self.pronunciationView.set_search_column(i)
break
def _createPronunciationTreeView(self, pronunciations=None):
"""Create the pronunciation dictionary tree view. The view is the
pronunciationTreeView GtkTreeView widget. The view will consist
of a list containing two columns:
ACTUAL - the actual text string (word).
REPLACEMENT - the string that is used to pronounce that word.
Arguments:
- pronunciations: an optional dictionary used to get the
pronunciation from.
"""
self.pronunciationView = self.get_widget("pronunciationTreeView")
if self.pronunciationView.get_columns():
for column in self.pronunciationView.get_columns():
self.pronunciationView.remove_column(column)
model = Gtk.ListStore(GObject.TYPE_STRING,
GObject.TYPE_STRING)
# Initially setup the list store model based on the values of all
# existing entries in the pronunciation dictionary.
#
if pronunciations is not None:
pronDict = pronunciations
else:
pronDict = pronunciation_dict.pronunciation_dict
for pronKey in sorted(pronDict.keys()):
thisIter = model.append()
try:
actual, replacement = pronDict[pronKey]
except:
# Try to do something sensible for the previous format of
# pronunciation dictionary entries. See bug #464754 for
# more details.
#
actual = pronKey
replacement = pronDict[pronKey]
model.set(thisIter,
ACTUAL, actual,
REPLACEMENT, replacement)
self.pronunciationView.set_model(model)
# Pronunciation Dictionary actual string (word) column (ACTUAL).
column = Gtk.TreeViewColumn(guilabels.DICTIONARY_ACTUAL_STRING)
column.set_min_width(250)
column.set_resizable(True)
renderer = Gtk.CellRendererText()
renderer.set_property('editable', True)
column.pack_end(renderer, True)
column.add_attribute(renderer, 'text', ACTUAL)
renderer.connect("edited", self.pronActualValueEdited, model)
self.pronunciationView.insert_column(column, 0)
# Pronunciation Dictionary replacement string column (REPLACEMENT)
column = Gtk.TreeViewColumn(guilabels.DICTIONARY_REPLACEMENT_STRING)
renderer = Gtk.CellRendererText()
renderer.set_property('editable', True)
column.pack_end(renderer, True)
column.add_attribute(renderer, 'text', REPLACEMENT)
renderer.connect("edited", self.pronReplacementValueEdited, model)
self.pronunciationView.insert_column(column, 1)
self.pronunciationModel = model
# Connect a handler for when the user changes columns within the
# view, so that we can adjust the search column for item lookups.
#
self.pronunciationView.connect("cursor_changed",
self.pronunciationCursorChanged)
self.pronunciationView.connect(
"focus_in_event", self.pronunciationFocusChange, True)
self.pronunciationView.connect(
"focus_out_event", self.pronunciationFocusChange, False)
def _initGUIState(self):
"""Adjust the settings of the various components on the
configuration GUI depending upon the user's preferences.
"""
prefs = self.prefsDict
# Speech pane.
#
enable = prefs["enableSpeech"]
self.get_widget("speechSupportCheckButton").set_active(enable)
self.get_widget("speechOptionsGrid").set_sensitive(enable)
enable = prefs["onlySpeakDisplayedText"]
self.get_widget("onlySpeakDisplayedTextCheckButton").set_active(enable)
self.get_widget("contextOptionsGrid").set_sensitive(not enable)
if prefs["verbalizePunctuationStyle"] == \
settings.PUNCTUATION_STYLE_NONE:
self.get_widget("noneButton").set_active(True)
elif prefs["verbalizePunctuationStyle"] == \
settings.PUNCTUATION_STYLE_SOME:
self.get_widget("someButton").set_active(True)
elif prefs["verbalizePunctuationStyle"] == \
settings.PUNCTUATION_STYLE_MOST:
self.get_widget("mostButton").set_active(True)
else:
self.get_widget("allButton").set_active(True)
if prefs["speechVerbosityLevel"] == settings.VERBOSITY_LEVEL_BRIEF:
self.get_widget("speechBriefButton").set_active(True)
else:
self.get_widget("speechVerboseButton").set_active(True)
if prefs["readTableCellRow"]:
self.get_widget("rowSpeechButton").set_active(True)
else:
self.get_widget("cellSpeechButton").set_active(True)
self.get_widget("onlySpeakDisplayedTextCheckButton").set_active(
prefs["onlySpeakDisplayedText"])
self.get_widget("enableSpeechIndentationCheckButton").set_active(\
prefs["enableSpeechIndentation"])
self.get_widget("speakBlankLinesCheckButton").set_active(\
prefs["speakBlankLines"])
self.get_widget("speakMultiCaseStringsAsWordsCheckButton").set_active(\
prefs["speakMultiCaseStringsAsWords"])
self.get_widget("enableTutorialMessagesCheckButton").set_active(\
prefs["enableTutorialMessages"])
self.get_widget("enablePauseBreaksCheckButton").set_active(\
prefs["enablePauseBreaks"])
self.get_widget("enablePositionSpeakingCheckButton").set_active(\
prefs["enablePositionSpeaking"])
self.get_widget("enableMnemonicSpeakingCheckButton").set_active(\
prefs["enableMnemonicSpeaking"])
combobox = self.get_widget("sayAllStyle")
self.populateComboBox(combobox, [guilabels.SAY_ALL_STYLE_LINE,
guilabels.SAY_ALL_STYLE_SENTENCE])
combobox.set_active(prefs["sayAllStyle"])
combobox2 = self.get_widget("dateFormatCombo")
sdtime = time.strftime
ltime = time.localtime
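        # Fill the date format combo box with today's date rendered in each
        # supported format; the row order matches the local DATE_FORMAT_*
        # index constants used below when mapping the stored preference.
        #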
self.populateComboBox(combobox2,
[sdtime(settings.DATE_FORMAT_LOCALE, ltime()),
sdtime(settings.DATE_FORMAT_NUMBERS_DM, ltime()),
sdtime(settings.DATE_FORMAT_NUMBERS_MD, ltime()),
sdtime(settings.DATE_FORMAT_NUMBERS_DMY, ltime()),
sdtime(settings.DATE_FORMAT_NUMBERS_MDY, ltime()),
sdtime(settings.DATE_FORMAT_NUMBERS_YMD, ltime()),
sdtime(settings.DATE_FORMAT_FULL_DM, ltime()),
sdtime(settings.DATE_FORMAT_FULL_MD, ltime()),
sdtime(settings.DATE_FORMAT_FULL_DMY, ltime()),
sdtime(settings.DATE_FORMAT_FULL_MDY, ltime()),
sdtime(settings.DATE_FORMAT_FULL_YMD, ltime()),
sdtime(settings.DATE_FORMAT_ABBREVIATED_DM, ltime()),
sdtime(settings.DATE_FORMAT_ABBREVIATED_MD, ltime()),
sdtime(settings.DATE_FORMAT_ABBREVIATED_DMY, ltime()),
sdtime(settings.DATE_FORMAT_ABBREVIATED_MDY, ltime()),
sdtime(settings.DATE_FORMAT_ABBREVIATED_YMD, ltime())
])
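        # Map the date format stored in the user's preferences onto the
        # corresponding combo box index.
        #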
indexdate = DATE_FORMAT_LOCALE
dateFormat = self.prefsDict["presentDateFormat"]
if dateFormat == settings.DATE_FORMAT_LOCALE:
indexdate = DATE_FORMAT_LOCALE
elif dateFormat == settings.DATE_FORMAT_NUMBERS_DM:
indexdate = DATE_FORMAT_NUMBERS_DM
elif dateFormat == settings.DATE_FORMAT_NUMBERS_MD:
indexdate = DATE_FORMAT_NUMBERS_MD
elif dateFormat == settings.DATE_FORMAT_NUMBERS_DMY:
indexdate = DATE_FORMAT_NUMBERS_DMY
elif dateFormat == settings.DATE_FORMAT_NUMBERS_MDY:
indexdate = DATE_FORMAT_NUMBERS_MDY
elif dateFormat == settings.DATE_FORMAT_NUMBERS_YMD:
indexdate = DATE_FORMAT_NUMBERS_YMD
elif dateFormat == settings.DATE_FORMAT_FULL_DM:
indexdate = DATE_FORMAT_FULL_DM
elif dateFormat == settings.DATE_FORMAT_FULL_MD:
indexdate = DATE_FORMAT_FULL_MD
elif dateFormat == settings.DATE_FORMAT_FULL_DMY:
indexdate = DATE_FORMAT_FULL_DMY
elif dateFormat == settings.DATE_FORMAT_FULL_MDY:
indexdate = DATE_FORMAT_FULL_MDY
elif dateFormat == settings.DATE_FORMAT_FULL_YMD:
indexdate = DATE_FORMAT_FULL_YMD
elif dateFormat == settings.DATE_FORMAT_ABBREVIATED_DM:
indexdate = DATE_FORMAT_ABBREVIATED_DM
elif dateFormat == settings.DATE_FORMAT_ABBREVIATED_MD:
indexdate = DATE_FORMAT_ABBREVIATED_MD
elif dateFormat == settings.DATE_FORMAT_ABBREVIATED_DMY:
indexdate = DATE_FORMAT_ABBREVIATED_DMY
elif dateFormat == settings.DATE_FORMAT_ABBREVIATED_MDY:
indexdate = DATE_FORMAT_ABBREVIATED_MDY
elif dateFormat == settings.DATE_FORMAT_ABBREVIATED_YMD:
indexdate = DATE_FORMAT_ABBREVIATED_YMD
        combobox2.set_active(indexdate)
combobox3 = self.get_widget("timeFormatCombo")
self.populateComboBox(combobox3,
[sdtime(settings.TIME_FORMAT_LOCALE, ltime()),
sdtime(settings.TIME_FORMAT_24_HMS, ltime()),
sdtime(settings.TIME_FORMAT_24_HMS_WITH_WORDS, ltime()),
sdtime(settings.TIME_FORMAT_24_HM, ltime()),
sdtime(settings.TIME_FORMAT_24_HM_WITH_WORDS, ltime())])
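        # Map the stored time format onto its combo box index.
        #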
indextime = TIME_FORMAT_LOCALE
timeFormat = self.prefsDict["presentTimeFormat"]
if timeFormat == settings.TIME_FORMAT_LOCALE:
indextime = TIME_FORMAT_LOCALE
elif timeFormat == settings.TIME_FORMAT_24_HMS:
indextime = TIME_FORMAT_24_HMS
elif timeFormat == settings.TIME_FORMAT_24_HMS_WITH_WORDS:
indextime = TIME_FORMAT_24_HMS_WITH_WORDS
elif timeFormat == settings.TIME_FORMAT_24_HM:
indextime = TIME_FORMAT_24_HM
elif timeFormat == settings.TIME_FORMAT_24_HM_WITH_WORDS:
indextime = TIME_FORMAT_24_HM_WITH_WORDS
        combobox3.set_active(indextime)
# Set the sensitivity of the "Update Interval" items, depending
# upon whether the "Speak progress bar updates" checkbox is checked.
#
enable = prefs["enableProgressBarUpdates"]
self.get_widget("speechProgressBarCheckButton").set_active(enable)
self.get_widget("progressBarUpdatesOptionsGrid").set_sensitive(enable)
interval = prefs["progressBarUpdateInterval"]
self.get_widget("speakProgressBarSpinButton").set_value(interval)
comboBox = self.get_widget("progressBarVerbosity")
levels = [guilabels.PROGRESS_BAR_ALL,
guilabels.PROGRESS_BAR_APPLICATION,
guilabels.PROGRESS_BAR_WINDOW]
self.populateComboBox(comboBox, levels)
comboBox.set_active(prefs["progressBarVerbosity"])
enable = prefs["enableMouseReview"]
self.get_widget("enableMouseReviewCheckButton").set_active(enable)
# Braille pane.
#
self.get_widget("enableBrailleCheckButton").set_active( \
prefs["enableBraille"])
state = prefs["brailleRolenameStyle"] == \
settings.BRAILLE_ROLENAME_STYLE_SHORT
self.get_widget("abbrevRolenames").set_active(state)
self.get_widget("disableBrailleEOLCheckButton").set_active(
prefs["disableBrailleEOL"])
if louis is None:
self.get_widget( \
"contractedBrailleCheckButton").set_sensitive(False)
else:
self.get_widget("contractedBrailleCheckButton").set_active( \
prefs["enableContractedBraille"])
# Set up contraction table combo box and set it to the
# currently used one.
#
tablesCombo = self.get_widget("contractionTableCombo")
tableDict = braille.listTables()
selectedTableIter = None
selectedTable = prefs["brailleContractionTable"] or \
braille.getDefaultTable()
if tableDict:
tablesModel = Gtk.ListStore(str, str)
names = sorted(tableDict.keys())
for name in names:
fname = tableDict[name]
it = tablesModel.append([name, fname])
if os.path.join(braille.tablesdir, fname) == \
selectedTable:
selectedTableIter = it
cell = self.planeCellRendererText
tablesCombo.clear()
tablesCombo.pack_start(cell, True)
tablesCombo.add_attribute(cell, 'text', 0)
tablesCombo.set_model(tablesModel)
if selectedTableIter:
tablesCombo.set_active_iter(selectedTableIter)
else:
tablesCombo.set_active(0)
else:
tablesCombo.set_sensitive(False)
if prefs["brailleVerbosityLevel"] == settings.VERBOSITY_LEVEL_BRIEF:
self.get_widget("brailleBriefButton").set_active(True)
else:
self.get_widget("brailleVerboseButton").set_active(True)
selectionIndicator = prefs["brailleSelectorIndicator"]
if selectionIndicator == settings.BRAILLE_SEL_7:
self.get_widget("brailleSelection7Button").set_active(True)
elif selectionIndicator == settings.BRAILLE_SEL_8:
self.get_widget("brailleSelection8Button").set_active(True)
elif selectionIndicator == settings.BRAILLE_SEL_BOTH:
self.get_widget("brailleSelectionBothButton").set_active(True)
else:
self.get_widget("brailleSelectionNoneButton").set_active(True)
linkIndicator = prefs["brailleLinkIndicator"]
if linkIndicator == settings.BRAILLE_LINK_7:
self.get_widget("brailleLink7Button").set_active(True)
elif linkIndicator == settings.BRAILLE_LINK_8:
self.get_widget("brailleLink8Button").set_active(True)
elif linkIndicator == settings.BRAILLE_LINK_BOTH:
self.get_widget("brailleLinkBothButton").set_active(True)
else:
self.get_widget("brailleLinkNoneButton").set_active(True)
# Key Echo pane.
#
self.get_widget("keyEchoCheckButton").set_active( \
prefs["enableKeyEcho"])
self.get_widget("enablePrintableKeysCheckButton").set_active( \
prefs["enablePrintableKeys"])
self.get_widget("enableModifierKeysCheckButton").set_active( \
prefs["enableModifierKeys"])
self.get_widget("enableFunctionKeysCheckButton").set_active( \
prefs["enableFunctionKeys"])
self.get_widget("enableActionKeysCheckButton").set_active( \
prefs["enableActionKeys"])
self.get_widget("enableNavigationKeysCheckButton").set_active( \
prefs["enableNavigationKeys"])
self.get_widget("enableDiacriticalKeysCheckButton").set_active( \
prefs["enableDiacriticalKeys"])
self.get_widget("enableEchoByCharacterCheckButton").set_active( \
prefs["enableEchoByCharacter"])
self.get_widget("enableEchoByWordCheckButton").set_active( \
prefs["enableEchoByWord"])
self.get_widget("enableEchoBySentenceCheckButton").set_active( \
prefs["enableEchoBySentence"])
# Text attributes pane.
#
self._createTextAttributesTreeView()
brailleIndicator = prefs["textAttributesBrailleIndicator"]
if brailleIndicator == settings.TEXT_ATTR_BRAILLE_7:
self.get_widget("textBraille7Button").set_active(True)
elif brailleIndicator == settings.TEXT_ATTR_BRAILLE_8:
self.get_widget("textBraille8Button").set_active(True)
elif brailleIndicator == settings.TEXT_ATTR_BRAILLE_BOTH:
self.get_widget("textBrailleBothButton").set_active(True)
else:
self.get_widget("textBrailleNoneButton").set_active(True)
# Pronunciation dictionary pane.
#
_profile = self.prefsDict.get('activeProfile')[1]
pronunciationsDict = _settingsManager.getPronunciations(_profile)
self._createPronunciationTreeView(pronunciationsDict)
# General pane.
#
self.get_widget("presentToolTipsCheckButton").set_active(
prefs["presentToolTips"])
if prefs["keyboardLayout"] == settings.GENERAL_KEYBOARD_LAYOUT_DESKTOP:
self.get_widget("generalDesktopButton").set_active(True)
else:
self.get_widget("generalLaptopButton").set_active(True)
# Orca User Profiles
#
self.profilesCombo = self.get_widget('availableProfilesComboBox1')
self.startingProfileCombo = self.get_widget('availableProfilesComboBox2')
self.profilesComboModel = self.get_widget('model9')
self.__initProfileCombo()
def __initProfileCombo(self):
"""Adding available profiles and setting active as the active one"""
availableProfiles = self.__getAvailableProfiles()
self.profilesComboModel.clear()
defaultValue = ['Default', 'default']
        if not availableProfiles:
self.profilesComboModel.append(defaultValue)
else:
for profile in availableProfiles:
self.profilesComboModel.append(profile)
activeProfile = self.prefsDict.get('activeProfile') or defaultValue
startingProfile = self.prefsDict.get('startingProfile') or defaultValue
activeProfileIter = self.getComboBoxIndex(self.profilesCombo,
activeProfile[0])
startingProfileIter = self.getComboBoxIndex(self.startingProfileCombo,
startingProfile[0])
self.profilesCombo.set_active(activeProfileIter)
self.startingProfileCombo.set_active(startingProfileIter)
def __getAvailableProfiles(self):
"""Get available user profiles."""
return _settingsManager.availableProfiles()
def _updateOrcaModifier(self):
combobox = self.get_widget("orcaModifierComboBox")
keystring = ", ".join(self.prefsDict["orcaModifierKeys"])
combobox.set_active(self.getComboBoxIndex(combobox, keystring))
def populateComboBox(self, combobox, items):
"""Populates the combobox with the items provided.
Arguments:
- combobox: the GtkComboBox to populate
- items: the list of strings with which to populate it
"""
model = Gtk.ListStore(str)
for item in items:
model.append([item])
combobox.set_model(model)
def getComboBoxIndex(self, combobox, searchStr, col=0):
""" For each of the entries in the given combo box, look for searchStr.
Return the index of the entry if searchStr is found.
Arguments:
- combobox: the GtkComboBox to search.
        - searchStr: the string to search for.
        - col: the model column to compare against (defaults to 0).
Returns the index of the first entry in combobox with searchStr, or
0 if not found.
"""
model = combobox.get_model()
myiter = model.get_iter_first()
for i in range(0, len(model)):
name = model.get_value(myiter, col)
if name == searchStr:
return i
myiter = model.iter_next(myiter)
return 0
def getComboBoxList(self, combobox):
"""Get the list of values from the active combox
"""
active = combobox.get_active()
model = combobox.get_model()
activeIter = model.get_iter(active)
activeLabel = model.get_value(activeIter, 0)
activeName = model.get_value(activeIter, 1)
return [activeLabel, activeName]
def getKeyBindingsModelDict(self, model, modifiedOnly=True):
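        """Return a dictionary mapping handler names in the key bindings model
        to their [key, modifier mask, modifiers used, click count] data,
        restricted to modified bindings unless modifiedOnly is False.
        Arguments:
        - model: the Gtk.TreeStore containing the key bindings
        - modifiedOnly: if True (the default), only include modified bindings
        """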
modelDict = {}
node = model.get_iter_first()
while node:
child = model.iter_children(node)
while child:
key, modified = model.get(child, HANDLER, MODIF)
if modified or not modifiedOnly:
value = []
value.append(model.get(
child, KEY1, MOD_MASK1, MOD_USED1, CLICK_COUNT1))
modelDict[key] = value
child = model.iter_next(child)
node = model.iter_next(node)
return modelDict
def getModelDict(self, model):
"""Get the list of values from a list[str,str] model
"""
pronunciation_dict.pronunciation_dict = {}
currentIter = model.get_iter_first()
while currentIter is not None:
key, value = model.get(currentIter, ACTUAL, REPLACEMENT)
if key and value:
pronunciation_dict.setPronunciation(key, value)
currentIter = model.iter_next(currentIter)
modelDict = pronunciation_dict.pronunciation_dict
return modelDict
def showGUI(self):
"""Show the Orca configuration GUI window. This assumes that
the GUI has already been created.
"""
orcaSetupWindow = self.get_widget("orcaSetupWindow")
accelGroup = Gtk.AccelGroup()
orcaSetupWindow.add_accel_group(accelGroup)
helpButton = self.get_widget("helpButton")
(keyVal, modifierMask) = Gtk.accelerator_parse("F1")
helpButton.add_accelerator("clicked",
accelGroup,
keyVal,
modifierMask,
0)
ts = orca_state.lastInputEventTimestamp
if ts == 0:
ts = Gtk.get_current_event_time()
orcaSetupWindow.present_with_time(ts)
# We always want to re-order the text attributes page so that enabled
# items are consistently at the top.
#
self._setSpokenTextAttributes(
self.getTextAttributesView,
_settingsManager.getSetting('enabledSpokenTextAttributes'),
True, True)
orcaSetupWindow.show()
def _initComboBox(self, combobox):
"""Initialize the given combo box to take a list of int/str pairs.
Arguments:
- combobox: the GtkComboBox to initialize.
"""
cell = Gtk.CellRendererText()
combobox.pack_start(cell, True)
# We only want to display one column; not two.
#
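        # Use the combo box's first cell renderer (normally the renderer we
        # just packed); if get_cells() is unavailable or returns nothing,
        # fall back to adding the attribute on the renderer directly.
        #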
try:
columnToDisplay = combobox.get_cells()[0]
combobox.add_attribute(columnToDisplay, 'text', 1)
        except (AttributeError, IndexError):
combobox.add_attribute(cell, 'text', 1)
model = Gtk.ListStore(int, str)
combobox.set_model(model)
# Force the display comboboxes to be left aligned.
#
if isinstance(combobox, Gtk.ComboBoxText):
size = combobox.size_request()
cell.set_fixed_size(size[0] - 29, -1)
return model
def _setKeyEchoItems(self):
"""[In]sensitize the checkboxes for the various types of key echo,
depending upon whether the value of the key echo check button is set.
"""
enable = self.get_widget("keyEchoCheckButton").get_active()
self.get_widget("enablePrintableKeysCheckButton").set_sensitive(enable)
self.get_widget("enableModifierKeysCheckButton").set_sensitive(enable)
self.get_widget("enableFunctionKeysCheckButton").set_sensitive(enable)
self.get_widget("enableActionKeysCheckButton").set_sensitive(enable)
self.get_widget("enableNavigationKeysCheckButton").set_sensitive(enable)
self.get_widget("enableDiacriticalKeysCheckButton").set_sensitive( \
enable)
def _presentMessage(self, text, interrupt=False):
"""If the text field is not None, presents the given text, optionally
interrupting anything currently being spoken.
Arguments:
- text: the text to present
- interrupt: if True, interrupt any speech currently being spoken
"""
defScript = _scriptManager.getDefaultScript()
defScript.speakMessage(text, interrupt=interrupt)
try:
defScript.displayBrailleMessage(text, flashTime=-1)
except:
pass
def _createNode(self, appName):
"""Create a new root node in the TreeStore model with the name of the
application.
Arguments:
        - appName: the name of the TreeStore node (the same as the application name)
"""
model = self.keyBindingsModel
myiter = model.append(None)
model.set_value(myiter, DESCRIP, appName)
model.set_value(myiter, MODIF, False)
return myiter
def _getIterOf(self, appName):
"""Returns the Gtk.TreeIter of the TreeStore model
that matches the application name passed as argument
Arguments:
        - appName: a string with the name of the application for the node
          wanted; it is the same as the DESCRIP field of the treeStore model
"""
model = self.keyBindingsModel
for row in model:
if ((model.iter_depth(row.iter) == 0) \
and (row[DESCRIP] == appName)):
return row.iter
return None
def _clickCountToString(self, clickCount):
"""Given a numeric clickCount, returns a string for inclusion
in the list of keybindings.
Argument:
- clickCount: the number of clicks associated with the keybinding.
"""
clickCountString = ""
if clickCount == 2:
clickCountString = " (%s)" % guilabels.CLICK_COUNT_DOUBLE
elif clickCount == 3:
clickCountString = " (%s)" % guilabels.CLICK_COUNT_TRIPLE
return clickCountString
def _insertRow(self, handl, kb, parent=None, modif=False):
"""Appends a new row with the new keybinding data to the treeview
Arguments:
- handl: the name of the handler associated to the keyBinding
- kb: the new keybinding.
- parent: the parent node of the treeview, where to append the kb
- modif: whether to check the modified field or not.
Returns a Gtk.TreeIter pointing at the new row.
"""
model = self.keyBindingsModel
        if parent is None:
            parent = self._getIterOf(guilabels.KB_GROUP_DEFAULT)
        if parent is not None:
myiter = model.append(parent)
if not kb.keysymstring:
text = None
else:
clickCount = self._clickCountToString(kb.click_count)
modifierNames = keybindings.getModifierNames(kb.modifiers)
keysymstring = kb.keysymstring
                text = modifierNames + keysymstring + clickCount
model.set_value(myiter, HANDLER, handl)
model.set_value(myiter, DESCRIP, kb.handler.description)
model.set_value(myiter, MOD_MASK1, str(kb.modifier_mask))
model.set_value(myiter, MOD_USED1, str(kb.modifiers))
model.set_value(myiter, KEY1, kb.keysymstring)
model.set_value(myiter, CLICK_COUNT1, str(kb.click_count))
            if text is not None:
model.set_value(myiter, OLDTEXT1, text)
model.set_value(myiter, TEXT1, text)
model.set_value(myiter, MODIF, modif)
model.set_value(myiter, EDITABLE, True)
return myiter
else:
return None
def _insertRowBraille(self, handl, com, inputEvHand,
parent=None, modif=False):
"""Appends a new row with the new braille binding data to the treeview
Arguments:
- handl: the name of the handler associated to the brailleBinding
- com: the BrlTTY command
- inputEvHand: the inputEventHandler with the new brailleBinding
- parent: the parent node of the treeview, where to append the kb
- modif: whether to check the modified field or not.
Returns a Gtk.TreeIter pointing at the new row.
"""
model = self.keyBindingsModel
        if parent is None:
            parent = self._getIterOf(guilabels.KB_GROUP_BRAILLE)
        if parent is not None:
myiter = model.append(parent)
model.set_value(myiter, HANDLER, handl)
model.set_value(myiter, DESCRIP, inputEvHand.description)
model.set_value(myiter, KEY1, str(com))
model.set_value(myiter, TEXT1, braille.command_name[com])
model.set_value(myiter, MODIF, modif)
model.set_value(myiter, EDITABLE, False)
return myiter
else:
return None
def _markModified(self):
""" Mark as modified the user custom key bindings:
"""
try:
defScript = _scriptManager.getDefaultScript()
defScript.setupInputEventHandlers()
keyBinds = keybindings.KeyBindings()
keyBinds = settings.overrideKeyBindings(defScript, keyBinds)
keyBind = keybindings.KeyBinding(None, None, None, None)
treeModel = self.keyBindingsModel
myiter = treeModel.get_iter_first()
            while myiter is not None:
                iterChild = treeModel.iter_children(myiter)
                while iterChild is not None:
descrip = treeModel.get_value(iterChild, DESCRIP)
keyBind.handler = \
input_event.InputEventHandler(None, descrip)
if keyBinds.hasKeyBinding(keyBind,
typeOfSearch="description"):
treeModel.set_value(iterChild, MODIF, True)
iterChild = treeModel.iter_next(iterChild)
myiter = treeModel.iter_next(myiter)
except:
debug.printException(debug.LEVEL_SEVERE)
def _populateKeyBindings(self, clearModel=True):
"""Fills the TreeView with the list of Orca keybindings
Arguments:
- clearModel: if True, initially clear out the key bindings model.
"""
self.keyBindView.set_model(None)
self.keyBindView.set_headers_visible(False)
self.keyBindView.hide()
if clearModel:
self.keyBindingsModel.clear()
self.kbindings = None
iterOrca = self._getIterOf(guilabels.KB_GROUP_DEFAULT) \
or self._createNode(guilabels.KB_GROUP_DEFAULT)
iterUnbound = self._getIterOf(guilabels.KB_GROUP_UNBOUND) \
or self._createNode(guilabels.KB_GROUP_UNBOUND)
defScript = _scriptManager.getDefaultScript()
# If we are in the app-specific preferences, we already have
# populated our tree with bindings. Otherwise, we need to
# start from scratch.
#
if not self.kbindings:
self.kbindings = keybindings.KeyBindings()
defScript.setupInputEventHandlers()
self.defKeyBindings = defScript.getKeyBindings()
for kb in self.defKeyBindings.keyBindings:
if not self.kbindings.hasKeyBinding(kb, "strict"):
handl = defScript.getInputEventHandlerKey(kb.handler)
if kb.keysymstring:
self._insertRow(handl, kb, iterOrca)
else:
self._insertRow(handl, kb, iterUnbound)
self.kbindings.add(kb)
if not self.keyBindingsModel.iter_has_child(iterUnbound):
self.keyBindingsModel.remove(iterUnbound)
self._updateOrcaModifier()
self._markModified()
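        # Add the braille bindings of the default script as their own group.
        #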
iterBB = self._createNode(guilabels.KB_GROUP_BRAILLE)
self.bbindings = defScript.getBrailleBindings()
for com, inputEvHand in list(self.bbindings.items()):
handl = defScript.getInputEventHandlerKey(inputEvHand)
self._insertRowBraille(handl, com, inputEvHand, iterBB)
self.keyBindView.set_model(self.keyBindingsModel)
self.keyBindView.set_headers_visible(True)
self.keyBindView.expand_all()
self.keyBindingsModel.set_sort_column_id(OLDTEXT1, Gtk.SortType.ASCENDING)
self.keyBindView.show()
# Keep track of new/unbound keybindings that have yet to be applied.
#
self.pendingKeyBindings = {}
def _cleanupSpeechServers(self):
"""Remove unwanted factories and drivers for the current active
factory, when the user dismisses the Orca Preferences dialog."""
for workingFactory in self.workingFactories:
if not (workingFactory == self.speechSystemsChoice):
workingFactory.SpeechServer.shutdownActiveServers()
else:
servers = workingFactory.SpeechServer.getSpeechServers()
for server in servers:
if not (server == self.speechServersChoice):
server.shutdown()
def speechSupportChecked(self, widget):
"""Signal handler for the "toggled" signal for the
speechSupportCheckButton GtkCheckButton widget. The user has
[un]checked the 'Enable Speech' checkbox. Set the 'enableSpeech'
preference to the new value. Set the rest of the speech pane items
        [in]sensitive depending upon whether this checkbox is checked.
Arguments:
- widget: the component that generated the signal.
"""
enable = widget.get_active()
self.prefsDict["enableSpeech"] = enable
self.get_widget("speechOptionsGrid").set_sensitive(enable)
def onlySpeakDisplayedTextToggled(self, widget):
"""Signal handler for the "toggled" signal for the GtkCheckButton
onlySpeakDisplayedText. In addition to updating the preferences,
set the sensitivity of the contextOptionsGrid.
Arguments:
- widget: the component that generated the signal.
"""
enable = widget.get_active()
self.prefsDict["onlySpeakDisplayedText"] = enable
self.get_widget("contextOptionsGrid").set_sensitive(not enable)
def speechSystemsChanged(self, widget):
"""Signal handler for the "changed" signal for the speechSystems
GtkComboBox widget. The user has selected a different speech
system. Clear the existing list of speech servers, and setup
a new list of speech servers based on the new choice. Setup a
new list of voices for the first speech server in the list.
Arguments:
- widget: the component that generated the signal.
"""
if self.initializingSpeech:
return
selectedIndex = widget.get_active()
self.speechSystemsChoice = self.speechSystemsChoices[selectedIndex]
self._setupSpeechServers()
def speechServersChanged(self, widget):
"""Signal handler for the "changed" signal for the speechServers
GtkComboBox widget. The user has selected a different speech
server. Clear the existing list of voices, and setup a new
list of voices based on the new choice.
Arguments:
- widget: the component that generated the signal.
"""
if self.initializingSpeech:
return
selectedIndex = widget.get_active()
self.speechServersChoice = self.speechServersChoices[selectedIndex]
# Whenever the speech servers change, we need to make sure we
# clear whatever family was in use by the current voice types.
# Otherwise, we can end up with family names from one server
# bleeding over (e.g., "Paul" from Fonix ends up getting in
# the "Default" voice type after we switch to eSpeak).
#
try:
del self.defaultVoice[acss.ACSS.FAMILY]
del self.uppercaseVoice[acss.ACSS.FAMILY]
del self.hyperlinkVoice[acss.ACSS.FAMILY]
del self.systemVoice[acss.ACSS.FAMILY]
except:
pass
self._setupFamilies()
def speechFamiliesChanged(self, widget):
"""Signal handler for the "value_changed" signal for the families
GtkComboBox widget. The user has selected a different voice
family. Save the new voice family name based on the new choice.
Arguments:
- widget: the component that generated the signal.
"""
if self.initializingSpeech:
return
selectedIndex = widget.get_active()
try:
family = self.speechFamiliesChoices[selectedIndex]
name = family[speechserver.VoiceFamily.NAME]
language = family[speechserver.VoiceFamily.LOCALE]
dialect = family[speechserver.VoiceFamily.DIALECT]
voiceType = self.get_widget("voiceTypesCombo").get_active()
self._setFamilyNameForVoiceType(voiceType, name, language, dialect)
except:
debug.printException(debug.LEVEL_SEVERE)
# Remember the last family manually selected by the user for the
# current speech server.
#
        if selectedIndex != -1:
self.selectedFamilyChoices[self.speechServersChoice] = selectedIndex
def voiceTypesChanged(self, widget):
"""Signal handler for the "changed" signal for the voiceTypes
GtkComboBox widget. The user has selected a different voice
type. Setup the new family, rate, pitch and volume component
values based on the new choice.
Arguments:
- widget: the component that generated the signal.
"""
if self.initializingSpeech:
return
voiceType = widget.get_active()
self._setVoiceSettingsForVoiceType(voiceType)
def rateValueChanged(self, widget):
"""Signal handler for the "value_changed" signal for the rateScale
GtkScale widget. The user has changed the current rate value.
Save the new rate value based on the currently selected voice
type.
Arguments:
- widget: the component that generated the signal.
"""
rate = widget.get_value()
voiceType = self.get_widget("voiceTypesCombo").get_active()
self._setRateForVoiceType(voiceType, rate)
voices = _settingsManager.getSetting('voices')
voices[settings.DEFAULT_VOICE][acss.ACSS.RATE] = rate
_settingsManager.setSetting('voices', voices)
def pitchValueChanged(self, widget):
"""Signal handler for the "value_changed" signal for the pitchScale
GtkScale widget. The user has changed the current pitch value.
Save the new pitch value based on the currently selected voice
type.
Arguments:
- widget: the component that generated the signal.
"""
pitch = widget.get_value()
voiceType = self.get_widget("voiceTypesCombo").get_active()
self._setPitchForVoiceType(voiceType, pitch)
voices = _settingsManager.getSetting('voices')
voices[settings.DEFAULT_VOICE][acss.ACSS.AVERAGE_PITCH] = pitch
_settingsManager.setSetting('voices', voices)
def volumeValueChanged(self, widget):
"""Signal handler for the "value_changed" signal for the voiceScale
GtkScale widget. The user has changed the current volume value.
Save the new volume value based on the currently selected voice
type.
Arguments:
- widget: the component that generated the signal.
"""
volume = widget.get_value()
voiceType = self.get_widget("voiceTypesCombo").get_active()
self._setVolumeForVoiceType(voiceType, volume)
voices = _settingsManager.getSetting('voices')
voices[settings.DEFAULT_VOICE][acss.ACSS.GAIN] = volume
_settingsManager.setSetting('voices', voices)
def checkButtonToggled(self, widget):
"""Signal handler for "toggled" signal for basic GtkCheckButton
widgets. The user has altered the state of the checkbox.
Set the preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
# To use this default handler please make sure:
# The name of the setting that will be changed is: settingName
# The id of the widget in the ui should be: settingNameCheckButton
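        # For example, toggling 'speakBlankLinesCheckButton' updates the
        # 'speakBlankLines' preference.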
#
settingName = Gtk.Buildable.get_name(widget)
# strip "CheckButton" from the end.
settingName = settingName[:-11]
self.prefsDict[settingName] = widget.get_active()
def keyEchoChecked(self, widget):
"""Signal handler for the "toggled" signal for the
keyEchoCheckbutton GtkCheckButton widget. The user has
[un]checked the 'Enable Key Echo' checkbox. Set the
'enableKeyEcho' preference to the new value. [In]sensitize
the checkboxes for the various types of key echo, depending
upon whether this value is checked or unchecked.
Arguments:
- widget: the component that generated the signal.
"""
self.prefsDict["enableKeyEcho"] = widget.get_active()
self._setKeyEchoItems()
def brailleSelectionChanged(self, widget):
"""Signal handler for the "toggled" signal for the
brailleSelectionNoneButton, brailleSelection7Button,
brailleSelection8Button or brailleSelectionBothButton
GtkRadioButton widgets. The user has toggled the braille
selection indicator value. If this signal was generated
as the result of a radio button getting selected (as
opposed to a radio button losing the selection), set the
'brailleSelectorIndicator' preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.BRAILLE_DOT_7:
self.prefsDict["brailleSelectorIndicator"] = \
settings.BRAILLE_SEL_7
elif widget.get_label() == guilabels.BRAILLE_DOT_8:
self.prefsDict["brailleSelectorIndicator"] = \
settings.BRAILLE_SEL_8
elif widget.get_label() == guilabels.BRAILLE_DOT_7_8:
self.prefsDict["brailleSelectorIndicator"] = \
settings.BRAILLE_SEL_BOTH
else:
self.prefsDict["brailleSelectorIndicator"] = \
settings.BRAILLE_SEL_NONE
def brailleLinkChanged(self, widget):
"""Signal handler for the "toggled" signal for the
brailleLinkNoneButton, brailleLink7Button,
brailleLink8Button or brailleLinkBothButton
GtkRadioButton widgets. The user has toggled the braille
link indicator value. If this signal was generated
as the result of a radio button getting selected (as
opposed to a radio button losing the selection), set the
'brailleLinkIndicator' preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.BRAILLE_DOT_7:
self.prefsDict["brailleLinkIndicator"] = \
settings.BRAILLE_LINK_7
elif widget.get_label() == guilabels.BRAILLE_DOT_8:
self.prefsDict["brailleLinkIndicator"] = \
settings.BRAILLE_LINK_8
elif widget.get_label() == guilabels.BRAILLE_DOT_7_8:
self.prefsDict["brailleLinkIndicator"] = \
settings.BRAILLE_LINK_BOTH
else:
self.prefsDict["brailleLinkIndicator"] = \
settings.BRAILLE_LINK_NONE
def brailleIndicatorChanged(self, widget):
"""Signal handler for the "toggled" signal for the
textBrailleNoneButton, textBraille7Button, textBraille8Button
or textBrailleBothButton GtkRadioButton widgets. The user has
toggled the text attributes braille indicator value. If this signal
was generated as the result of a radio button getting selected
(as opposed to a radio button losing the selection), set the
'textAttributesBrailleIndicator' preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.BRAILLE_DOT_7:
self.prefsDict["textAttributesBrailleIndicator"] = \
settings.TEXT_ATTR_BRAILLE_7
elif widget.get_label() == guilabels.BRAILLE_DOT_8:
self.prefsDict["textAttributesBrailleIndicator"] = \
settings.TEXT_ATTR_BRAILLE_8
elif widget.get_label() == guilabels.BRAILLE_DOT_7_8:
self.prefsDict["textAttributesBrailleIndicator"] = \
settings.TEXT_ATTR_BRAILLE_BOTH
else:
self.prefsDict["textAttributesBrailleIndicator"] = \
settings.TEXT_ATTR_BRAILLE_NONE
def punctuationLevelChanged(self, widget):
"""Signal handler for the "toggled" signal for the noneButton,
someButton or allButton GtkRadioButton widgets. The user has
toggled the speech punctuation level value. If this signal
was generated as the result of a radio button getting selected
(as opposed to a radio button losing the selection), set the
'verbalizePunctuationStyle' preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.PUNCTUATION_STYLE_NONE:
self.prefsDict["verbalizePunctuationStyle"] = \
settings.PUNCTUATION_STYLE_NONE
elif widget.get_label() == guilabels.PUNCTUATION_STYLE_SOME:
self.prefsDict["verbalizePunctuationStyle"] = \
settings.PUNCTUATION_STYLE_SOME
elif widget.get_label() == guilabels.PUNCTUATION_STYLE_MOST:
self.prefsDict["verbalizePunctuationStyle"] = \
settings.PUNCTUATION_STYLE_MOST
else:
self.prefsDict["verbalizePunctuationStyle"] = \
settings.PUNCTUATION_STYLE_ALL
def orcaModifierChanged(self, widget):
"""Signal handler for the changed signal for the orcaModifierComboBox
Set the 'orcaModifierKeys' preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
model = widget.get_model()
myIter = widget.get_active_iter()
orcaModifier = model[myIter][0]
self.prefsDict["orcaModifierKeys"] = orcaModifier.split(', ')
def progressBarVerbosityChanged(self, widget):
"""Signal handler for the changed signal for the progressBarVerbosity
GtkComboBox widget. Set the 'progressBarVerbosity' preference to
the new value.
Arguments:
- widget: the component that generated the signal.
"""
model = widget.get_model()
myIter = widget.get_active_iter()
progressBarVerbosity = model[myIter][0]
if progressBarVerbosity == guilabels.PROGRESS_BAR_ALL:
self.prefsDict["progressBarVerbosity"] = \
settings.PROGRESS_BAR_ALL
elif progressBarVerbosity == guilabels.PROGRESS_BAR_WINDOW:
self.prefsDict["progressBarVerbosity"] = \
settings.PROGRESS_BAR_WINDOW
else:
self.prefsDict["progressBarVerbosity"] = \
settings.PROGRESS_BAR_APPLICATION
def sayAllStyleChanged(self, widget):
"""Signal handler for the "changed" signal for the sayAllStyle
GtkComboBox widget. Set the 'sayAllStyle' preference to the
new value.
Arguments:
- widget: the component that generated the signal.
"""
model = widget.get_model()
myIter = widget.get_active_iter()
sayAllStyle = model[myIter][0]
if sayAllStyle == guilabels.SAY_ALL_STYLE_LINE:
self.prefsDict["sayAllStyle"] = settings.SAYALL_STYLE_LINE
elif sayAllStyle == guilabels.SAY_ALL_STYLE_SENTENCE:
self.prefsDict["sayAllStyle"] = settings.SAYALL_STYLE_SENTENCE
def dateFormatChanged(self, widget):
"""Signal handler for the "changed" signal for the dateFormat
GtkComboBox widget. Set the 'dateFormat' preference to the
new value.
Arguments:
- widget: the component that generated the signal.
"""
dateFormatCombo = widget.get_active()
if dateFormatCombo == DATE_FORMAT_LOCALE:
newFormat = settings.DATE_FORMAT_LOCALE
elif dateFormatCombo == DATE_FORMAT_NUMBERS_DM:
newFormat = settings.DATE_FORMAT_NUMBERS_DM
elif dateFormatCombo == DATE_FORMAT_NUMBERS_MD:
newFormat = settings.DATE_FORMAT_NUMBERS_MD
elif dateFormatCombo == DATE_FORMAT_NUMBERS_DMY:
newFormat = settings.DATE_FORMAT_NUMBERS_DMY
elif dateFormatCombo == DATE_FORMAT_NUMBERS_MDY:
newFormat = settings.DATE_FORMAT_NUMBERS_MDY
elif dateFormatCombo == DATE_FORMAT_NUMBERS_YMD:
newFormat = settings.DATE_FORMAT_NUMBERS_YMD
elif dateFormatCombo == DATE_FORMAT_FULL_DM:
newFormat = settings.DATE_FORMAT_FULL_DM
elif dateFormatCombo == DATE_FORMAT_FULL_MD:
newFormat = settings.DATE_FORMAT_FULL_MD
elif dateFormatCombo == DATE_FORMAT_FULL_DMY:
newFormat = settings.DATE_FORMAT_FULL_DMY
elif dateFormatCombo == DATE_FORMAT_FULL_MDY:
newFormat = settings.DATE_FORMAT_FULL_MDY
elif dateFormatCombo == DATE_FORMAT_FULL_YMD:
newFormat = settings.DATE_FORMAT_FULL_YMD
elif dateFormatCombo == DATE_FORMAT_ABBREVIATED_DM:
newFormat = settings.DATE_FORMAT_ABBREVIATED_DM
elif dateFormatCombo == DATE_FORMAT_ABBREVIATED_MD:
newFormat = settings.DATE_FORMAT_ABBREVIATED_MD
elif dateFormatCombo == DATE_FORMAT_ABBREVIATED_DMY:
newFormat = settings.DATE_FORMAT_ABBREVIATED_DMY
elif dateFormatCombo == DATE_FORMAT_ABBREVIATED_MDY:
newFormat = settings.DATE_FORMAT_ABBREVIATED_MDY
elif dateFormatCombo == DATE_FORMAT_ABBREVIATED_YMD:
newFormat = settings.DATE_FORMAT_ABBREVIATED_YMD
self.prefsDict["presentDateFormat"] = newFormat
def timeFormatChanged(self, widget):
"""Signal handler for the "changed" signal for the timeFormat
GtkComboBox widget. Set the 'timeFormat' preference to the
new value.
Arguments:
- widget: the component that generated the signal.
"""
timeFormatCombo = widget.get_active()
if timeFormatCombo == TIME_FORMAT_LOCALE:
newFormat = settings.TIME_FORMAT_LOCALE
elif timeFormatCombo == TIME_FORMAT_24_HMS:
newFormat = settings.TIME_FORMAT_24_HMS
elif timeFormatCombo == TIME_FORMAT_24_HMS_WITH_WORDS:
newFormat = settings.TIME_FORMAT_24_HMS_WITH_WORDS
elif timeFormatCombo == TIME_FORMAT_24_HM:
newFormat = settings.TIME_FORMAT_24_HM
elif timeFormatCombo == TIME_FORMAT_24_HM_WITH_WORDS:
newFormat = settings.TIME_FORMAT_24_HM_WITH_WORDS
self.prefsDict["presentTimeFormat"] = newFormat
def speechVerbosityChanged(self, widget):
"""Signal handler for the "toggled" signal for the speechBriefButton,
or speechVerboseButton GtkRadioButton widgets. The user has
toggled the speech verbosity level value. If this signal was
generated as the result of a radio button getting selected
(as opposed to a radio button losing the selection), set the
'speechVerbosityLevel' preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.VERBOSITY_LEVEL_BRIEF:
self.prefsDict["speechVerbosityLevel"] = \
settings.VERBOSITY_LEVEL_BRIEF
else:
self.prefsDict["speechVerbosityLevel"] = \
settings.VERBOSITY_LEVEL_VERBOSE
def tableSpeechChanged(self, widget):
"""Signal handler for the "toggled" signal for the cellSpeechButton,
or rowSpeechButton GtkRadioButton widgets. The user has
toggled the table row speech type value. If this signal was
generated as the result of a radio button getting selected
(as opposed to a radio button losing the selection), set the
'readTableCellRow' preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.TABLE_SPEAK_CELL:
self.prefsDict["readTableCellRow"] = False
else:
self.prefsDict["readTableCellRow"] = True
def speechProgressBarChecked(self, widget):
"""Signal handler for the "toggled" signal for the
speechProgressBarCheckButton GtkCheckButton widget.
The user has [un]checked the "Speak progress bar updates" checkbox.
Set the 'enableProgressBarUpdates' preference to the new value.
        Set the rest of the 'update interval' items [in]sensitive
depending upon whether this checkbox is checked.
Arguments:
- widget: the component that generated the signal.
"""
enable = widget.get_active()
self.prefsDict["enableProgressBarUpdates"] = enable
self.get_widget("progressBarUpdatesOptionsGrid").set_sensitive(enable)
def speakProgressBarValueChanged(self, widget):
"""Signal handler for the "value_changed" signal for the
speakProgressBarSpinButton GtkSpinButton widget.
The user has changed the value of the "speak progress bar
updates" spin button. Set the 'progressBarUpdateInterval'
preference to the new integer value.
Arguments:
- widget: the component that generated the signal.
"""
self.prefsDict["progressBarUpdateInterval"] = widget.get_value_as_int()
def abbrevRolenamesChecked(self, widget):
"""Signal handler for the "toggled" signal for the abbrevRolenames
GtkCheckButton widget. The user has [un]checked the 'Abbreviated
Rolenames' checkbox. Set the 'brailleRolenameStyle' preference
to the new value.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
self.prefsDict["brailleRolenameStyle"] = \
settings.BRAILLE_ROLENAME_STYLE_SHORT
else:
self.prefsDict["brailleRolenameStyle"] = \
settings.BRAILLE_ROLENAME_STYLE_LONG
def brailleVerbosityChanged(self, widget):
"""Signal handler for the "toggled" signal for the brailleBriefButton,
or brailleVerboseButton GtkRadioButton widgets. The user has
toggled the braille verbosity level value. If this signal was
generated as the result of a radio button getting selected
(as opposed to a radio button losing the selection), set the
'brailleVerbosityLevel' preference to the new value.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.VERBOSITY_LEVEL_BRIEF:
self.prefsDict["brailleVerbosityLevel"] = \
settings.VERBOSITY_LEVEL_BRIEF
else:
self.prefsDict["brailleVerbosityLevel"] = \
settings.VERBOSITY_LEVEL_VERBOSE
def keyModifiedToggle(self, cell, path, model, col):
"""When the user changes a checkbox field (boolean field)"""
model[path][col] = not model[path][col]
return
def editingKey(self, cell, editable, path, treeModel):
"""Starts user input of a Key for a selected key binding"""
self._presentMessage(messages.KB_ENTER_NEW_KEY)
orca_state.capturingKeys = True
editable.connect('key-press-event', self.kbKeyPressed)
return
def editingCanceledKey(self, editable):
"""Stops user input of a Key for a selected key binding"""
orca_state.capturingKeys = False
self._capturedKey = []
return
def _processKeyCaptured(self, keyPressedEvent):
"""Called when a new key event arrives and we are capturing keys.
(used for key bindings redefinition)
"""
# We want the keyname rather than the printable character.
# If it's not on the keypad, get the name of the unshifted
# character. (i.e. "1" instead of "!")
#
keycode = keyPressedEvent.hardware_keycode
keymap = Gdk.Keymap.get_default()
entries_for_keycode = keymap.get_entries_for_keycode(keycode)
entries = entries_for_keycode[-1]
eventString = Gdk.keyval_name(entries[0])
eventState = keyPressedEvent.state
orcaMods = settings.orcaModifierKeys
if eventString in orcaMods:
self._capturedKey = ['', settings.ORCA_MODIFIER_MASK, 0]
return False
modifierKeys = ['Alt_L', 'Alt_R', 'Control_L', 'Control_R',
'Shift_L', 'Shift_R', 'Meta_L', 'Meta_R',
'Num_Lock', 'Caps_Lock']
if eventString in modifierKeys:
return False
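        # This is the first key press of a capture (or a terminating key):
        # start a fresh capture with a click count of 1.
        #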
if not self._capturedKey \
or eventString in ['Return', 'Escape']:
self._capturedKey = [eventString, eventState, 1]
return True
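        # A key was pressed while a capture is already in progress: carry the
        # Orca modifier state over and bump the click count so double and
        # triple clicks can be captured.
        #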
string, modifiers, clickCount = self._capturedKey
isOrcaModifier = modifiers & settings.ORCA_MODIFIER_MASK
if isOrcaModifier:
eventState |= settings.ORCA_MODIFIER_MASK
self._capturedKey = [eventString, eventState, clickCount + 1]
return True
def kbKeyPressed(self, editable, event):
"""Special handler for the key_pressed events when editing the
keybindings. This lets us control what gets inserted into the
entry.
"""
keyProcessed = self._processKeyCaptured(event)
if not keyProcessed:
return True
if not self._capturedKey:
return False
keyName, modifiers, clickCount = self._capturedKey
if not keyName or keyName in ["Return", "Escape"]:
return False
isOrcaModifier = modifiers & settings.ORCA_MODIFIER_MASK
if keyName in ["Delete", "BackSpace"] and not isOrcaModifier:
editable.set_text("")
self._presentMessage(messages.KB_DELETED)
self._capturedKey = []
self.newBinding = None
return True
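        # Build a key binding from the captured key and check whether that
        # combination is already assigned to another handler.
        #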
self.newBinding = keybindings.KeyBinding(keyName,
settings.defaultModifierMask,
modifiers,
None,
clickCount)
modifierNames = keybindings.getModifierNames(modifiers)
clickCountString = self._clickCountToString(clickCount)
newString = modifierNames + keyName + clickCountString
description = self.pendingKeyBindings.get(newString)
if description is None:
match = lambda x: x.keysymstring == keyName \
and x.modifiers == modifiers \
and x.click_count == clickCount \
and x.handler
matches = list(filter(match, self.kbindings.keyBindings))
if matches:
description = matches[0].handler.description
if description:
msg = messages.KB_ALREADY_BOUND % description
delay = int(1000 * settings.doubleClickTimeout)
GLib.timeout_add(delay, self._presentMessage, msg)
else:
msg = messages.KB_CAPTURED % newString
editable.set_text(newString)
self._presentMessage(msg)
return True
def editedKey(self, cell, path, new_text, treeModel,
modMask, modUsed, key, click_count, text):
"""The user changed the key for a Keybinding: update the model of
the treeview.
"""
orca_state.capturingKeys = False
self._capturedKey = []
myiter = treeModel.get_iter_from_string(path)
try:
originalBinding = treeModel.get_value(myiter, text)
except:
originalBinding = ''
modified = (originalBinding != new_text)
try:
string = self.newBinding.keysymstring
mods = self.newBinding.modifiers
clickCount = self.newBinding.click_count
except:
string = ''
mods = 0
clickCount = 1
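        # Keep only the standard GDK modifier bits, and drop the shift lock
        # (Caps Lock) bit when the Orca modifier is also part of the
        # combination.
        #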
mods = mods & Gdk.ModifierType.MODIFIER_MASK
if mods & (1 << pyatspi.MODIFIER_SHIFTLOCK) \
and mods & settings.ORCA_MODIFIER_MASK:
mods ^= (1 << pyatspi.MODIFIER_SHIFTLOCK)
treeModel.set(myiter,
modMask, str(settings.defaultModifierMask),
modUsed, str(int(mods)),
key, string,
text, new_text,
click_count, str(clickCount),
MODIF, modified)
speech.stop()
if new_text:
message = messages.KB_CAPTURED_CONFIRMATION % new_text
description = treeModel.get_value(myiter, DESCRIP)
self.pendingKeyBindings[new_text] = description
else:
message = messages.KB_DELETED_CONFIRMATION
if modified:
self._presentMessage(message)
self.pendingKeyBindings[originalBinding] = ""
return
def presentToolTipsChecked(self, widget):
"""Signal handler for the "toggled" signal for the
presentToolTipsCheckButton GtkCheckButton widget.
The user has [un]checked the 'Present ToolTips'
checkbox. Set the 'presentToolTips'
preference to the new value if the user can present tooltips.
Arguments:
- widget: the component that generated the signal.
"""
self.prefsDict["presentToolTips"] = widget.get_active()
def keyboardLayoutChanged(self, widget):
"""Signal handler for the "toggled" signal for the generalDesktopButton,
or generalLaptopButton GtkRadioButton widgets. The user has
toggled the keyboard layout value. If this signal was
generated as the result of a radio button getting selected
(as opposed to a radio button losing the selection), set the
'keyboardLayout' preference to the new value. Also set the
matching list of Orca modifier keys
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.KEYBOARD_LAYOUT_DESKTOP:
self.prefsDict["keyboardLayout"] = \
settings.GENERAL_KEYBOARD_LAYOUT_DESKTOP
self.prefsDict["orcaModifierKeys"] = \
settings.DESKTOP_MODIFIER_KEYS
else:
self.prefsDict["keyboardLayout"] = \
settings.GENERAL_KEYBOARD_LAYOUT_LAPTOP
self.prefsDict["orcaModifierKeys"] = \
settings.LAPTOP_MODIFIER_KEYS
def pronunciationAddButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
pronunciationAddButton GtkButton widget. The user has clicked
the Add button on the Pronunciation pane. A new row will be
added to the end of the pronunciation dictionary list. Both the
actual and replacement strings will initially be set to an empty
string. Focus will be moved to that row.
Arguments:
- widget: the component that generated the signal.
"""
model = self.pronunciationView.get_model()
thisIter = model.append()
model.set(thisIter, ACTUAL, "", REPLACEMENT, "")
path = model.get_path(thisIter)
col = self.pronunciationView.get_column(0)
self.pronunciationView.grab_focus()
self.pronunciationView.set_cursor(path, col, True)
def pronunciationDeleteButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
pronunciationDeleteButton GtkButton widget. The user has clicked
the Delete button on the Pronunciation pane. The row in the
pronunciation dictionary list with focus will be deleted.
Arguments:
- widget: the component that generated the signal.
"""
model, oldIter = self.pronunciationView.get_selection().get_selected()
model.remove(oldIter)
def textSelectAllButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
textSelectAllButton GtkButton widget. The user has clicked
the Speak all button. Check all the text attributes and
then update the "enabledSpokenTextAttributes" and
"enabledBrailledTextAttributes" preference strings.
Arguments:
- widget: the component that generated the signal.
"""
attributes = _settingsManager.getSetting('allTextAttributes')
self._setSpokenTextAttributes(
self.getTextAttributesView, attributes, True)
self._setBrailledTextAttributes(
self.getTextAttributesView, attributes, True)
self._updateTextDictEntry()
def textUnselectAllButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
textUnselectAllButton GtkButton widget. The user has clicked
the Speak none button. Uncheck all the text attributes and
then update the "enabledSpokenTextAttributes" and
"enabledBrailledTextAttributes" preference strings.
Arguments:
- widget: the component that generated the signal.
"""
attributes = _settingsManager.getSetting('allTextAttributes')
self._setSpokenTextAttributes(
self.getTextAttributesView, attributes, False)
self._setBrailledTextAttributes(
self.getTextAttributesView, attributes, False)
self._updateTextDictEntry()
def textResetButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
textResetButton GtkButton widget. The user has clicked
the Reset button. Reset all the text attributes to their
initial state and then update the "enabledSpokenTextAttributes"
and "enabledBrailledTextAttributes" preference strings.
Arguments:
- widget: the component that generated the signal.
"""
attributes = _settingsManager.getSetting('allTextAttributes')
self._setSpokenTextAttributes(
self.getTextAttributesView, attributes, False)
self._setBrailledTextAttributes(
self.getTextAttributesView, attributes, False)
attributes = _settingsManager.getSetting('enabledSpokenTextAttributes')
self._setSpokenTextAttributes(
self.getTextAttributesView, attributes, True)
attributes = \
_settingsManager.getSetting('enabledBrailledTextAttributes')
self._setBrailledTextAttributes(
self.getTextAttributesView, attributes, True)
self._updateTextDictEntry()
def textMoveToTopButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
textMoveToTopButton GtkButton widget. The user has clicked
the Move to top button. Move the selected rows in the text
attribute view to the very top of the list and then update
the "enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
preference strings.
Arguments:
- widget: the component that generated the signal.
"""
textSelection = self.getTextAttributesView.get_selection()
[model, paths] = textSelection.get_selected_rows()
for path in paths:
thisIter = model.get_iter(path)
model.move_after(thisIter, None)
self._updateTextDictEntry()
def textMoveUpOneButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
textMoveUpOneButton GtkButton widget. The user has clicked
the Move up one button. Move the selected rows in the text
attribute view up one row in the list and then update the
"enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
preference strings.
Arguments:
- widget: the component that generated the signal.
"""
textSelection = self.getTextAttributesView.get_selection()
[model, paths] = textSelection.get_selected_rows()
for path in paths:
thisIter = model.get_iter(path)
indices = path.get_indices()
if indices[0]:
otherIter = model.iter_nth_child(None, indices[0]-1)
model.swap(thisIter, otherIter)
self._updateTextDictEntry()
def textMoveDownOneButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
textMoveDownOneButton GtkButton widget. The user has clicked
the Move down one button. Move the selected rows in the text
attribute view down one row in the list and then update the
"enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
preference strings.
Arguments:
- widget: the component that generated the signal.
"""
textSelection = self.getTextAttributesView.get_selection()
[model, paths] = textSelection.get_selected_rows()
noRows = model.iter_n_children(None)
for path in paths:
thisIter = model.get_iter(path)
indices = path.get_indices()
if indices[0] < noRows-1:
otherIter = model.iter_next(thisIter)
model.swap(thisIter, otherIter)
self._updateTextDictEntry()
def textMoveToBottomButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the
textMoveToBottomButton GtkButton widget. The user has clicked
the Move to bottom button. Move the selected rows in the text
attribute view to the bottom of the list and then update the
"enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
preference strings.
Arguments:
- widget: the component that generated the signal.
"""
textSelection = self.getTextAttributesView.get_selection()
[model, paths] = textSelection.get_selected_rows()
for path in paths:
thisIter = model.get_iter(path)
model.move_before(thisIter, None)
self._updateTextDictEntry()
def helpButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the helpButton
GtkButton widget. The user has clicked the Help button.
Arguments:
- widget: the component that generated the signal.
"""
orca.helpForOrca(page="preferences")
def restoreSettings(self):
"""Restore the settings we saved away when opening the preferences
dialog."""
# Restore the default rate/pitch/gain,
# in case the user played with the sliders.
#
voices = _settingsManager.getSetting('voices')
defaultVoice = voices[settings.DEFAULT_VOICE]
defaultVoice[acss.ACSS.GAIN] = self.savedGain
defaultVoice[acss.ACSS.AVERAGE_PITCH] = self.savedPitch
defaultVoice[acss.ACSS.RATE] = self.savedRate
def saveBasicSettings(self):
if not self._isInitialSetup:
self.restoreSettings()
enable = self.get_widget("speechSupportCheckButton").get_active()
self.prefsDict["enableSpeech"] = enable
if self.speechSystemsChoice:
self.prefsDict["speechServerFactory"] = \
self.speechSystemsChoice.__name__
if self.speechServersChoice:
self.prefsDict["speechServerInfo"] = \
self.speechServersChoice.getInfo()
        if self.defaultVoice is not None:
self.prefsDict["voices"] = {
settings.DEFAULT_VOICE: acss.ACSS(self.defaultVoice),
settings.UPPERCASE_VOICE: acss.ACSS(self.uppercaseVoice),
settings.HYPERLINK_VOICE: acss.ACSS(self.hyperlinkVoice),
settings.SYSTEM_VOICE: acss.ACSS(self.systemVoice),
}
def applyButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the applyButton
GtkButton widget. The user has clicked the Apply button.
        Write out the user's preferences. If GNOME accessibility hadn't
        previously been enabled, warn the user that they will need to
        log out. Shut down any active speech servers that were started.
        Reload the user's preferences to get the new speech, braille and
        key echo values to take effect. Do not dismiss the configuration
window.
Arguments:
- widget: the component that generated the signal.
"""
self.saveBasicSettings()
activeProfile = self.getComboBoxList(self.profilesCombo)
startingProfile = self.getComboBoxList(self.startingProfileCombo)
self.prefsDict['profile'] = activeProfile
self.prefsDict['activeProfile'] = activeProfile
self.prefsDict['startingProfile'] = startingProfile
_settingsManager.setStartingProfile(startingProfile)
self.writeUserPreferences()
orca.loadUserSettings()
self._initSpeechState()
self._populateKeyBindings()
self.__initProfileCombo()
def cancelButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the cancelButton
GtkButton widget. The user has clicked the Cancel button.
Don't write out the preferences. Destroy the configuration window.
Arguments:
- widget: the component that generated the signal.
"""
self.windowClosed(widget)
self.get_widget("orcaSetupWindow").destroy()
def okButtonClicked(self, widget=None):
"""Signal handler for the "clicked" signal for the okButton
GtkButton widget. The user has clicked the OK button.
        Write out the user's preferences. If GNOME accessibility hadn't
        previously been enabled, warn the user that they will need to
        log out. Shut down any active speech servers that were started.
        Reload the user's preferences to get the new speech, braille and
        key echo values to take effect. Hide the configuration window.
Arguments:
- widget: the component that generated the signal.
"""
self.applyButtonClicked(widget)
self._cleanupSpeechServers()
self.get_widget("orcaSetupWindow").destroy()
def windowClosed(self, widget):
"""Signal handler for the "closed" signal for the orcaSetupWindow
GtkWindow widget. This is effectively the same as pressing the
cancel button, except the window is destroyed for us.
Arguments:
- widget: the component that generated the signal.
"""
factory = _settingsManager.getSetting('speechServerFactory')
if factory:
self._setSpeechSystemsChoice(factory)
server = _settingsManager.getSetting('speechServerInfo')
if server:
self._setSpeechServersChoice(server)
self._cleanupSpeechServers()
self.restoreSettings()
def windowDestroyed(self, widget):
"""Signal handler for the "destroyed" signal for the orcaSetupWindow
GtkWindow widget. Reset orca_state.orcaOS to None, so that the
GUI can be rebuilt from the GtkBuilder file the next time the user
wants to display the configuration GUI.
Arguments:
- widget: the component that generated the signal.
"""
self.keyBindView.set_model(None)
self.getTextAttributesView.set_model(None)
self.pronunciationView.set_model(None)
self.keyBindView.set_headers_visible(False)
self.getTextAttributesView.set_headers_visible(False)
self.pronunciationView.set_headers_visible(False)
self.keyBindView.hide()
self.getTextAttributesView.hide()
self.pronunciationView.hide()
orca_state.orcaOS = None
def showProfileGUI(self, widget):
"""Show profile Dialog to add a new one"""
orca_gui_profile.showProfileUI(self)
def saveProfile(self, profileToSaveLabel):
"""Creates a new profile based on the name profileToSaveLabel and
updates the Preferences dialog combo boxes accordingly."""
if not profileToSaveLabel:
return
profileToSave = profileToSaveLabel.replace(' ', '_').lower()
profile = [profileToSaveLabel, profileToSave]
def saveActiveProfile(newProfile = True):
if newProfile:
activeProfileIter = self.profilesComboModel.append(profile)
self.profilesCombo.set_active_iter(activeProfileIter)
self.prefsDict['profile'] = profile
self.prefsDict['activeProfile'] = profile
self.saveBasicSettings()
self.writeUserPreferences()
availableProfiles = [p[1] for p in self.__getAvailableProfiles()]
if isinstance(profileToSave, str) \
and profileToSave != '' \
                and profileToSave not in availableProfiles \
and profileToSave != 'default':
saveActiveProfile()
else:
            if profileToSave is not None:
message = guilabels.PROFILE_CONFLICT_MESSAGE % \
("<b>%s</b>" % profileToSaveLabel)
dialog = Gtk.MessageDialog(None,
Gtk.DialogFlags.MODAL,
type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.YES_NO)
dialog.set_markup("<b>%s</b>" % guilabels.PROFILE_CONFLICT_LABEL)
dialog.format_secondary_markup(message)
dialog.set_title(guilabels.PROFILE_CONFLICT_TITLE)
response = dialog.run()
if response == Gtk.ResponseType.YES:
dialog.destroy()
saveActiveProfile(False)
else:
dialog.destroy()
def loadProfileButtonClicked(self, widget):
"""Load profile button clicked handler"""
if self._isInitialSetup:
return
dialog = Gtk.MessageDialog(None,
Gtk.DialogFlags.MODAL,
type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.YES_NO)
dialog.set_markup("<b>%s</b>" % guilabels.PROFILE_LOAD_LABEL)
dialog.format_secondary_markup(guilabels.PROFILE_LOAD_MESSAGE)
response = dialog.run()
if response == Gtk.ResponseType.YES:
dialog.destroy()
self.loadSelectedProfile()
else:
dialog.destroy()
def loadSelectedProfile(self):
"""Load selected profile"""
self.saveBasicSettings()
activeProfile = self.getComboBoxList(self.profilesCombo)
self.prefsDict['activeProfile'] = activeProfile
_settingsManager.setProfile(activeProfile[1])
self.prefsDict = _settingsManager.getGeneralSettings(activeProfile[1])
orca.loadUserSettings(skipReloadMessage=True)
self._initGUIState()
self._initSpeechState()
self._populateKeyBindings()
self.__initProfileCombo()
class WarningDialogGUI(Gtk.MessageDialog):
def __init__(self):
Gtk.MessageDialog.__init__(self)
self.set_property('message-type', Gtk.MessageType.INFO)
self.set_property('text', messages.PREFERENCES_WARNING_DIALOG)
self.add_button('gtk-ok', Gtk.ResponseType.OK)
self.connect('response', self.onResponse)
self.connect('destroy', self.onDestroy)
def init(self):
pass
def showGUI(self):
"""Show the Warning dialog."""
ts = orca_state.lastInputEventTimestamp
if ts == 0:
ts = Gtk.get_current_event_time()
self.present_with_time(ts)
def onResponse(self, widget, response):
"""Signal handler for the responses emitted by the dialog."""
if response == Gtk.ResponseType.OK:
self.destroy()
def onDestroy(self, widget):
"""Signal handler for the 'destroy' signal of the Warning dialog."""
orca_state.orcaWD = None
def showPreferencesUI():
if not orca_state.appOS and not orca_state.orcaOS:
startingProfile = _settingsManager.profile
prefsDict = _settingsManager.getGeneralSettings(startingProfile)
orca_state.prefsUIFile = \
os.path.join(orca_platform.prefix,
orca_platform.datadirname,
orca_platform.package,
"ui",
"orca-setup.ui")
orca_state.orcaOS = OrcaSetupGUI(orca_state.prefsUIFile,
"orcaSetupWindow", prefsDict)
orca_state.orcaOS.init()
orca_state.orcaOS.showGUI()
else:
if not orca_state.orcaWD:
orca_state.orcaWD = WarningDialogGUI()
orca_state.orcaWD.showGUI()
def main():
locale.setlocale(locale.LC_ALL, '')
showPreferencesUI()
Gtk.main()
sys.exit(0)
if __name__ == "__main__":
main()
|
h4ck3rm1k3/orca-sonar
|
src/orca/orca_gui_prefs.py
|
Python
|
lgpl-2.1
| 128,646
|
[
"ORCA"
] |
64b9d8255fb74c9a7d5ece1a32caf53c9bc2f71f8e5c59f2b94e31d402a8d663
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlencode
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import open_url
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def g_connect(method):
''' wrapper to lazily initialize connection info to galaxy '''
def wrapped(self, *args, **kwargs):
if not self.initialized:
display.vvvv("Initial connection to galaxy_server: %s" % self._api_server)
server_version = self._get_server_api_version()
if server_version not in self.SUPPORTED_VERSIONS:
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
self.baseurl = '%s/api/%s' % (self._api_server, server_version)
self.version = server_version # for future use
display.vvvv("Base API: %s" % self.baseurl)
self.initialized = True
return method(self, *args, **kwargs)
return wrapped
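# Illustrative sketch of the lazy initialization above (names are hypothetical
# and not taken from this module): the first call to a @g_connect-decorated
# method performs the /api/ version handshake and caches the base URL, while
# later calls reuse it.
#
#     api = GalaxyAPI(galaxy)                 # initialized is False
#     api.lookup_role_by_name('user.role')    # runs _get_server_api_version(),
#                                             # baseurl -> '<api_server>/api/v1'
#     api.search_roles('nginx')               # no second handshake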
class GalaxyAPI(object):
    ''' This class is meant to be used as an API client for an Ansible Galaxy server '''
SUPPORTED_VERSIONS = ['v1']
def __init__(self, galaxy):
self.galaxy = galaxy
self.token = GalaxyToken()
self._api_server = C.GALAXY_SERVER
self._validate_certs = not galaxy.options.ignore_certs
self.baseurl = None
self.version = None
self.initialized = False
display.debug('Validate TLS certificates: %s' % self._validate_certs)
# set the API server
if galaxy.options.api_server != C.GALAXY_SERVER:
self._api_server = galaxy.options.api_server
def __auth_header(self):
token = self.token.get()
if token is None:
raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.")
return {'Authorization': 'Token ' + token}
@g_connect
def __call_galaxy(self, url, args=None, headers=None, method=None):
if args and not headers:
headers = self.__auth_header()
try:
display.vvv(url)
resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method,
timeout=20)
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
except HTTPError as e:
res = json.loads(to_text(e.fp.read(), errors='surrogate_or_strict'))
raise AnsibleError(res['detail'])
return data
@property
def api_server(self):
return self._api_server
@property
def validate_certs(self):
return self._validate_certs
def _get_server_api_version(self):
"""
Fetches the Galaxy API current version to ensure
the API server is up and reachable.
"""
url = '%s/api/' % self._api_server
try:
return_data = open_url(url, validate_certs=self._validate_certs)
except Exception as e:
raise AnsibleError("Failed to get data from the API server (%s): %s " % (url, to_native(e)))
try:
data = json.loads(to_text(return_data.read(), errors='surrogate_or_strict'))
except Exception as e:
raise AnsibleError("Could not process data from the API server (%s): %s " % (url, to_native(e)))
if 'current_version' not in data:
raise AnsibleError("missing required 'current_version' from server response (%s)" % url)
return data['current_version']
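    # A minimal sketch of the response shape this method relies on, assuming a
    # typical Galaxy server (the exact payload may carry more fields):
    #
    #     GET <api_server>/api/  ->  {"current_version": "v1", ...}
    #
    # Only 'current_version' is read; anything else in the document is ignored.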
@g_connect
def authenticate(self, github_token):
"""
Retrieve an authentication token
"""
url = '%s/tokens/' % self.baseurl
args = urlencode({"github_token": github_token})
resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST")
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
return data
@g_connect
def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
"""
Post an import request
"""
url = '%s/imports/' % self.baseurl
args = {
"github_user": github_user,
"github_repo": github_repo,
"github_reference": reference if reference else ""
}
if role_name:
args['alternate_role_name'] = role_name
elif github_repo.startswith('ansible-role'):
args['alternate_role_name'] = github_repo[len('ansible-role') + 1:]
data = self.__call_galaxy(url, args=urlencode(args))
if data.get('results', None):
return data['results']
return data
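    # Worked example of the role-name fallback above (values hypothetical):
    # importing github_repo='ansible-role-nginx' with no explicit role_name
    # sends alternate_role_name='nginx', because the 'ansible-role-' prefix
    # is stripped before the request is posted to /imports/.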
@g_connect
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
"""
Check the status of an import task.
"""
url = '%s/imports/' % self.baseurl
if task_id is not None:
url = "%s?id=%d" % (url, task_id)
elif github_user is not None and github_repo is not None:
url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo)
else:
raise AnsibleError("Expected task_id or github_user and github_repo")
data = self.__call_galaxy(url)
return data['results']
@g_connect
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name.
"""
role_name = urlquote(role_name)
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except:
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
data = self.__call_galaxy(url)
if len(data["results"]) != 0:
return data["results"][0]
return None
@g_connect
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
try:
url = '%s/roles/%s/%s/?page_size=50' % (self.baseurl, role_id, related)
data = self.__call_galaxy(url)
results = data['results']
done = (data.get('next_link', None) is None)
while not done:
url = '%s%s' % (self._api_server, data['next_link'])
data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except:
return None
@g_connect
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = '%s/%s/?page_size' % (self.baseurl, what)
data = self.__call_galaxy(url)
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next_link', None) is None)
while not done:
url = '%s%s' % (self._api_server, data['next_link'])
data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
@g_connect
def search_roles(self, search, **kwargs):
search_url = self.baseurl + '/search/roles/?'
if search:
search_url += '&autocomplete=' + urlquote(search)
tags = kwargs.get('tags', None)
platforms = kwargs.get('platforms', None)
page_size = kwargs.get('page_size', None)
author = kwargs.get('author', None)
if tags and isinstance(tags, string_types):
tags = tags.split(',')
search_url += '&tags_autocomplete=' + '+'.join(tags)
if platforms and isinstance(platforms, string_types):
platforms = platforms.split(',')
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
if page_size:
search_url += '&page_size=%s' % page_size
if author:
search_url += '&username_autocomplete=%s' % author
data = self.__call_galaxy(search_url)
return data
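    # Sketch of a search URL this builds (parameter values are hypothetical):
    # search='nginx', tags='web,proxy', page_size=20 yields
    #
    #     <baseurl>/search/roles/?&autocomplete=nginx&tags_autocomplete=web+proxy&page_size=20
    #
    # The '?&' pair comes from the unconditional '?' suffix combined with the
    # '&'-prefixed query parameters.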
@g_connect
def add_secret(self, source, github_user, github_repo, secret):
url = "%s/notification_secrets/" % self.baseurl
args = urlencode({
"source": source,
"github_user": github_user,
"github_repo": github_repo,
"secret": secret
})
data = self.__call_galaxy(url, args=args)
return data
@g_connect
def list_secrets(self):
url = "%s/notification_secrets" % self.baseurl
data = self.__call_galaxy(url, headers=self.__auth_header())
return data
@g_connect
def remove_secret(self, secret_id):
url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
@g_connect
def delete_role(self, github_user, github_repo):
url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl, github_user, github_repo)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
|
mheap/ansible
|
lib/ansible/galaxy/api.py
|
Python
|
gpl-3.0
| 11,033
|
[
"Galaxy"
] |
e2166a22b4d6eda3ab02a6e9265ee1f37ef566d1caeb67cf8d600b0ee86af1c8
|
from openzwave.network import ZWaveNode
from Firefly import logging, scheduler
from Firefly.helpers.device import *
from Firefly.helpers.device.device import Device
from Firefly.helpers.metadata.metadata import action_battery
from Firefly.util.zwave_command_class import COMMAND_CLASS_BATTERY, COMMAND_CLASS_DESC
class ZwavePrarmValue(object):
#TODO: Maybe set better defaults
def __init__(self, index=None, label=None, ref=None, value=None, command_class=None, value_type=None, genre=None):
self.index = index
    try:
      # Normalize the label to lower case; fall back to the raw value when it
      # is not a string (e.g. None).
      self.label = label.lower()
    except AttributeError:
      self.label = label
    self.ref = ref
    self.value = value
    try:
      # Translate the numeric command class into its description when known.
      self.command_class = COMMAND_CLASS_DESC[command_class]
    except (KeyError, TypeError):
      self.command_class = command_class
self.type = value_type
self.genre = genre
def __repr__(self):
return '<[ZWAVE VALUE] %(label)s: %(value)s [index: %(index)s, command class: %(command_class)s, genre: %(genre)s] %(ref)s>' % {
'label': self.label,
'value': self.value,
'index': self.index,
'command_class': self.command_class,
'genre': self.genre,
'ref': self.ref
}
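# Illustrative example of how a value renders through __repr__ above (all
# field values are hypothetical; the command class description comes from
# COMMAND_CLASS_DESC when the id is known, otherwise the raw id is kept):
#
#     v = ZwavePrarmValue(index=3, label='Battery Level', ref='value_3',
#                         value=86, command_class=128, genre='User')
#     repr(v)
#     # '<[ZWAVE VALUE] battery level: 86 [index: 3, command class: ...,
#     #   genre: User] value_3>'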
class ZwaveDevice(Device):
def __init__(self, firefly, package, title, author, commands, requests, device_type, **kwargs):
#commands.append('ZWAVE_CONFIG')
#commands.append('ZWAVE_UPDATE')
#requests.append('SENSORS')
#requests.append('PARAMS')
#requests.append('RAW_VALUES')
#requests.append('ZWAVE_VALUES')
#requests.append('battery')
super().__init__(firefly, package, title, author, commands, requests, device_type, **kwargs)
self._node = kwargs.get('node')
self._sensors = {}
self._switches = {}
self._config_params = {}
self.zwave_values = {}
self._raw_values = {}
self._config_updated = False
self._update_try_count = 0
self._node_id = kwargs.get('node_id')
self._manufacturer_id = ''
self._manufacturer_name = ''
self._product_name = ''
self._product_type = ''
self.value_map = kwargs.get('value_map', {})
self.add_command('ZWAVE_CONFIG', self.zwave_config)
self.add_command('ZWAVE_UPDATE', self.update_from_zwave)
#self.add_request('SENSORS', self.get_sensors)
#self.add_request('PARAMS', self.get_params)
#self.add_request('RAW_VALUES', self.get_raw_values)
self.add_request('ZWAVE_VALUES', self.get_zwave_values)
self._battery = kwargs.get('battery', 'NOT REPORTED')
#self.add_request('battery', self.get_battery)
#self.add_action('battery', metaText(text_request='battery', context='Current Battery Level', title='Battery'))
self._update_lock = False
self._last_command_source = 'startup'
def update_device_config(self, **kwargs):
self.node.refresh_info()
self._config_updated = True
def zwave_config(self, **kwargs):
if self._node is None:
logging.critical('FAILING TO UPDATE DEVICE')
return False
param = kwargs.get('id')
value = kwargs.get('value')
size = kwargs.get('size', 2)
if size:
size = int(size)
if param and value:
param = int(param)
value = int(value)
self._node.set_config_param(param, value, size=size)
return True
def export(self, current_values: bool = True, api_view: bool = False) -> dict:
export_data = super().export(current_values, api_view)
export_data['node_id'] = self._node_id
export_data['manufacturer_id'] = self._manufacturer_id
export_data['manufacturer_name'] = self._manufacturer_name
export_data['product_name'] = self._product_name
export_data['product_type'] = self._product_type
export_data['battery'] = self._battery
export_data['value_map'] = self.value_map
return export_data
def get_zwave_values(self, **kwargs):
return_data = []
for idx, value in self.zwave_values.items():
return_data.append(value.__dict__)
return return_data
def get_sensors(self, **kwargs):
sensor = kwargs.get('sensor')
if sensor:
s = self._sensors.get(sensor)
return s
return self._sensors
def get_params(self, **kwargs):
values = kwargs.get('VALUE')
if values:
s = self._config_params.get(values)
return s
return self._config_params
def get_raw_values(self, **kwargs):
values = kwargs.get('VALUE')
if values:
s = self._raw_values.get(values)
return s
return self._raw_values
def update_from_zwave(self, node: ZWaveNode = None, ignore_update=False, **kwargs):
    '''
    Currently the update command is not in COMMANDS. This is because it acts
    differently right now; that may change in the near future.
    Args:
      node (): the ZWaveNode to read updated values from.
    Returns:
    '''
logging.debug('Updating ZWave Values: %s' % str(kwargs))
# Return if no valid node object.
if node is None and self._node is None:
return
if node is None:
node = self._node
try:
if not self._manufacturer_id:
self._manufacturer_id = node.manufacturer_id
if not self._manufacturer_name:
self._manufacturer_name = node.manufacturer_name
if not self._product_name:
self._product_name = node.product_name
if not self._product_type:
self._product_type = node.product_type
except:
pass
# This will set the node on the first update once zwave boots
self._node = node
self._node_id = node.node_id
# Update config if device config has not been updated.
if not self._config_updated:
for s, i in node.get_values().items():
self.zwave_values["%s_%s" %(i.genre.lower(), i.index)] = ZwavePrarmValue(i.index, i.label, s, i.data, i.command_class, i.type, i.genre)
elif kwargs.get('values'):
values = kwargs.get('values')
self.zwave_values["%s_%s" %(values.genre.lower(), values.index)] = ZwavePrarmValue(values.index, values.label, values.value_id, values.data, values.command_class, values.type, values.genre)
if node.has_command_class(COMMAND_CLASS_BATTERY) and BATTERY not in self.request_map:
self.add_request(BATTERY, self.get_battery)
self.add_action(BATTERY, action_battery())
if self._node.is_ready and self._update_try_count <= 20 and not self._config_updated:
scheduler.runInS(5, self.update_device_config, '%s-update_config' % self.id, max_instances=1)
logging.debug('Not Done updating ZWave Values')
if self._update_try_count > 20:
self._config_updated = True
logging.debug('Done updating ZWave Values')
def get_battery(self):
return self._battery
def get_zwave_value(self, value_id: int) -> ZwavePrarmValue:
try:
return self.zwave_values[value_id]
except KeyError:
return ZwavePrarmValue()
except Exception as e:
logging.error('[ZWAVE DEVICE] unknown error: %s' % e)
return ZwavePrarmValue()
def verify_set_zwave_param(self, param_index, param_value, size=2) -> bool:
    # Compare the cached config value (not the wrapper object) against the
    # desired value; push the new value to the node when they differ.
    if self.get_zwave_value("config_%s" % param_index).value != param_value:
      self.node.set_config_param(param_index, param_value, size)
      return False
    return True
def verify_set_zwave_params(self, param_list) -> bool:
successful = True
for param in param_list:
if len(param) == 3:
successful &= self.verify_set_zwave_param(param[0], param[1], param[2])
elif len(param) == 2:
successful &= self.verify_set_zwave_param(param[0], param[1])
else:
logging.error('[ZWAVE DEVICE] unknown param length')
return successful
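  # Expected shape of param_list above (values hypothetical): a list of
  # (index, value) or (index, value, size) tuples, e.g.
  #
  #     device.verify_set_zwave_params([(3, 1), (4, 20, 1)])
  #
  # Every entry whose cached value does not already match triggers
  # node.set_config_param() and makes the combined result False.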
@property
def node(self):
return self._node
|
Firefly-Automation/Firefly
|
Firefly/components/zwave/zwave_device.py
|
Python
|
apache-2.0
| 7,575
|
[
"Firefly"
] |
da231a5d6a10cd73001c93385b7e06360d3f592d8affaa116728da93a496acf2
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from mongodb import StorableMixin
from util import DT
class LogEntry(StorableMixin):
"""
A storable representation of a log entry
Examples
--------
>>> p = Project('tutorial-project')
>>> l = LogEntry('worker', 'failed execution', 'simsalabim, didnt work')
>>> print l
>>> p.logs.add(l)
Attributes
----------
logger : str
the name of the logger for classification
title : str
a short title for the log entry
message : str
the long and detailed message
level : int
pick `LogEntry.SEVERE`, `LogEntry.ERROR` or `LogEntry.INFO` (default)
objs : dict of storable objects
you can attach objects that can help with specifying the error message
"""
SEVERE = 1
ERROR = 2
INFO = 3
def __init__(self, logger, title, message, level=INFO, objs=None):
super(LogEntry, self).__init__()
self.logger = logger
self.title = title
self.message = message
self.level = level
self.objs = objs
def __str__(self):
return '%s [%s:%s] %s\n%s' % (
DT(self.__time__).time,
self.logger,
self.level,
self.title,
self.message
)
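    # Example of the rendered form produced by __str__ (the timestamp is
    # hypothetical, the rest reuses the docstring example above):
    #
    #     12:30:01 [worker:3] failed execution
    #     simsalabim, didnt work
    #
    # i.e. "<time> [<logger>:<level>] <title>" followed by the message.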
|
thempel/adaptivemd
|
adaptivemd/logentry.py
|
Python
|
lgpl-2.1
| 2,269
|
[
"MDTraj"
] |
f2dbec87a79599335ef525d485f6bda0c1396d1f37ed2bb7a14a1e1caadc1226
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Base class for a wrapper over QWebView/QWebEngineView."""
import enum
import itertools
import attr
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QUrl, QObject, QSizeF, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget, QApplication
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.keyinput import modeman
from qutebrowser.config import config
from qutebrowser.utils import (utils, objreg, usertypes, log, qtutils,
urlutils, message)
from qutebrowser.misc import miscwidgets, objects
from qutebrowser.browser import mouse, hints
from qutebrowser.qt import sip
tab_id_gen = itertools.count(0)
def create(win_id, private, parent=None):
"""Get a QtWebKit/QtWebEngine tab object.
Args:
win_id: The window ID where the tab will be shown.
private: Whether the tab is a private/off the record tab.
parent: The Qt parent to set.
"""
# Importing modules here so we don't depend on QtWebEngine without the
# argument and to avoid circular imports.
mode_manager = modeman.instance(win_id)
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginetab
tab_class = webenginetab.WebEngineTab
else:
from qutebrowser.browser.webkit import webkittab
tab_class = webkittab.WebKitTab
return tab_class(win_id=win_id, mode_manager=mode_manager, private=private,
parent=parent)
def init():
"""Initialize backend-specific modules."""
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginetab
webenginetab.init()
class WebTabError(Exception):
"""Base class for various errors."""
class UnsupportedOperationError(WebTabError):
"""Raised when an operation is not supported with the given backend."""
TerminationStatus = enum.Enum('TerminationStatus', [
'normal',
'abnormal', # non-zero exit status
'crashed', # e.g. segfault
'killed',
'unknown',
])
@attr.s
class TabData:
"""A simple namespace with a fixed set of attributes.
Attributes:
keep_icon: Whether the (e.g. cloned) icon should not be cleared on page
load.
inspector: The QWebInspector used for this webview.
viewing_source: Set if we're currently showing a source view.
Only used when sources are shown via pygments.
open_target: Where to open the next link.
Only used for QtWebKit.
override_target: Override for open_target for fake clicks (like hints).
Only used for QtWebKit.
pinned: Flag to pin the tab.
fullscreen: Whether the tab has a video shown fullscreen currently.
netrc_used: Whether netrc authentication was performed.
input_mode: current input mode for the tab.
"""
keep_icon = attr.ib(False)
viewing_source = attr.ib(False)
inspector = attr.ib(None)
open_target = attr.ib(usertypes.ClickTarget.normal)
override_target = attr.ib(None)
pinned = attr.ib(False)
fullscreen = attr.ib(False)
netrc_used = attr.ib(False)
input_mode = attr.ib(usertypes.KeyMode.normal)
def should_show_icon(self):
return (config.val.tabs.favicons.show == 'always' or
config.val.tabs.favicons.show == 'pinned' and self.pinned)
class AbstractAction:
"""Attribute of AbstractTab for Qt WebActions.
Class attributes (overridden by subclasses):
action_class: The class actions are defined on (QWeb{Engine,}Page)
action_base: The type of the actions (QWeb{Engine,}Page.WebAction)
"""
action_class = None
action_base = None
def __init__(self, tab):
self._widget = None
self._tab = tab
def exit_fullscreen(self):
"""Exit the fullscreen mode."""
raise NotImplementedError
def save_page(self):
"""Save the current page."""
raise NotImplementedError
def run_string(self, name):
"""Run a webaction based on its name."""
member = getattr(self.action_class, name, None)
if not isinstance(member, self.action_base):
raise WebTabError("{} is not a valid web action!".format(name))
self._widget.triggerPageAction(member)
def show_source(self,
pygments=False): # pylint: disable=redefined-outer-name
"""Show the source of the current page in a new tab."""
raise NotImplementedError
def _show_source_pygments(self):
def show_source_cb(source):
"""Show source as soon as it's ready."""
# WORKAROUND for https://github.com/PyCQA/pylint/issues/491
# pylint: disable=no-member
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table')
# pylint: enable=no-member
highlighted = pygments.highlight(source, lexer, formatter)
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
new_tab = tb.tabopen(background=False, related=True)
new_tab.set_html(highlighted, self._tab.url())
new_tab.data.viewing_source = True
self._tab.dump_async(show_source_cb)
class AbstractPrinting:
"""Attribute of AbstractTab for printing the page."""
def __init__(self):
self._widget = None
def check_pdf_support(self):
raise NotImplementedError
def check_printer_support(self):
raise NotImplementedError
def check_preview_support(self):
raise NotImplementedError
def to_pdf(self, filename):
raise NotImplementedError
def to_printer(self, printer, callback=None):
"""Print the tab.
Args:
printer: The QPrinter to print to.
callback: Called with a boolean
(True if printing succeeded, False otherwise)
"""
raise NotImplementedError
class AbstractSearch(QObject):
"""Attribute of AbstractTab for doing searches.
Attributes:
text: The last thing this view was searched for.
search_displayed: Whether we're currently displaying search results in
this view.
_flags: The flags of the last search (needs to be set by subclasses).
_widget: The underlying WebView widget.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._widget = None
self.text = None
self.search_displayed = False
def _is_case_sensitive(self, ignore_case):
"""Check if case-sensitivity should be used.
This assumes self.text is already set properly.
Arguments:
ignore_case: The ignore_case value from the config.
"""
mapping = {
'smart': not self.text.islower(),
'never': True,
'always': False,
}
return mapping[ignore_case]
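    # Worked example of the mapping above: with ignore_case='smart', searching
    # for 'Foo' (contains an upper-case letter) is case-sensitive, while 'foo'
    # is not; 'never' always reports case-sensitive and 'always' never does.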
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
"""Find the given text on the page.
Args:
text: The text to search for.
            ignore_case: Search case-insensitively. ('always'/'never'/'smart')
reverse: Reverse search direction.
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
def clear(self):
"""Clear the current search."""
raise NotImplementedError
def prev_result(self, *, result_cb=None):
"""Go to the previous result of the current search.
Args:
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
def next_result(self, *, result_cb=None):
"""Go to the next result of the current search.
Args:
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
class AbstractZoom(QObject):
"""Attribute of AbstractTab for controlling zoom.
Attributes:
_neighborlist: A NeighborList with the zoom levels.
_default_zoom_changed: Whether the zoom was changed from the default.
"""
def __init__(self, tab, parent=None):
super().__init__(parent)
self._tab = tab
self._widget = None
self._default_zoom_changed = False
self._init_neighborlist()
config.instance.changed.connect(self._on_config_changed)
self._zoom_factor = float(config.val.zoom.default) / 100
# # FIXME:qtwebengine is this needed?
# # For some reason, this signal doesn't get disconnected automatically
# # when the WebView is destroyed on older PyQt versions.
# # See https://github.com/qutebrowser/qutebrowser/issues/390
# self.destroyed.connect(functools.partial(
# cfg.changed.disconnect, self.init_neighborlist))
@pyqtSlot(str)
def _on_config_changed(self, option):
if option in ['zoom.levels', 'zoom.default']:
if not self._default_zoom_changed:
factor = float(config.val.zoom.default) / 100
self.set_factor(factor)
self._init_neighborlist()
def _init_neighborlist(self):
"""Initialize self._neighborlist."""
levels = config.val.zoom.levels
self._neighborlist = usertypes.NeighborList(
levels, mode=usertypes.NeighborList.Modes.edge)
self._neighborlist.fuzzyval = config.val.zoom.default
def offset(self, offset):
"""Increase/Decrease the zoom level by the given offset.
Args:
offset: The offset in the zoom level list.
Return:
The new zoom percentage.
"""
level = self._neighborlist.getitem(offset)
self.set_factor(float(level) / 100, fuzzyval=False)
return level
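    # Illustrative walk-through (the level list is whatever zoom.levels is
    # configured to; the numbers here are hypothetical): with levels
    # [50, 75, 100, 125, 150] and the list currently at 100, offset(1) moves
    # the NeighborList to 125, calls set_factor(1.25, fuzzyval=False) and
    # returns 125.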
def _set_factor_internal(self, factor):
raise NotImplementedError
def set_factor(self, factor, *, fuzzyval=True):
"""Zoom to a given zoom factor.
Args:
factor: The zoom factor as float.
fuzzyval: Whether to set the NeighborLists fuzzyval.
"""
if fuzzyval:
self._neighborlist.fuzzyval = int(factor * 100)
if factor < 0:
raise ValueError("Can't zoom to factor {}!".format(factor))
default_zoom_factor = float(config.val.zoom.default) / 100
self._default_zoom_changed = (factor != default_zoom_factor)
self._zoom_factor = factor
self._set_factor_internal(factor)
def factor(self):
return self._zoom_factor
def set_default(self):
self._set_factor_internal(float(config.val.zoom.default) / 100)
def set_current(self):
self._set_factor_internal(self._zoom_factor)
class AbstractCaret(QObject):
"""Attribute of AbstractTab for caret browsing.
Signals:
selection_toggled: Emitted when the selection was toggled.
arg: Whether the selection is now active.
"""
selection_toggled = pyqtSignal(bool)
def __init__(self, tab, mode_manager, parent=None):
super().__init__(parent)
self._tab = tab
self._widget = None
self.selection_enabled = False
mode_manager.entered.connect(self._on_mode_entered)
mode_manager.left.connect(self._on_mode_left)
def _on_mode_entered(self, mode):
raise NotImplementedError
def _on_mode_left(self, mode):
raise NotImplementedError
def move_to_next_line(self, count=1):
raise NotImplementedError
def move_to_prev_line(self, count=1):
raise NotImplementedError
def move_to_next_char(self, count=1):
raise NotImplementedError
def move_to_prev_char(self, count=1):
raise NotImplementedError
def move_to_end_of_word(self, count=1):
raise NotImplementedError
def move_to_next_word(self, count=1):
raise NotImplementedError
def move_to_prev_word(self, count=1):
raise NotImplementedError
def move_to_start_of_line(self):
raise NotImplementedError
def move_to_end_of_line(self):
raise NotImplementedError
def move_to_start_of_next_block(self, count=1):
raise NotImplementedError
def move_to_start_of_prev_block(self, count=1):
raise NotImplementedError
def move_to_end_of_next_block(self, count=1):
raise NotImplementedError
def move_to_end_of_prev_block(self, count=1):
raise NotImplementedError
def move_to_start_of_document(self):
raise NotImplementedError
def move_to_end_of_document(self):
raise NotImplementedError
def toggle_selection(self):
raise NotImplementedError
def drop_selection(self):
raise NotImplementedError
def selection(self, callback):
raise NotImplementedError
def _follow_enter(self, tab):
"""Follow a link by faking an enter press."""
if tab:
self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.key_press(Qt.Key_Enter)
def follow_selected(self, *, tab=False):
raise NotImplementedError
class AbstractScroller(QObject):
"""Attribute of AbstractTab to manage scroll position."""
perc_changed = pyqtSignal(int, int)
def __init__(self, tab, parent=None):
super().__init__(parent)
self._tab = tab
self._widget = None
self.perc_changed.connect(self._log_scroll_pos_change)
@pyqtSlot()
def _log_scroll_pos_change(self):
log.webview.vdebug("Scroll position changed to {}".format(
self.pos_px()))
def _init_widget(self, widget):
self._widget = widget
def pos_px(self):
raise NotImplementedError
def pos_perc(self):
raise NotImplementedError
def to_perc(self, x=None, y=None):
raise NotImplementedError
def to_point(self, point):
raise NotImplementedError
def to_anchor(self, name):
raise NotImplementedError
def delta(self, x=0, y=0):
raise NotImplementedError
def delta_page(self, x=0, y=0):
raise NotImplementedError
def up(self, count=1):
raise NotImplementedError
def down(self, count=1):
raise NotImplementedError
def left(self, count=1):
raise NotImplementedError
def right(self, count=1):
raise NotImplementedError
def top(self):
raise NotImplementedError
def bottom(self):
raise NotImplementedError
def page_up(self, count=1):
raise NotImplementedError
def page_down(self, count=1):
raise NotImplementedError
def at_top(self):
raise NotImplementedError
def at_bottom(self):
raise NotImplementedError
class AbstractHistory:
"""The history attribute of a AbstractTab."""
def __init__(self, tab):
self._tab = tab
self._history = None
def __len__(self):
return len(self._history)
def __iter__(self):
return iter(self._history.items())
def current_idx(self):
raise NotImplementedError
def back(self, count=1):
"""Go back in the tab's history."""
idx = self.current_idx() - count
if idx >= 0:
self._go_to_item(self._item_at(idx))
else:
self._go_to_item(self._item_at(0))
raise WebTabError("At beginning of history.")
def forward(self, count=1):
"""Go forward in the tab's history."""
idx = self.current_idx() + count
if idx < len(self):
self._go_to_item(self._item_at(idx))
else:
self._go_to_item(self._item_at(len(self) - 1))
raise WebTabError("At end of history.")
def can_go_back(self):
raise NotImplementedError
def can_go_forward(self):
raise NotImplementedError
def _item_at(self, i):
raise NotImplementedError
def _go_to_item(self, item):
raise NotImplementedError
def serialize(self):
"""Serialize into an opaque format understood by self.deserialize."""
raise NotImplementedError
def deserialize(self, data):
"""Serialize from a format produced by self.serialize."""
raise NotImplementedError
def load_items(self, items):
"""Deserialize from a list of WebHistoryItems."""
raise NotImplementedError
class AbstractElements:
"""Finding and handling of elements on the page."""
def __init__(self, tab):
self._widget = None
self._tab = tab
def find_css(self, selector, callback, *, only_visible=False):
"""Find all HTML elements matching a given selector async.
Args:
callback: The callback to be called when the search finished.
selector: The CSS selector to search for.
only_visible: Only show elements which are visible on screen.
"""
raise NotImplementedError
def find_id(self, elem_id, callback):
"""Find the HTML element with the given ID async.
Args:
callback: The callback to be called when the search finished.
elem_id: The ID to search for.
"""
raise NotImplementedError
def find_focused(self, callback):
"""Find the focused element on the page async.
Args:
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
"""
raise NotImplementedError
def find_at_pos(self, pos, callback):
"""Find the element at the given position async.
This is also called "hit test" elsewhere.
Args:
pos: The QPoint to get the element for.
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
"""
raise NotImplementedError
class AbstractAudio(QObject):
"""Handling of audio/muting for this tab."""
muted_changed = pyqtSignal(bool)
recently_audible_changed = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent)
self._widget = None
def set_muted(self, muted: bool):
"""Set this tab as muted or not."""
raise NotImplementedError
def is_muted(self):
"""Whether this tab is muted."""
raise NotImplementedError
def toggle_muted(self):
self.set_muted(not self.is_muted())
def is_recently_audible(self):
"""Whether this tab has had audio playing recently."""
raise NotImplementedError
class AbstractTab(QWidget):
"""A wrapper over the given widget to hide its API and expose another one.
We use this to unify QWebView and QWebEngineView.
Attributes:
history: The AbstractHistory for the current tab.
registry: The ObjectRegistry associated with this tab.
private: Whether private browsing is turned on for this tab.
_load_status: loading status of this page
Accessible via load_status() method.
_has_ssl_errors: Whether SSL errors happened.
Needs to be set by subclasses.
for properties, see WebView/WebEngineView docs.
Signals:
See related Qt signals.
new_tab_requested: Emitted when a new tab should be opened with the
given URL.
load_status_changed: The loading status changed
fullscreen_requested: Fullscreen display was requested by the page.
arg: True if fullscreen should be turned on,
False if it should be turned off.
renderer_process_terminated: Emitted when the underlying renderer
process terminated.
arg 0: A TerminationStatus member.
arg 1: The exit code.
predicted_navigation: Emitted before we tell Qt to open a URL.
"""
window_close_requested = pyqtSignal()
link_hovered = pyqtSignal(str)
load_started = pyqtSignal()
load_progress = pyqtSignal(int)
load_finished = pyqtSignal(bool)
icon_changed = pyqtSignal(QIcon)
title_changed = pyqtSignal(str)
load_status_changed = pyqtSignal(str)
new_tab_requested = pyqtSignal(QUrl)
url_changed = pyqtSignal(QUrl)
shutting_down = pyqtSignal()
contents_size_changed = pyqtSignal(QSizeF)
add_history_item = pyqtSignal(QUrl, QUrl, str) # url, requested url, title
fullscreen_requested = pyqtSignal(bool)
renderer_process_terminated = pyqtSignal(TerminationStatus, int)
predicted_navigation = pyqtSignal(QUrl)
def __init__(self, *, win_id, mode_manager, private, parent=None):
self.private = private
self.win_id = win_id
self.tab_id = next(tab_id_gen)
super().__init__(parent)
self.registry = objreg.ObjectRegistry()
tab_registry = objreg.get('tab-registry', scope='window',
window=win_id)
tab_registry[self.tab_id] = self
objreg.register('tab', self, registry=self.registry)
self.data = TabData()
self._layout = miscwidgets.WrapperLayout(self)
self._widget = None
self._progress = 0
self._has_ssl_errors = False
self._mode_manager = mode_manager
self._load_status = usertypes.LoadStatus.none
self._mouse_event_filter = mouse.MouseEventFilter(
self, parent=self)
self.backend = None
# FIXME:qtwebengine Should this be public api via self.hints?
# Also, should we get it out of objreg?
hintmanager = hints.HintManager(win_id, self.tab_id, parent=self)
objreg.register('hintmanager', hintmanager, scope='tab',
window=self.win_id, tab=self.tab_id)
self.predicted_navigation.connect(self._on_predicted_navigation)
def _set_widget(self, widget):
# pylint: disable=protected-access
self._widget = widget
self._layout.wrap(self, widget)
self.history._history = widget.history()
self.scroller._init_widget(widget)
self.caret._widget = widget
self.zoom._widget = widget
self.search._widget = widget
self.printing._widget = widget
self.action._widget = widget
self.elements._widget = widget
self.audio._widget = widget
self.settings._settings = widget.settings()
self._install_event_filter()
self.zoom.set_default()
def _install_event_filter(self):
raise NotImplementedError
def _set_load_status(self, val):
"""Setter for load_status."""
if not isinstance(val, usertypes.LoadStatus):
raise TypeError("Type {} is no LoadStatus member!".format(val))
log.webview.debug("load status for {}: {}".format(repr(self), val))
self._load_status = val
self.load_status_changed.emit(val.name)
def event_target(self):
"""Return the widget events should be sent to."""
raise NotImplementedError
def send_event(self, evt):
"""Send the given event to the underlying widget.
The event will be sent via QApplication.postEvent.
Note that a posted event may not be re-used in any way!
"""
# This only gives us some mild protection against re-using events, but
# it's certainly better than a segfault.
if getattr(evt, 'posted', False):
raise utils.Unreachable("Can't re-use an event which was already "
"posted!")
recipient = self.event_target()
if recipient is None:
# https://github.com/qutebrowser/qutebrowser/issues/3888
log.webview.warning("Unable to find event target!")
return
evt.posted = True
QApplication.postEvent(recipient, evt)
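    # Note on the guard above: an event instance may be posted at most once.
    # Calling send_event() again with the same object finds evt.posted set
    # and raises utils.Unreachable instead of handing a re-used event to
    # QApplication.postEvent.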
@pyqtSlot(QUrl)
def _on_predicted_navigation(self, url):
"""Adjust the title if we are going to visit an URL soon."""
qtutils.ensure_valid(url)
url_string = url.toDisplayString()
log.webview.debug("Predicted navigation: {}".format(url_string))
self.title_changed.emit(url_string)
@pyqtSlot(QUrl)
def _on_url_changed(self, url):
"""Update title when URL has changed and no title is available."""
if url.isValid() and not self.title():
self.title_changed.emit(url.toDisplayString())
self.url_changed.emit(url)
@pyqtSlot()
def _on_load_started(self):
self._progress = 0
self._has_ssl_errors = False
self.data.viewing_source = False
self._set_load_status(usertypes.LoadStatus.loading)
self.load_started.emit()
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(self, navigation):
"""Handle common acceptNavigationRequest code."""
url = utils.elide(navigation.url.toDisplayString(), 100)
log.webview.debug("navigation request: url {}, type {}, is_main_frame "
"{}".format(url,
navigation.navigation_type,
navigation.is_main_frame))
if (navigation.navigation_type == navigation.Type.link_clicked and
not navigation.url.isValid()):
msg = urlutils.get_errstring(navigation.url,
"Invalid link clicked")
message.error(msg)
self.data.open_target = usertypes.ClickTarget.normal
navigation.accepted = False
def handle_auto_insert_mode(self, ok):
"""Handle `input.insert_mode.auto_load` after loading finished."""
if not config.val.input.insert_mode.auto_load or not ok:
return
cur_mode = self._mode_manager.mode
if cur_mode == usertypes.KeyMode.insert:
return
def _auto_insert_mode_cb(elem):
"""Called from JS after finding the focused element."""
if elem is None:
log.webview.debug("No focused element!")
return
if elem.is_editable():
modeman.enter(self.win_id, usertypes.KeyMode.insert,
'load finished', only_if_normal=True)
self.elements.find_focused(_auto_insert_mode_cb)
@pyqtSlot(bool)
def _on_load_finished(self, ok):
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
sess_manager = objreg.get('session-manager')
sess_manager.save_autosave()
if ok and not self._has_ssl_errors:
if self.url().scheme() == 'https':
self._set_load_status(usertypes.LoadStatus.success_https)
else:
self._set_load_status(usertypes.LoadStatus.success)
elif ok:
self._set_load_status(usertypes.LoadStatus.warn)
else:
self._set_load_status(usertypes.LoadStatus.error)
self.load_finished.emit(ok)
if not self.title():
self.title_changed.emit(self.url().toDisplayString())
self.zoom.set_current()
@pyqtSlot()
def _on_history_trigger(self):
"""Emit add_history_item when triggered by backend-specific signal."""
raise NotImplementedError
@pyqtSlot(int)
def _on_load_progress(self, perc):
self._progress = perc
self.load_progress.emit(perc)
def url(self, requested=False):
raise NotImplementedError
def progress(self):
return self._progress
def load_status(self):
return self._load_status
def _openurl_prepare(self, url, *, predict=True):
qtutils.ensure_valid(url)
if predict:
self.predicted_navigation.emit(url)
def openurl(self, url, *, predict=True):
raise NotImplementedError
def reload(self, *, force=False):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def clear_ssl_errors(self):
raise NotImplementedError
def key_press(self, key, modifier=Qt.NoModifier):
"""Send a fake key event to this tab."""
raise NotImplementedError
def dump_async(self, callback, *, plain=False):
"""Dump the current page's html asynchronously.
The given callback will be called with the result when dumping is
complete.
"""
raise NotImplementedError
def run_js_async(self, code, callback=None, *, world=None):
"""Run javascript async.
The given callback will be called with the result when running JS is
complete.
Args:
code: The javascript code to run.
callback: The callback to call with the result, or None.
world: A world ID (int or usertypes.JsWorld member) to run the JS
in the main world or in another isolated world.
"""
raise NotImplementedError
def shutdown(self):
raise NotImplementedError
def title(self):
raise NotImplementedError
def icon(self):
raise NotImplementedError
def set_html(self, html, base_url=QUrl()):
raise NotImplementedError
def networkaccessmanager(self):
"""Get the QNetworkAccessManager for this tab.
This is only implemented for QtWebKit.
For QtWebEngine, always returns None.
"""
raise NotImplementedError
def user_agent(self):
"""Get the user agent for this tab.
This is only implemented for QtWebKit.
For QtWebEngine, always returns None.
"""
raise NotImplementedError
def __repr__(self):
try:
url = utils.elide(self.url().toDisplayString(QUrl.EncodeUnicode),
100)
except (AttributeError, RuntimeError) as exc:
url = '<{}>'.format(exc.__class__.__name__)
return utils.get_repr(self, tab_id=self.tab_id, url=url)
def is_deleted(self):
return sip.isdeleted(self._widget)
|
airodactyl/qutebrowser
|
qutebrowser/browser/browsertab.py
|
Python
|
gpl-3.0
| 31,342
|
[
"VisIt"
] |
0057a8d9709f8586a7112dde499db1adb7b304dd627f81b81c1a32d668753656
|
#!/usr/bin/env python
"""
@author Luke Campbell <LCampbell@ASAScience.com>
@file ion/services/dm/inventory/dataset_management_service.py
@date Tue Jul 24 08:59:29 EDT 2012
@brief Dataset Management Service implementation
"""
from pyon.public import PRED, RT
from pyon.core.exception import BadRequest, NotFound, Conflict
from pyon.datastore.datastore import DataStore
from pyon.net.endpoint import RPCClient
from pyon.util.arg_check import validate_is_instance, validate_true, validate_is_not_none
from pyon.util.file_sys import FileSystem, FS
from pyon.util.log import log
from ion.services.dm.utility.granule_utils import SimplexCoverage, ParameterDictionary, GridDomain, ParameterContext
from ion.util.time_utils import TimeUtils
from interface.objects import ParameterContext as ParameterContextResource, ParameterDictionary as ParameterDictionaryResource, ParameterFunction as ParameterFunctionResource
from interface.objects import Dataset, ResourceVisibilityEnum
from interface.services.dm.idataset_management_service import BaseDatasetManagementService, DatasetManagementServiceClient
from coverage_model.basic_types import AxisTypeEnum
from coverage_model import AbstractCoverage, ViewCoverage, ComplexCoverage, ComplexCoverageType
from coverage_model.parameter_functions import AbstractFunction
from interface.services.sa.idata_process_management_service import DataProcessManagementServiceProcessClient
from coverage_model import NumexprFunction, PythonFunction, QuantityType, ParameterFunctionType
from coverage_model.coverages.coverage_extents import ReferenceCoverageExtents
from interface.objects import DataProcessDefinition, DataProcessTypeEnum, ParameterFunctionType as PFT
from interface.objects import CoverageTypeEnum
from ion.services.dm.utility.granule_utils import time_series_domain
from ion.services.eoi.table_loader import ResourceParser
from uuid import uuid4
from udunitspy.udunits2 import UdunitsError
import os
import numpy as np
import re
import ast
class DatasetManagementService(BaseDatasetManagementService):
def __init__(self, *args, **kwargs):
super(DatasetManagementService, self).__init__(*args,**kwargs)
self.logging_name = '(DatasetManagementService %s)' % (self.name or self.id)
def on_start(self):
super(DatasetManagementService,self).on_start()
using_eoi_services = self.CFG.get_safe('eoi.meta.use_eoi_services', False)
if using_eoi_services:
self.resource_parser = ResourceParser()
else:
self.resource_parser = None
#--------
def create_dataset(self, dataset=None, parameter_dict=None, parameter_dictionary_id=''):
if parameter_dict is not None:
log.warning("Creating a parameter dictionary raw with coverage objects will soon be deprecated")
if parameter_dictionary_id:
parameter_dict = self._coverage_parameter_dictionary(parameter_dictionary_id)
parameter_dict = parameter_dict.dump() # Serialize it
dataset.coverage_version = 'UNSET'
dataset_id, rev = self.clients.resource_registry.create(dataset)
try:
if dataset.coverage_type == CoverageTypeEnum.SIMPLEX:
cov = self._create_coverage(dataset_id, dataset.description or dataset_id, parameter_dict)
self._save_coverage(cov)
cov.close()
elif dataset.coverage_type == CoverageTypeEnum.COMPLEX:
cov = self._create_complex_coverage(dataset_id, dataset.description or dataset_id, parameter_dict)
cov.close()
else:
raise BadRequest("Unknown Coverage Type")
except Exception:
# Clean up dangling resource if there's no coverage
self.delete_dataset(dataset_id)
raise
dataset.coverage_version = "TODO"
dataset._id = dataset_id
dataset._rev = rev
self.update_dataset(dataset)
log.debug('creating dataset: %s', dataset_id)
#table loader create resource
if dataset.visibility == ResourceVisibilityEnum.PUBLIC:
log.debug('dataset visible: %s', dataset_id)
if self._get_eoi_service_available() and parameter_dictionary_id:
params = self.read_parameter_contexts(parameter_dictionary_id)
param_defs = {}
for p in params:
param_defs[p.name] = {
"value_encoding" : p.value_encoding,
"parameter_type" : p.parameter_type,
"units" : p.units,
"standard_name" : p.name,
"display_name" : p.display_name,
"description" : p.description,
"fill_value" : p.fill_value
}
self._create_single_resource(dataset_id, param_defs)
self.clients.resource_registry.create_association(dataset_id, PRED.hasParameterDictionary, parameter_dictionary_id)
return dataset_id
def read_dataset(self, dataset_id=''):
retval = self.clients.resource_registry.read(dataset_id)
validate_is_instance(retval,Dataset)
return retval
def update_dataset(self, dataset=None):
if not (dataset and dataset._id):
raise BadRequest('%s: Dataset either not provided or malformed.' % self.logging_name)
self.clients.resource_registry.update(dataset)
#@todo: Check to make sure retval is boolean
log.debug('DM:update dataset: dataset_id: %s', dataset._id)
return True
def delete_dataset(self, dataset_id=''):
assocs = self.clients.resource_registry.find_associations(subject=dataset_id, predicate=PRED.hasStream)
for assoc in assocs:
self.clients.resource_registry.delete_association(assoc)
self.clients.resource_registry.delete(dataset_id)
log.debug('DM:delete dataset: dataset_id: %s', dataset_id)
if self._get_eoi_service_available():
self._remove_single_resource(dataset_id)
def register_dataset(self, data_product_id=''):
raise BadRequest("register_dataset is no longer supported, please use create_catalog_entry in data product management")
def add_dataset_window_to_complex(self, device_dataset_id='', window=None, site_dataset_id=''):
'''
Adds target dataset to the complex coverage for the window specified
'''
if window is None:
raise BadRequest("Window must be specified")
site_path = self._get_coverage_path(site_dataset_id)
device_path = self._get_coverage_path(device_dataset_id)
ccov = ComplexCoverage.load(site_path)
ccov.append_reference_coverage(device_path, ReferenceCoverageExtents('', device_dataset_id, time_extents=window))
ccov.close()
def update_dataset_window_for_complex(self, device_dataset_id='', old_window=None, new_window=None, site_dataset_id=''):
if old_window is None or new_window is None:
raise BadRequest("Windows must be specified")
site_path = self._get_coverage_path(site_dataset_id)
extents = self._list_reference_extents(site_dataset_id)
if device_dataset_id not in extents:
raise BadRequest("Dataset %s does not reference %s at all." % (site_dataset_id, device_dataset_id))
extent_list = extents[device_dataset_id]
listings = []
for i, pairing in enumerate(extent_list):
if list(pairing.time_extents) == list(old_window):
pairing = ReferenceCoverageExtents('', device_dataset_id, time_extents=new_window)
listings.append(pairing)
cov = ComplexCoverage.load(site_path)
cov.set_reference_coverage_extents(device_dataset_id, listings, append=False)
cov.close()
def _list_reference_extents(self, dataset_id):
cov = self._get_coverage(dataset_id, mode='r')
retval = {}
for k,v in cov._persistence_layer.rcov_extents.data.iteritems():
retval[k] = v
cov.close()
return retval
#--------
def add_parameter_to_dataset(self, parameter_context_id='', dataset_id=''):
cov = self._get_simplex_coverage(dataset_id, mode='r+')
parameter_ctx_res = self.read_parameter_context(parameter_context_id)
pc = ParameterContext.load(parameter_ctx_res.parameter_context)
cov.append_parameter(pc)
cov.close()
return True
#--------
def add_stream(self,dataset_id='', stream_id=''):
log.info('Adding stream %s to dataset %s', stream_id, dataset_id)
validate_true(dataset_id and stream_id, 'Clients must provide both the dataset_id and stream_id')
self.clients.resource_registry.create_association(subject=dataset_id, predicate=PRED.hasStream,object=stream_id)
def remove_stream(self,dataset_id='', stream_id=''):
log.info('Removing stream %s from dataset %s', stream_id, dataset_id)
validate_true(dataset_id and stream_id, 'Clients must provide both the dataset_id and stream_id')
assocs = self.clients.resource_registry.find_associations(subject=dataset_id, predicate=PRED.hasStream,object=stream_id)
for assoc in assocs:
self.clients.resource_registry.delete_association(assoc)
#--------
def get_dataset_info(self,dataset_id=''):
coverage = self._get_coverage(dataset_id, mode='r')
return coverage.info
def get_dataset_parameters(self, dataset_id=''):
coverage = self._get_coverage(dataset_id, mode='r')
return coverage.parameter_dictionary.dump()
def get_dataset_length(self, dataset_id=''):
coverage = self._get_coverage(dataset_id, mode='r')
return coverage.num_timesteps
#--------
@classmethod
def numpy_walk(cls,obj):
try:
if np.isnan(obj):
return {'__nan__':0}
except TypeError:
pass
except NotImplementedError:
pass
except ValueError:
pass
if isinstance(obj, np.number):
return np.asscalar(obj)
if isinstance(obj, np.dtype):
return {'__np__':obj.str}
if isinstance(obj,dict):
if '__nan__' in obj and len(obj)==1:
return np.nan
if '__np__' in obj and len(obj)==1:
return np.dtype(obj['__np__'])
return {k:cls.numpy_walk(v) for k,v in obj.iteritems()}
if isinstance(obj,list):
return map(cls.numpy_walk, obj)
if isinstance(obj, tuple):
return tuple(map(cls.numpy_walk, obj))
return obj
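    # Sketch of how numpy_walk round-trips values (illustrative only, not part of the service API):
    #     numpy_walk(numpy.float32(3.5))      -> 3.5                      (plain Python scalar)
    #     numpy_walk(numpy.dtype('float32'))  -> {'__np__': '<f4'}        (JSON-safe dtype marker)
    #     numpy_walk({'__np__': '<f4'})       -> numpy.dtype('float32')   (marker back to dtype)
    #     numpy_walk(float('nan'))            -> {'__nan__': 0}           (NaN marker for JSON)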
def create_parameter(self, parameter_context=None):
"""
Creates a parameter context using the IonObject
"""
context = self.get_coverage_parameter(parameter_context)
parameter_context.parameter_context = self.numpy_walk(context.dump())
parameter_context_id, _ = self.clients.resource_registry.create(parameter_context)
if parameter_context.parameter_function_id:
self.read_parameter_function(parameter_context.parameter_function_id)
self.clients.resource_registry.create_association(
subject=parameter_context_id,
predicate=PRED.hasParameterFunction,
object=parameter_context.parameter_function_id)
return parameter_context_id
@classmethod
def get_coverage_parameter(cls, parameter_context):
"""
Creates a Coverage Model based Parameter Context given the
ParameterContext IonObject.
Note: If the parameter is a parameter function and depends on dynamically
created calibrations, this will fail.
"""
# Only CF and netCDF compliant variable names
parameter_context.name = re.sub(r'[^a-zA-Z0-9_]', '_', parameter_context.name)
from ion.services.dm.utility.types import TypesManager
# The TypesManager does all the parsing and converting to the coverage model instances
tm = TypesManager(None, {}, {})
# First thing to do is create the parameter type
param_type = tm.get_parameter_type(
parameter_context.parameter_type,
parameter_context.value_encoding,
parameter_context.code_report,
parameter_context.parameter_function_id,
parameter_context.parameter_function_map,
{
'name' : parameter_context.name,
'target_dataset': parameter_context.target_dataset,
'target_name' : parameter_context.target_name
})
# Ugh, I hate it but I did copy this section from
# ion/processes/bootstrap/ion_loader.py
context = ParameterContext(name=parameter_context.name, param_type=param_type)
# Now copy over all the attrs
context.uom = parameter_context.units
try:
if isinstance(context.uom, basestring):
tm.get_unit(context.uom)
except UdunitsError:
log.warning('Parameter %s has invalid units: %s', parameter_context.name, context.uom)
# Fill values can be a bit tricky...
context.fill_value = tm.get_fill_value(parameter_context.fill_value,
parameter_context.value_encoding,
param_type)
context.reference_urls = parameter_context.reference_urls
context.internal_name = parameter_context.name
context.display_name = parameter_context.display_name
context.standard_name = parameter_context.standard_name
context.ooi_short_name = parameter_context.ooi_short_name
context.description = parameter_context.description
context.precision = parameter_context.precision
context.visible = parameter_context.visible
return context
def create_parameter_context(self, name='', parameter_context=None, description='', reference_urls=None, parameter_type='', internal_name='', value_encoding='', code_report='', units='', fill_value='', display_name='', parameter_function_id='', parameter_function_map='', standard_name='', ooi_short_name='', precision='', visible=True):
validate_true(name, 'Name field may not be empty')
        validate_is_instance(parameter_context, dict, 'parameter_context must be a dict.')
name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
parameter_context = self.numpy_walk(parameter_context)
parameter_context['name'] = name
pc_res = ParameterContextResource(name=name, parameter_context=parameter_context, description=description)
pc_res.reference_urls = reference_urls or []
pc_res.parameter_type = parameter_type
pc_res.internal_name = internal_name or name
pc_res.value_encoding = value_encoding
pc_res.code_report = code_report or ''
pc_res.units = units
pc_res.fill_value = fill_value
pc_res.display_name = display_name
pc_res.parameter_function_id = parameter_function_id
pc_res.parameter_function_map = parameter_function_map
pc_res.standard_name = standard_name
pc_res.ooi_short_name = ooi_short_name
pc_res.precision = precision or '5'
pc_res.visible = visible
pc_id, ver = self.clients.resource_registry.create(pc_res)
if parameter_function_id:
self.read_parameter_function(parameter_function_id)
self.clients.resource_registry.create_association(subject=pc_id, predicate=PRED.hasParameterFunction, object=parameter_function_id)
return pc_id
def read_parameter_context(self, parameter_context_id=''):
res = self.clients.resource_registry.read(parameter_context_id)
validate_is_instance(res,ParameterContextResource)
res.parameter_context = self.numpy_walk(res.parameter_context)
return res
def delete_parameter_context(self, parameter_context_id=''):
self.read_parameter_context(parameter_context_id)
self.clients.resource_registry.delete(parameter_context_id)
return True
#--------
def read_parameter_context_by_name(self, name='', id_only=False):
res, _ = self.clients.resource_registry.find_resources(restype=RT.ParameterContext, name=name, id_only=id_only)
if not len(res):
raise NotFound('Unable to locate context with name: %s' % name)
retval = res[0]
if not id_only:
retval.parameter_context = self.numpy_walk(retval.parameter_context)
return retval
#--------
def create_parameter_function(self, parameter_function=None):
validate_is_instance(parameter_function, ParameterFunctionResource)
pf_id, ver = self.clients.resource_registry.create(parameter_function)
return pf_id
def read_parameter_function(self, parameter_function_id=''):
res = self.clients.resource_registry.read(parameter_function_id)
validate_is_instance(res, ParameterFunctionResource)
return res
def delete_parameter_function(self, parameter_function_id=''):
self.read_parameter_function(parameter_function_id)
self.clients.resource_registry.retire(parameter_function_id)
return True
@classmethod
def get_coverage_function(self, parameter_function):
func = None
if parameter_function.function_type == PFT.PYTHON:
func = PythonFunction(name=parameter_function.name,
owner=parameter_function.owner,
func_name=parameter_function.function,
arg_list=parameter_function.args,
kwarg_map=None,
param_map=None,
egg_uri=parameter_function.egg_uri)
elif parameter_function.function_type == PFT.NUMEXPR:
func = NumexprFunction(name=parameter_function.name,
expression=parameter_function.function,
arg_list=parameter_function.args)
if not isinstance(func, AbstractFunction):
raise Conflict("Incompatible parameter function loaded: %s" % parameter_function._id)
return func
#--------
def load_parameter_function(self, row):
name = row['Name']
ftype = row['Function Type']
func_expr = row['Function']
owner = row['Owner']
args = ast.literal_eval(row['Args'])
#kwargs = row['Kwargs']
descr = row['Description']
data_process_management = DataProcessManagementServiceProcessClient(self)
function_type=None
if ftype == 'PythonFunction':
function_type = PFT.PYTHON
elif ftype == 'NumexprFunction':
function_type = PFT.NUMEXPR
else:
raise Conflict('Unsupported Function Type: %s' % ftype)
parameter_function = ParameterFunctionResource(
name=name,
function=func_expr,
function_type=function_type,
owner=owner,
args=args,
description=descr)
parameter_function.alt_ids = ['PRE:' + row['ID']]
parameter_function_id = self.create_parameter_function(parameter_function)
dpd = DataProcessDefinition()
dpd.name = name
dpd.description = 'Parameter Function Definition for %s' % name
dpd.data_process_type = DataProcessTypeEnum.PARAMETER_FUNCTION
dpd.parameters = args
data_process_management.create_data_process_definition(dpd, parameter_function_id)
return parameter_function_id
#--------
def read_parameter_function_by_name(self, name='', id_only=False):
res, _ = self.clients.resource_registry.find_resources(restype=RT.ParameterFunction,name=name, id_only=id_only)
if not len(res):
raise NotFound('Unable to locate parameter function with name: %s' % name)
retval = res[0]
retval.parameter_function = self.numpy_walk(retval.parameter_function)
return retval
#--------
def create_parameter_dictionary(self, name='', parameter_context_ids=None, temporal_context='', description=''):
validate_true(name, 'Name field may not be empty.')
parameter_context_ids = parameter_context_ids or []
pd_res = ParameterDictionaryResource(name=name, temporal_context=temporal_context, description=description)
pd_res_id, ver = self.clients.resource_registry.create(pd_res)
for pc_id in parameter_context_ids:
self._link_pcr_to_pdr(pc_id, pd_res_id)
return pd_res_id
def read_parameter_dictionary(self, parameter_dictionary_id=''):
res = self.clients.resource_registry.read(parameter_dictionary_id)
validate_is_instance(res, ParameterDictionaryResource, 'Resource is not a valid ParameterDictionaryResource')
return res
def delete_parameter_dictionary(self, parameter_dictionary_id=''):
self.read_parameter_dictionary(parameter_dictionary_id)
self._unlink_pdr(parameter_dictionary_id)
self.clients.resource_registry.delete(parameter_dictionary_id)
return True
#--------
def read_parameter_contexts(self, parameter_dictionary_id='', id_only=False):
pcs, assocs = self.clients.resource_registry.find_objects(subject=parameter_dictionary_id, predicate=PRED.hasParameterContext, id_only=id_only)
if not id_only:
for pc in pcs:
pc.parameter_context = self.numpy_walk(pc.parameter_context)
return pcs
def read_parameter_dictionary_by_name(self, name='', id_only=False):
res, _ = self.clients.resource_registry.find_resources(restype=RT.ParameterDictionary, name=name, id_only=id_only)
if not len(res):
raise NotFound('Unable to locate dictionary with name: %s' % name)
return res[0]
#--------
def dataset_bounds(self, dataset_id='', parameters=None):
self.read_dataset(dataset_id) # Validates proper dataset
parameters = parameters or None
try:
doc = self.container.object_store.read_doc(dataset_id)
except NotFound:
return {}
if parameters is not None:
retval = {}
for p in parameters:
if p in doc['bounds']:
retval[p] = doc['bounds'][p]
return retval
return doc['bounds']
def dataset_bounds_by_axis(self, dataset_id='', axis=None):
bounds = self.dataset_bounds(dataset_id)
if bounds and axis and axis in bounds:
return bounds[axis]
return {}
def dataset_temporal_bounds(self, dataset_id):
dataset = self.read_dataset(dataset_id)
parameter_dictionary_id = self.clients.resource_registry.find_objects(dataset_id, PRED.hasParameterDictionary, id_only=True)[0][0]
pdict = self._coverage_parameter_dictionary(parameter_dictionary_id)
if not dataset:
return {}
temporal_parameter = pdict.temporal_parameter_name
units = pdict.get_temporal_context().uom
bounds = self.dataset_bounds(dataset_id)
if not bounds:
return {}
bounds = bounds[temporal_parameter or 'time']
bounds = [TimeUtils.units_to_ts(units, i) for i in bounds]
return bounds
def dataset_extents(self, dataset_id='', parameters=None):
self.read_dataset(dataset_id) # Validates proper dataset
parameters = parameters or None
try:
doc = self.container.object_store.read_doc(dataset_id)
except NotFound:
return {}
if parameters is not None:
if isinstance(parameters, list):
retval = {}
for p in parameters:
retval[p] = doc['extents'][p]
return retval
elif isinstance(parameters, basestring):
return doc['extents'][parameters]
return doc['extents']
def dataset_extents_by_axis(self, dataset_id='', axis=None):
extents = self.dataset_extents(dataset_id)
if extents and axis and axis in extents:
return extents[axis]
return {}
def dataset_size(self,dataset_id='', in_bytes=False):
self.read_dataset(dataset_id) # Validates proper dataset
try:
doc = self.container.object_store.read_doc(dataset_id)
except NotFound:
return 0.
size = doc['size']
if not in_bytes:
size = size / 1024.
return size
def dataset_latest(self, dataset_id=''):
self.read_dataset(dataset_id) # Validates proper dataset
try:
doc = self.container.object_store.read_doc(dataset_id)
except NotFound:
return {}
return doc['last_values']
#--------
@classmethod
def get_parameter_context(cls, parameter_context_id=''):
"""
Preferred client-side class method for constructing a parameter context
from a service call.
"""
dms_cli = DatasetManagementServiceClient()
pc_res = dms_cli.read_parameter_context(parameter_context_id=parameter_context_id)
pc = ParameterContext.load(pc_res.parameter_context)
pc._identifier = pc_res._id
return pc
@classmethod
def get_parameter_function(cls, parameter_function_id=''):
"""
Preferred client-side class method for constructing a parameter function
"""
dms_cli = DatasetManagementServiceClient()
pf_res = dms_cli.read_parameter_function(parameter_function_id=parameter_function_id)
pf = AbstractFunction.load(pf_res.parameter_function)
        pf._identifier = pf_res._id
return pf
@classmethod
def get_parameter_context_by_name(cls, name=''):
dms_cli = DatasetManagementServiceClient()
pc_res = dms_cli.read_parameter_context_by_name(name=name, id_only=False)
pc = ParameterContext.load(pc_res.parameter_context)
pc._identifier = pc_res._id
return pc
@classmethod
def get_parameter_dictionary(cls, parameter_dictionary_id=''):
"""
Class method to return a CoverageModel ParameterDictionary object from the
ION Resources. The object is built from the associated parameter contexts.
"""
dms_cli = DatasetManagementServiceClient()
pd = dms_cli.read_parameter_dictionary(parameter_dictionary_id)
pcs = dms_cli.read_parameter_contexts(parameter_dictionary_id=parameter_dictionary_id, id_only=False)
return cls.build_parameter_dictionary(pd, pcs)
@classmethod
def build_parameter_dictionary(cls, parameter_dictionary_obj, parameter_contexts):
pdict = cls._merge_contexts([ParameterContext.load(i.parameter_context) for i in parameter_contexts],
parameter_dictionary_obj.temporal_context)
pdict._identifier = parameter_dictionary_obj._id
return pdict
def _coverage_parameter_dictionary(self, parameter_dictionary_id):
pd = self.read_parameter_dictionary(parameter_dictionary_id)
pcs = self.read_parameter_contexts(parameter_dictionary_id, id_only=False)
parameter_dict = self._merge_contexts([ParameterContext.load(i.parameter_context) for i in pcs], pd.temporal_context)
return parameter_dict
@classmethod
def get_parameter_dictionary_by_name(cls, name=''):
dms_cli = DatasetManagementServiceClient()
pd_res = dms_cli.read_parameter_dictionary_by_name(name=name, id_only=True)
return cls.get_parameter_dictionary(pd_res)
#--------
def create_parameters_mult(self, parameter_function_list=None, parameter_context_list=None,
parameter_dictionary_list=None, parameter_dictionary_assocs=None):
pass
#--------
def _link_pcr_to_pdr(self, pcr_id, pdr_id):
self.clients.resource_registry.create_association(subject=pdr_id, predicate=PRED.hasParameterContext,object=pcr_id)
def _unlink_pcr_to_pdr(self, pcr_id, pdr_id):
assocs = self.clients.resource_registry.find_associations(subject=pdr_id, predicate=PRED.hasParameterContext, object=pcr_id, id_only=True)
for assoc in assocs:
self.clients.resource_registry.delete_association(assoc)
def _unlink_pdr(self, pdr_id):
objects, assocs = self.clients.resource_registry.find_objects(subject=pdr_id, predicate=PRED.hasParameterContext, id_only=True)
for assoc in assocs:
self.clients.resource_registry.delete_association(assoc)
def _create_coverage(self, dataset_id, description, parameter_dict):
#file_root = FileSystem.get_url(FS.CACHE,'datasets')
temporal_domain, spatial_domain = time_series_domain()
pdict = ParameterDictionary.load(parameter_dict)
scov = self._create_simplex_coverage(dataset_id, pdict, spatial_domain, temporal_domain)
#vcov = ViewCoverage(file_root, dataset_id, description or dataset_id, reference_coverage_location=scov.persistence_dir)
scov.close()
return scov
def _create_view_coverage(self, dataset_id, description, parent_dataset_id):
# As annoying as it is we need to load the view coverage belonging to parent dataset id and use the information
# inside to build the new one...
file_root = FileSystem.get_url(FS.CACHE,'datasets')
pscov = self._get_simplex_coverage(parent_dataset_id, mode='r')
scov_location = pscov.persistence_dir
pscov.close()
vcov = ViewCoverage(file_root, dataset_id, description or dataset_id, reference_coverage_location=scov_location)
return vcov
@classmethod
def _create_complex_coverage(cls, dataset_id, description, parameter_dict):
pdict = ParameterDictionary.load(parameter_dict)
file_root = FileSystem.get_url(FS.CACHE, 'datasets')
ccov = ComplexCoverage(file_root, dataset_id, 'Complex Coverage for %s' % dataset_id, parameter_dictionary=pdict, complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
return ccov
@classmethod
def _create_simplex_coverage(cls, dataset_id, parameter_dictionary, spatial_domain, temporal_domain):
file_root = FileSystem.get_url(FS.CACHE,'datasets')
scov = SimplexCoverage(file_root,dataset_id,'Simplex Coverage for %s' % dataset_id, parameter_dictionary=parameter_dictionary, temporal_domain=temporal_domain, spatial_domain=spatial_domain )
return scov
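    # _splice_coverage (below) attaches a freshly written SimplexCoverage to a dataset's
    # ViewCoverage: if the view still references a single SimplexCoverage, a temporally
    # aggregating ComplexCoverage is created to wrap both; otherwise the new simplex
    # coverage is appended to the existing ComplexCoverage and the view is refreshed.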
@classmethod
def _splice_coverage(cls, dataset_id, scov):
file_root = FileSystem.get_url(FS.CACHE,'datasets')
vcov = cls._get_coverage(dataset_id,mode='a')
scov_pth = scov.persistence_dir
if isinstance(vcov.reference_coverage, SimplexCoverage):
ccov = ComplexCoverage(file_root, uuid4().hex, 'Complex coverage for %s' % dataset_id,
reference_coverage_locs=[vcov.head_coverage_path,],
parameter_dictionary=ParameterDictionary(),
complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
log.info('Creating Complex Coverage: %s', ccov.persistence_dir)
ccov.append_reference_coverage(scov_pth)
ccov_pth = ccov.persistence_dir
ccov.close()
vcov.replace_reference_coverage(ccov_pth)
elif isinstance(vcov.reference_coverage, ComplexCoverage):
log.info('Appending simplex coverage to complex coverage')
#vcov.reference_coverage.append_reference_coverage(scov_pth)
dir_path = vcov.reference_coverage.persistence_dir
vcov.close()
ccov = AbstractCoverage.load(dir_path, mode='a')
ccov.append_reference_coverage(scov_pth)
ccov.refresh()
ccov.close()
vcov.refresh()
vcov.close()
@classmethod
def _save_coverage(cls, coverage):
coverage.flush()
@classmethod
def _get_coverage(cls,dataset_id,mode='r'):
file_root = FileSystem.get_url(FS.CACHE,'datasets')
coverage = AbstractCoverage.load(file_root, dataset_id, mode=mode)
return coverage
@classmethod
def _get_nonview_coverage(cls, dataset_id, mode='r'):
cov = cls._get_coverage(dataset_id, mode)
if isinstance(cov, ViewCoverage):
rcov = cov.reference_coverage
pdir = rcov.persistence_dir
rcov = None
cov.close()
cov = AbstractCoverage.load(pdir, mode=mode)
return cov
@classmethod
def _get_simplex_coverage(cls, dataset_id, mode='r'):
cov = cls._get_coverage(dataset_id, mode=mode)
if isinstance(cov, SimplexCoverage):
return cov
if isinstance(cov, ViewCoverage):
path = cov.head_coverage_path
guid = os.path.basename(path)
cov.close()
return cls._get_simplex_coverage(guid, mode=mode)
raise BadRequest('Unsupported coverage type found: %s' % type(cov))
@classmethod
def _get_coverage_path(cls, dataset_id):
file_root = FileSystem.get_url(FS.CACHE,'datasets')
return os.path.join(file_root, '%s' % dataset_id)
@classmethod
def _compare_pc(cls, pc1, pc2):
if pc1:
pc1 = ParameterContext.load(pc1) or {}
if pc2:
pc2 = ParameterContext.load(pc2) or {}
if hasattr(pc1,'lookup_value') or hasattr(pc2,'lookup_value'):
if hasattr(pc1,'lookup_value') and hasattr(pc2,'lookup_value'):
return bool(pc1 == pc2) and pc1.document_key == pc2.document_key
return False
return bool(pc1 == pc2)
@classmethod
def _merge_contexts(cls, contexts, temporal):
pdict = ParameterDictionary()
for context in contexts:
if context.name == temporal:
context.axis = AxisTypeEnum.TIME
pdict.add_context(context, is_temporal=True)
else:
pdict.add_context(context)
return pdict
def read_qc_table(self, obj_id):
obj = self.container.object_store.read(obj_id)
if '_type' in obj and obj['_type'] == 'QC':
return obj
else:
raise BadRequest('obj_id %s not QC' % obj_id)
def _create_single_resource(self,dataset_id, param_dict):
'''
EOI
Creates a foreign data table and a geoserver layer for the given dataset
and parameter dictionary
'''
self.resource_parser.create_single_resource(dataset_id,param_dict)
def _remove_single_resource(self,dataset_id):
'''
EOI
Removes foreign data table and geoserver layer for the given dataset
'''
self.resource_parser.remove_single_resource(dataset_id)
def _get_eoi_service_available(self):
'''
EOI
Returns true if geoserver endpoint is running and verified by table
loader process.
Once a true is returned, the result is cached and the process is no
longer queried
'''
return self.resource_parser and self.resource_parser.get_eoi_service_available()
| ooici/coi-services | ion/services/dm/inventory/dataset_management_service.py | Python | bsd-2-clause | 35,734 | ["NetCDF"] | 5bc2627a26d8f61033b8d75cac673121c5fd7f47152c9ef75bd087e9602e4b44 |
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
''' This manages remote shared Ansible objects, mainly roles'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
# default_readme_template
# default_meta_template
class Galaxy(object):
''' Keeps global galaxy info '''
def __init__(self, options):
self.options = options
# self.options.roles_path needs to be a list and will be by default
roles_path = getattr(self.options, 'roles_path', [])
# cli option handling is responsible for making roles_path a list
self.roles_paths = roles_path
self.roles = {}
# load data path for resource usage
this_dir, this_filename = os.path.split(__file__)
type_path = 'container_enabled' if getattr(self.options, 'container_enabled', False) else 'default'
self.DATA_PATH = os.path.join(this_dir, 'data', type_path)
@property
def default_role_skeleton_path(self):
return self.DATA_PATH
def add_role(self, role):
self.roles[role.name] = role
def remove_role(self, role_name):
del self.roles[role_name]
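# Minimal usage sketch (illustrative only; `options` stands in for the parsed CLI options
# object and `role` for a GalaxyRole instance):
#     galaxy = Galaxy(options)
#     galaxy.add_role(role)                  # index the role by role.name
#     skeleton = galaxy.default_role_skeleton_path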
| grimmjow8/ansible | lib/ansible/galaxy/__init__.py | Python | gpl-3.0 | 2,066 | ["Brian", "Galaxy"] | 172ee6ac8f642c2bb761f32f51e5ca1267441fc9eed596caef9909bd96c61994 |
# import necessary python packages
import numpy as np
import datetime
import os,sys
import urllib
import cStringIO
import json
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
import dateutil.parser
from matplotlib import pyplot as plt
from matplotlib import cm
from collections import Counter
from pymongo import MongoClient
from scipy import stats
from PIL import Image
from collections import OrderedDict
from scipy.linalg.basic import LinAlgError
import matplotlib.patches as patches
from matplotlib.path import Path
import requests
from StringIO import StringIO
#------------------------------------------------------------------------------------------------------------
# Setup path locations
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
csv_dir = '%s/csv' % rgz_dir
plot_dir = '%s/plots/expert' % rgz_dir
if not os.path.isdir(plot_dir):
os.mkdir(plot_dir)
dat_dir = '%s/datfiles/expert/expert_all' % rgz_dir
if not os.path.isdir(dat_dir):
os.mkdir(dat_dir)
# Set constants
beta_release_date = datetime.datetime(2013, 10, 20, 12, 0, 0, 0) # date of beta release (YYYY,MM,DD,HH,MM,SS,MS)
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0)
IMG_HEIGHT = 424.0 # number of pixels in the JPG image along the y axis
IMG_WIDTH = 424.0 # number of pixels in the JPG image along the x axis
FITS_HEIGHT = 301.0 # number of pixels in the FITS image along the y axis
FITS_WIDTH = 301.0 # number of pixels in the FITS image along the x axis
PIXEL_SIZE = 0.00016667#/3600.0 # pixel scale of the FITS image in degrees per pixel (~0.6 arcsec)
xmin = 1.
xmax = IMG_HEIGHT
ymin = 1.
ymax = IMG_WIDTH
xjpg2fits = float(IMG_WIDTH/FITS_WIDTH) # map the JPG pixels to the FITS pixels in x
yjpg2fits = float(IMG_HEIGHT/FITS_HEIGHT) # map the JPG pixels to the FITS pixels in y
bad_keys = ('finished_at','started_at','user_agent','lang')
plt.ion()
def list_flatten(x):
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(list_flatten(el))
else:
result.append(el)
return result
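# e.g. list_flatten([1, [2, [3, 4]], 'ab']) -> [1, 2, 3, 4, 'ab'] (strings are kept whole)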
def plot_npeaks(volunteers=False):
    if volunteers:
        readfile = 'expert/npeaks_ir_expert_volunteer.csv'
        writefile = 'expert_all/ir_peaks_histogram_expert_volunteer.png'
    else:
        readfile = 'expert/npeaks_ir_expert_all.csv'
        writefile = 'expert_all/ir_peaks_histogram_expert_all.png'
# Read in data
with open('%s/%s' % (csv_dir,readfile),'rb') as f:
npeaks = [int(line.rstrip()) for line in f]
# Plot the distribution of the total number of IR sources per image
fig = plt.figure(figsize=(8,7))
ax1 = fig.add_subplot(111)
h = plt.hist(npeaks,bins=np.arange(np.max(npeaks)+1),axes=ax1)
ax1.set_title('RGZ source distribution')
ax1.set_xlabel('Number of IR peaks per image')
ax1.set_ylabel('Count')
#fig.show()
fig.tight_layout()
# Save hard copy of the figure
fig.savefig('%s/%s' % (plot_dir,writefile))
plt.close()
return None
def find_ir_peak(x,y):
# Perform a kernel density estimate on the data:
X, Y = np.mgrid[xmin:xmax, ymin:ymax]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
# Find the number of peaks
# http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
#neighborhood = generate_binary_structure(2,2)
neighborhood = np.ones((10,10))
local_max = maximum_filter(Z, footprint=neighborhood)==Z
background = (Z==0)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
    detected_peaks = local_max & ~eroded_background  # boolean AND-NOT; subtracting boolean arrays is not supported by newer numpy
npeaks = detected_peaks.sum()
return X,Y,Z,npeaks
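# find_ir_peak returns the KDE evaluation grid (X, Y), the density surface Z, and the
# number of local maxima found; a rough usage sketch (names are illustrative):
#     X, Y, Z, npeaks = find_ir_peak(ir_x, ir_y)
#     xpeak, ypeak = X[Z == Z.max()][0], Y[Z == Z.max()][0]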
def plot_image(ir_x,ir_y,sub,X,Y,Z,npeaks,all_radio,radio_unique,volunteers=False):
if volunteers:
writefile = 'expert_volunteers/ir_peaks/%s_ir_peak.png' % sub['zooniverse_id']
else:
writefile = 'expert_all/ir_peaks/%s_ir_peak.png' % sub['zooniverse_id']
# Plot the infrared results
fig = plt.figure(1,(15,4))
ax3 = fig.add_subplot(143)
assert len(ir_x) == len(ir_y), \
'Length of IR x- and y-vectors must be the same'
if len(ir_x) > 2:
# Find the peak
xpeak = X[Z==Z.max()][0]
ypeak = Y[Z==Z.max()][0]
# Plot the KDE map
ax3.imshow(np.rot90(Z), cmap=plt.cm.hot_r,extent=[xmin, xmax, ymin, ymax])
# Plot the individual sources
if len(ir_x) > 2: # At least 3 non-singular IR points means that the KDE map can be generated and peaks estimated
ax3.text(50,40,r'Main IR peak: $(%i,%i)$' % (xpeak,ypeak),color='k',fontsize=12)
ax3.text(50,70,r'$N_{IR\/peaks}$ = %i' % npeaks,color='k',fontsize=12)
ax3.plot(ir_x, ir_y, 'go', markersize=4)
ax3.plot([xpeak],[ypeak],'c*',markersize=12)
elif len(ir_x) == 2: # 2 IR points are simply plotted, with no KDE estimation
ax3.text(50,40,r'IR peaks: $(%i,%i),(%i,%i)$' % (ir_x[0],ir_y[0],ir_x[1],ir_y[1]),color='k',fontsize=12)
ax3.text(50,70,r'$N_{IR\/peaks}$ = 2',color='k',fontsize=12)
ax3.plot(ir_x,ir_y,'c*',markersize=12)
elif len(ir_x) == 1: # 1 IR point is simply plotted, with no KDE estimation
print ir_x,ir_y
ax3.text(50,40,r'IR peak: $(%i,%i)$' % (ir_x[0],ir_y[0]),color='k',fontsize=12)
ax3.text(50,70,r'$N_{IR\/peaks}$ = 1',color='k',fontsize=12)
ax3.plot(ir_x,ir_y,'c*',markersize=12)
if len(ir_x) == 0: # 0 IR points identified by any user
ax3.text(50,70,'No IR sources',color='k',fontsize=12)
# Plot the radio counts
radio_flattened = [item for sublist in all_radio for item in sublist]
uniques = set(radio_flattened)
d = dict(zip(uniques,np.arange(len(uniques))))
c = Counter(all_radio)
cmc = c.most_common()[::-1]
# Sort by number of components?
for idx,(c_xval,n) in enumerate(cmc):
if len(c_xval) > 1:
tlist = [str(d[x]) for x in c_xval]
t = ' and R'.join(sorted(tlist))
else:
t = d[c_xval[0]]
singular = 's' if n != 1 else ''
ax3.text(550,400-idx*25,'%3i vote%s: R%s' % (n,singular,t),fontsize=11)
# Download contour data
r = requests.get(sub['location']['contours'])
contours = r.json()
sf_x = 500./contours['width']
sf_y = 500./contours['height']
verts_all = []
codes_all = []
components = contours['contours']
for comp in components:
for idx,level in enumerate(comp):
verts = [((p['x'])*sf_x,(p['y']-1)*sf_y) for p in level['arr']]
codes = np.ones(len(verts),int) * Path.LINETO
codes[0] = Path.MOVETO
verts_all.extend(verts)
codes_all.extend(codes)
path = Path(verts_all, codes_all)
patch_black = patches.PathPatch(path, facecolor = 'none', edgecolor='black', lw=1)
# Scaling factor for FITS to radio files
radio_ir_scaling_factor = 500./132
# Rectangle showing the radio box size
box_counts = Counter(radio_flattened)
for ru in radio_unique:
x0,x1,y0,y1 = [float(ru_) * radio_ir_scaling_factor for ru_ in ru]
# Assume xmax matching is still good
xmax_index = '%.6f' % float(ru[1])
component_number = d[xmax_index]
number_votes = box_counts[xmax_index]
rectangle = plt.Rectangle((x0,y0), x1-x0, y1-y0, fill=False, linewidth=number_votes/5., edgecolor = 'c')
ax3.add_patch(rectangle)
ax3.text(x0-15,y0-15,'R%s' % component_number)
ax3.set_xlim([0, 500])
ax3.set_ylim([500, 0])
ax3.set_title('%s \n %s' % (sub['zooniverse_id'],sub['metadata']['source']))
ax3.set_aspect('equal')
# Display IR and radio images
url_standard = sub['location']['standard']
im_standard = Image.open(cStringIO.StringIO(urllib.urlopen(url_standard).read()))
ax1 = fig.add_subplot(141)
ax1.imshow(im_standard,origin='upper')
ax1.add_patch(patch_black)
ax1.set_title('WISE')
url_radio = sub['location']['radio']
im_radio = Image.open(cStringIO.StringIO(urllib.urlopen(url_radio).read()))
ax2 = fig.add_subplot(142)
ax2.imshow(im_radio,origin='upper')
ax2.set_title('FIRST')
if volunteers:
ax2.set_xlabel('volunteers')
else:
ax2.set_xlabel('experts')
# Save hard copy of the figure
fig.savefig('%s/%s' % (plot_dir,writefile))
    # Close the figure when done; otherwise matplotlib complains about too many open figures
plt.close()
return None
def find_consensus(sub,classifications,verbose=False,volunteers=False):
Nclass = sub["classification_count"] # number of classifications made per image
srcid = sub["metadata"]["source"] # determine the image source id
imgid = sub["_id"] # grab the ObjectId corresponding for this image
# locate all the classifications of this image by either the experts or volunteers
if volunteers:
user_classifications = classifications.find({"subject_ids": imgid, 'expert':{'$exists':False}})
Nusers = classifications.find({"subject_ids": imgid, 'expert':{'$exists':False}}).count()
prefix = 'volunteer'
else:
user_classifications = classifications.find({"subject_ids": imgid, 'expert':True})
Nusers = classifications.find({"subject_ids": imgid, 'expert':True}).count()
prefix = 'expert'
# loop over the number of classifications
if Nusers > 0: # the number of classifications should equal the number of users who classified
classfile2 = open('%s/RGZ-%s-%s-classifications.txt' % (dat_dir,prefix,srcid), 'w')
# initialise coordinate variables
radio_ra = []
radio_dec = []
radio_x = []
radio_y = []
radio_w = []
radio_h = []
ir_ra = []
ir_dec = []
ir_radius = []
ir_x = []
ir_y = []
radio_comp = []
ir_comp = []
all_radio = []
all_radio_markings = []
Nuser_id = 0 # User id number
#---------------------------------------------------------------------------------------------------------------------
#---START: loop through the users who classified the image
for classification in list(user_classifications):
compid = 0 # Component id per image
rclass = classification["annotations"] # For now, analyze only the first set of continuous regions selected.
# Note that last two fields in annotations are timestamp and user_agent
Nuser_id += 1 # Increase the number of users who classified by 1.
#-------------------------------------------------------------------------------------------------------------------
#---START: loop through the keys in the annotation array, making sure that a classification has been made
for ann in rclass:
if ann.has_key('started_at') or ann.has_key('finished_at') or ann.has_key('user_agent') or ann.has_key('lang'):
continue
Nradio = 0 # counter for the number of radio components per classification
Nir = 0 # counter for the number of IR components per classification
if (ann.has_key('radio') and ann['radio'] != 'No Contours'): # get the radio annotations
radio = ann["radio"]
Nradio = len(radio) # count the number of radio components per classification
'''
print 'RADIO:'
print radio
'''
compid += 1 # we have a radio source - all components will be id with this number
list_radio = []
#---------------------------------------------------------------------------------------------------------------
#---START: loop through number of radio components in user classification
for rr in radio:
radio_marking = radio[rr]
# Find the location and size of the radio box in pixels
list_radio.append('%.6f' % float(radio_marking['xmax']))
all_radio_markings.append(radio_marking)
print >> classfile2, Nuser_id, compid,'RADIO', radio_marking['xmin'], radio_marking['xmax'], radio_marking['ymin'], radio_marking['ymax']
all_radio.append(tuple(sorted(list_radio)))
#---END: loop through number of radio components in user classification
#---------------------------------------------------------------------------------------------------------------
# get IR counterpart
irkey = ann.has_key('ir')
ir_nosources = True if (irkey and ann['ir'] == 'No Sources') else False
if (irkey and not ir_nosources): # get the infrared annotation for the radio classification.
ir = ann["ir"]
Nir = 1 #len(ir) # number of IR counterparts.
'''
print 'IR:'
print ir
'''
#exit()
#jj = 0
for ii in ir:
ir_marking = ir[ii]
# write to annotation file
print >> classfile2, Nuser_id, compid, 'IR', float(ir_marking['x']), float(ir_marking['y'])
ir_x.append(float(ir_marking['x']))
ir_y.append(float(ir_marking['y']))
else: # user did not classify an infrared source
Nir = 0
xir = -99.
yir = -99.
radiusir = -99.
print >> classfile2, Nuser_id, compid, 'IR', xir, yir
else: # user did not classify a radio source
Nradio = 0
Nir = 0
# there should always be a radio source, bug in program if we reach this part.
if not ann.has_key('radio'):
print >> classfile2,'%i No radio source - error in processing on image %s' % (Nuser_id, srcid)
elif ann['radio'] == 'No Contours':
print >> classfile2,'%i No radio source labeled by user for image %s' % (Nuser_id,srcid)
else:
print >> classfile2,'Unknown error processing radio source'
radio_comp.append( Nradio ) # add the number of radio components per user source to array.
            ir_comp.append( Nir ) # add the number of IR counterparts per user source to array.
#---END: loop through the users who classified the image
#---------------------------------------------------------------------------------------------------------------------
# Process the radio markings into unique components
rlist = [(rr['xmin'],rr['xmax'],rr['ymin'],rr['ymax']) for rr in all_radio_markings]
if len(rlist) > 0:
radio_unique = [rlist[0]]
if len(all_radio_markings) > 1:
for rr in rlist[1:]:
if rr not in radio_unique:
radio_unique.append(rr)
nr = False
else:
nr = True
radio_unique = [(0,0,0,0)]
# Use a 2-D Gaussian kernel to find the center of the IR sources and plot the analysis images
if len(ir_x) > 2:
try:
xpeak,ypeak,Z,npeaks = find_ir_peak(ir_x,ir_y)
plot_image(ir_x,ir_y,sub,xpeak,ypeak,Z,npeaks,all_radio,radio_unique,volunteers=volunteers)
except LinAlgError:
npeaks = len(ir_x)
else:
            print 'Length of IR vector was 2 or fewer'
npeaks = len(ir_x)
xpeak,ypeak,Z = np.zeros((423,423)),np.zeros((423,423)),np.zeros((423,423))
plot_image(ir_x,ir_y,sub,xpeak,ypeak,Z,npeaks,all_radio,radio_unique,volunteers=volunteers)
'''
# Plot analysis images
npeaks = len(ir_x)
if npeaks == 0:
ir_x,ir_y = [0],[0]
plot_image(ir_x[0],ir_y[0],npeaks,sub,all_radio,radio_unique,no_radio = nr)
'''
# calculate the median number of components for both IR and radio for each object in image.
radio_med = np.median(radio_comp) # median number of radio components
Ncomp_radio = np.size(np.where(radio_comp == radio_med)) # number of classifications = median number
ir_med = np.median(ir_comp) # median number of infrared components
Ncomp_ir = np.size(np.where(ir_comp == ir_med)) # number of classifications = median number
if verbose:
print ' '
print 'Source............................................................................................: %s' % srcid
print 'Number of %s users who classified the object..................................................: %d' % (prefix,Nusers)
print '................'
print 'Number of %s users who classified the radio source with the median value of radio components..: %d' % (prefix,Ncomp_radio)
print 'Median number of radio components per %s user.................................................: %f' % (prefix,radio_med)
print 'Number of %s users who classified the IR source with the median value of IR components........: %d' % (prefix,Ncomp_ir)
print 'Median number of IR components per %s user....................................................: %f' % (prefix,ir_med)
print ' '
classfile2.close()
else:
print '%ss did not classify subject %s.' % (prefix.capitalize(),sub['zooniverse_id'])
ir_x,ir_y = 0,0
return None
def load_rgz_data():
# Connect to Mongo database
# Make sure to run mongorestore /path/to/database to restore the updated files
# mongod client must be running locally
client = MongoClient('localhost', 27017)
db = client['radio']
subjects = db['radio_subjects'] # subjects = images
classifications = db['radio_classifications'] # classifications = classifications of each subject per user
return subjects,classifications
def load_expert_parameters():
expert_path = '%s/expert' % rgz_dir
# Note all times should be in UTC (Zulu)
json_data = open('%s/expert_params.json' % expert_path).read()
experts = json.loads(json_data)
return experts
def run_expert_sample(subjects,classifications):
expert_zid = open('%s/expert/expert_all_zooniverse_ids.txt' % rgz_dir).read().splitlines()
N = 0
with open('%s/expert/npeaks_ir_expert_all.csv' % (csv_dir),'wb') as f:
for sub in list(subjects.find({'zooniverse_id':{'$in':expert_zid},'classification_count':{'$gt':0}})):
Nclass = sub["classification_count"] # number of classifications made per image
if Nclass > 0: # if no classifications move to next image
npeak = find_consensus(sub,classifications)
print >> f, npeak
N += 1
# Check progress by printing to screen every 10 classifications
if not N % 10:
print N, datetime.datetime.now().strftime('%H:%M:%S.%f')
return None
def run_volunteer_sample(subjects,classifications):
expert_zid = open('%s/expert/expert_all_zooniverse_ids.txt' % rgz_dir).read().splitlines()
N = 0
with open('%s/expert/npeaks_ir_expert_volunteer.csv' % (csv_dir),'wb') as f:
for sub in list(subjects.find({'zooniverse_id':{'$in':expert_zid},'classification_count':{'$gt':0}})):
Nclass = sub["classification_count"] # number of classifications made per image
if Nclass > 0: # if no classifications move to next image
npeak = find_consensus(sub,classifications,volunteers=True)
print >> f, npeak
N += 1
# Check progress by printing to screen every 10 classifications
if not N % 10:
print N, datetime.datetime.now().strftime('%H:%M:%S.%f')
return None
def sdi(data):
# Shannon diversity index
def p(n, N):
""" Relative abundance """
        if n == 0:
return 0
else:
return (float(n)/N) * np.log(float(n)/N)
N = sum(data.values())
    return -sum(p(n, N) for n in data.values() if n != 0)
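# Shannon diversity index over a dict of counts, H = -sum(p_i * ln p_i) with p_i = n_i / N.
# e.g. sdi(Counter([1, 1, 2, 2])) -> ln(2) ~= 0.693 (two equally common answers)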
def compare_expert_consensus():
with open('%s/expert/expert_all_first_ids.txt' % rgz_dir,'rb') as f:
first_ids = [line.rstrip() for line in f]
exlist = load_expert_parameters()
ir_array = []
ex_array = []
for first in first_ids:
ir_temp = []
exlist_arr = []
for ex in exlist:
with open('%s/datfiles/expert/%s/RGZBETA2-%s-classifications.txt' % (rgz_dir,ex['expert_user'],first)) as f:
x = [line.rstrip() for line in f]
try:
last_line = x[-1].split()
n_ir = int(last_line[1])
x_ir = last_line[3]
if x_ir == '-99.0':
n_ir = 0
ir_temp.append(n_ir)
exlist_arr.append(ex['expert_user'])
except:
pass
ex_array.append(exlist_arr)
ir_array.append(ir_temp)
'''
# Plot number of users per galaxy
excount = [len(x) for x in ex_array]
fig2 = plt.figure(2)
ax2 = fig2.add_subplot(111)
ax2.hist(excount,bins=6,range=(5,11))
ax2.set_xlim(6,11)
fig2.show()
fig2.tight_layout()
'''
c = [Counter(i) for i in ir_array]
fig = plt.figure(1,(15,4))
ax = fig.add_subplot(111)
larr = []
varr = []
sdi_arr = []
for idx,cc in enumerate(c):
l,v = zip(*cc.items())
larr.append(list(l))
varr.append(list(v))
if len(l) > 1:
sdi_arr.append(sdi(cc)/np.log(len(l)))
else:
sdi_arr.append(sdi(cc))
iarr = []
sarr = []
for idx,(l,s) in enumerate(zip(larr,sdi_arr)):
iarr.append(np.zeros(len(l),dtype=int)+idx)
sarr.append(np.zeros(len(l),dtype=float)+s)
iarr = list_flatten(iarr)
larr = list_flatten(larr)
varr = list_flatten(varr)
sarr = list_flatten(sarr)
zipped = zip(sarr,larr,iarr,varr)
zipped.sort()
sarr,larr,iarr,varr = zip(*zipped)
ikeys = list(OrderedDict.fromkeys(iarr))
inew = [ikeys.index(ii) for ii in iarr]
sc = ax.scatter(inew,larr,c=sarr,s=((np.array(varr)+5)**2),edgecolor='k',cmap = cm.RdBu_r,vmin=0.,vmax =1.0)
cbar = plt.colorbar(sc)
cbar.set_label('Normalized Shannon entropy')
ax.set_xlim(-1,101)
ax.set_xlabel('Galaxy')
ax.set_ylabel('Number of IR sources')
ax.set_aspect('auto')
lnp = np.array(larr)
inp = np.array(inew)
for i in inp:
if (inp == i).sum() > 1:
lmin = np.min(lnp[inp == i])
lmax = np.max(lnp[inp == i])
ax.plot([i,i],[lmin,lmax],color='black')
fig.show()
fig.tight_layout()
# Now analyzing the radio components. Order can definitely change, even when associated with a single IR source.
# Sort first? Sort first on second column, then radio? That would make sure everything agrees...
'''
OK. So, Kevin doesn't have any classifications in the set recorded. There are 134 classifications by his IP address in the timeframe
in question, but they're very short (only 18 minutes), and none of them are in the set of 100. Lots of duplicates, too.
Eight users classified every galaxy in the sample of 100.
42jkb, ivywong, enno.middelberg, xDocR, KWillett, stasmanian, akpinska, vrooje
klmasters only shows up for 25 of them (as expected) Galaxies done in timeframe:
Kevin appears in 0 139 - Claims he was logged in, but zero classifications under username Kevin
There are 139 galaxies done by someone matching his IP address,
but none are in the expert sample of 100 (no idea why).
Assume we got no useful classification data from him.
'''
return ir_array
def compare_volunteer_consensus(subjects,classifications):
# Just looks at the total number of IR sources per subject as measured by volunteers.
# Should be able to get this by querying MongoDB directly.
with open('%s/expert/expert_all_zooniverse_ids.txt' % rgz_dir,'rb') as f:
zooniverse_ids = [line.rstrip() for line in f]
# Empty arrays
ir_array = []
usernames_array = []
# Load parameters for the expert science team
experts = load_expert_parameters()
# Loop over each object in the sample of 100
for zid in zooniverse_ids:
ir_temp = []
username_temp = []
# Get all classifications for the subject
subject_id = subjects.find_one({'zooniverse_id':zid})['_id']
clist = classifications.find({'subject_ids.0':subject_id})
        # List of users whose classifications shouldn't be included. Start with experts.
usernames_bad = [x['expert_user'] for x in experts]
# Loop over all classifications
for c in clist:
# Test if user was logged in
try:
user_name = c['user_name']
if user_name not in usernames_bad:
annotations = c['annotations']
# Record the number of galaxies (that is, IR sources) in image
na = len(annotations)
for a in annotations:
# Don't count metadata
if a.keys()[0] in bad_keys:
na -= 1
# Don't count if source has no sources or contours; likely a glitch in system
if 'ir' in a.keys():
                            if a['ir'] == 'No Sources' and a['radio'] == 'No Contours':
na -= 1
# Count the total number of galaxies in image recorded by the user
ir_temp.append(na)
username_temp.append(user_name)
# Prevent counts of duplicate classifications by the same user by adding name to the prohibited list
usernames_bad.append(user_name)
'''
else:
print 'Eliminating %s for %s' % (user_name,zid)
'''
# Do not include classifications by anonymous users
except KeyError:
pass
'''
username_temp.append('Anonymous')
annotations = c['annotations']
na = len(annotations)
for a in annotations:
if a.keys()[0] in bad_keys:
na -= 1
if 'ir' in a.keys():
if a['ir'] is 'No Sources' and a['radio'] is 'No Contours':
na -= 1
ir_temp.append(na)
'''
# Append counts to the master arrays
ir_array.append(ir_temp)
usernames_array.append(username_temp)
i_nozeros = []
for ii,zz,uu in zip(ir_array,zooniverse_ids,usernames_array):
if len(ii) > 0:
i_nozeros.append(ii)
else:
print 'No non-expert classifications for %s' % zz,uu
c = [Counter(i) for i in i_nozeros]
fig = plt.figure(2,(15,4))
fig.clf()
ax = fig.add_subplot(111)
larr = []
varr = []
sdi_arr = []
for idx,cc in enumerate(c):
l,v = zip(*cc.items())
larr.append(list(l))
varr.append(list(v))
if len(l) > 1:
sdi_arr.append(sdi(cc)/np.log(len(l)))
else:
sdi_arr.append(sdi(cc))
iarr = []
sarr = []
for idx,(l,s) in enumerate(zip(larr,sdi_arr)):
iarr.append(np.zeros(len(l),dtype=int)+idx)
sarr.append(np.zeros(len(l),dtype=float)+s)
iarr = list_flatten(iarr) # Index of galaxy image
larr = list_flatten(larr) # Number of IR sources
varr = list_flatten(varr) # Number of users who selected the given number of IR sources
sarr = list_flatten(sarr) # Shannon diversity index
zipped = zip(sarr,larr,iarr,varr)
zipped.sort()
sarr,larr,iarr,varr = zip(*zipped)
ikeys = list(OrderedDict.fromkeys(iarr))
inew = [ikeys.index(ii) for ii in iarr]
sc = ax.scatter(inew,larr,c=sarr,s=((np.array(varr)+5)**2),edgecolor='k',cmap = cm.RdBu_r,vmin=0.,vmax =1.0)
cbar = plt.colorbar(sc)
cbar.set_label('Normalized Shannon entropy')
ax.set_xlim(-1,len(c)+1)
ax.set_title('RGZ volunteer classifications')
ax.set_xlabel('Galaxy')
ax.set_ylabel('Number of IR sources')
ax.set_aspect('auto')
lnp = np.array(larr)
inp = np.array(inew)
# Draw black lines between dots
for i in inp:
if (inp == i).sum() > 1:
lmin = np.min(lnp[inp == i])
lmax = np.max(lnp[inp == i])
ax.plot([i,i],[lmin,lmax],color='black')
fig.show()
#fig.tight_layout()
return None
def expert_vs_volunteer():
# Direct comparison of the expert vs. volunteer classifications for all galaxies?
return None
def histogram_experts(classifications):
# DEPRECATED
# As of 7 Feb 2016, RGZ data dumps do not include the users collection.
# Goal: find the distribution and average number of IR sources per image for the science team
do_experts = True
if do_experts:
experts = load_expert_parameters()
# Add Larry's regular username as well as Ray Norris
experts.append({'expert_user':'DocR'})
experts.append({'expert_user':'raynorris'})
expert_avg = []
for ex in experts:
username = ex['expert_user']
classcount = classifications.find({'user_name':username}).count()
if classcount > 0:
c = classifications.find({'user_name':username})
nir = []
for cc in list(c):
annotations = cc['annotations']
na = len(annotations)
for a in annotations:
if a.keys()[0] in bad_keys:
na -= 1
if 'ir' in a.keys():
                            if a['ir'] == 'No Sources' and a['radio'] == 'No Contours':
na -= 1
nir.append(na)
print '%20s averages %.2f IR sources per image over %i classifications' % (username,np.mean(nir),classcount)
expert_avg.append(np.mean(nir))
print '-----------------------------------'
# Now look at the volunteers
rgz_id = classifications.find_one()['project_id']
# Odd that about half of the users don't seem to have a classification count for RGZ. Is that actually true?
'''
In [80]: users.find({'projects.%s.classification_count' % rgz_id:{'$exists':True}}).count()
Out[80]: 4907
In [79]: users.find({'projects.%s.classification_count' % rgz_id:{'$exists':False}}).count()
Out[79]: 3312
All users with a classification count do have at least one classification.
In the second group, though, most have zero, but some have a couple classifications (maximum of 6)
742 have at least one classification
2570 have no classifications
So we actually have only 4907+742 = 5,649 contributing users, rather than the 8,219 people in the users db and the 4,955 listed on the API
'''
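    # NOTE: `users` below refers to the Mongo 'users' collection, which newer RGZ data
    # dumps no longer include (hence the DEPRECATED note above); it would need to be
    # passed in or loaded alongside `classifications` for this block to run.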
# Concatenate the two groups
users_good = list(users.find({'projects.%s.classification_count' % rgz_id:{'$exists':True}}))
users_unsure = users.find({'projects.%s.classification_count' % rgz_id:{'$exists':False}})
for u in list(users_unsure):
if classifications.find({'user_id':u['_id']}).count() > 0:
users_good.append(u)
nir_username_volunteers = []
nir_avg_volunteers = []
nir_count_volunteers = []
for u in users_good:
classcount = classifications.find({'user_id':u['_id']}).count()
if classcount > 0:
c = classifications.find({'user_id':u['_id']})
nir = []
for cc in list(c):
annotations = cc['annotations']
na = len(annotations)
for a in annotations:
if a.keys()[0] in bad_keys:
na -= 1
if 'ir' in a.keys():
                        if a['ir'] == 'No Sources' and a['radio'] == 'No Contours':
na -= 1
nir.append(na)
avg = np.mean(nir)
#print '%20s averages %.2f IR sources per image over %i classifications' % (u['name'],avg,classcount)
else: # Shouldn't happen with this list
print 'No classifications found for %s' % u['name']
avg = 0.
nir_username_volunteers.append(u['name'])
nir_avg_volunteers.append(avg)
nir_count_volunteers.append(classcount)
# If we eliminate users who average more than two IR sources per image, how much of the data would that reject?
# counts_lost = np.sum([c for a,b,c in zip(nir_username_volunteers, nir_avg_volunteers, nir_count_volunteers) if b > 2.0])
# Only 413 classifications. Negligible.
return nir_username_volunteers, nir_avg_volunteers, nir_count_volunteers, expert_avg
def plot_histogram_experts(names, avgs, counts, expert_avg):
xpairs = [[x,x] for x in expert_avg]
xlist = []
for xends in xpairs:
xlist.extend(xends)
xlist.append(None)
avg_cutoff = np.linspace(0,4,50)
frac_lost = []
for ac in avg_cutoff:
frac_lost.append(np.sum([c for a,c in zip(avgs,counts) if a > ac])/float(sum(counts)))
# Plot results
fig = plt.figure(1)
fig.clf()
ax1 = fig.add_subplot(221)
ax1.scatter(avgs,counts,color='black',marker='.',s=1,alpha=0.5)
ax1.set_xlabel('Mean IR sources per image')
ax1.set_ylabel('Total number of classifications')
ax1.set_yscale('log')
ax2 = fig.add_subplot(222)
ax2.plot(avg_cutoff,frac_lost,color='green',lw=3)
ax2.set_ylim(-0.02,1.02)
ax2.set_xlabel('Cutoff for IR sources/image')
ax2.set_ylabel('Fraction of data affected')
ax3 = fig.add_subplot(223)
ax3.hist(avgs,bins=np.linspace(0,4,100))
ax3.text(2.5,700,'All',fontsize=16)
ax3.set_xlabel('Mean IR sources per image')
ax3.set_ylabel('Count (users)')
ax4 = fig.add_subplot(224)
ax4.hist(np.array(avgs)[np.array(counts) > 10],bins=np.linspace(0,4,100),color='cyan')
ax4.text(2.5,250,r'$N_{class}>10$',fontsize=16)
ax4.set_xlabel('Mean IR sources per image')
ax4.set_ylabel('Count (users)')
ax4.set_xlim(ax3.get_xlim())
for ax in (ax2,ax3,ax4):
ypairs = [ax.get_ylim() for x in range(len(xpairs))]
ylist = []
for yends in ypairs:
ylist.extend(yends)
ylist.append(None)
ax.plot(xlist,ylist,color='red',alpha=0.5)
fig.show()
# Save hard copy of the figure
fig.savefig('%s/plots/histogram_avg_ir_sources.png' % rgz_dir)
return None
def update_experts(classifications,experts):
for ex in experts:
expert_dates = (dateutil.parser.parse(ex['started_at']),dateutil.parser.parse(ex['ended_at']))
classifications.update({"updated_at": {"$gt": expert_dates[0],"$lt":expert_dates[1]},"user_name":ex['expert_user']},{'$set':{'expert':True}},multi=True)
return None
########################################
# Call program from the command line
########################################
if __name__ == '__main__':
plt.ioff()
subjects,classifications = load_rgz_data()
experts = load_expert_parameters()
update_experts(classifications,experts)
# Experts on sample of 100
run_expert_sample(subjects,classifications)
#plot_npeaks()
# Volunteers on sample of 100
run_volunteer_sample(subjects,classifications)
#plot_npeaks(volunteers=True)
| willettk/rgz-analysis | python/expert_all.py | Python | mit | 37,623 | ["Galaxy", "Gaussian"] | abf970a430821831a8770b180182163a5fd9dbce665ef0bb03b09c120d94d186 |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 04 07:08:30 2015
@author: Scott
"""
import numpy as np
from neurosrc.spectral.pac import pacfn
from NeuroTools import stgen
import matplotlib.pyplot as plt
# Network parameters
finame = 'pac_mc.hdf5'
Nneu = 100 # Number of neurons
spikeHz_baseline1 = np.arange(20) # Firing rate of a neuron (Hz) at baseline
spikeHz_baseline = np.append(spikeHz_baseline1,[19.1,19.2,19.3,19.4,19.5,19.6,19.7,19.8,19.9])
spikeHz_biased = 20 # Firing rate of a neuron (Hz) at favored phase
frac_bias = .2
dur = 3 # Length of simulation (s)
flo_range = (4, 8)
fhi_range = (80, 150)
# Process parameters
E = len(spikeHz_baseline)
t = np.arange(0,dur,.001)
T = len(t)
bias_ratio = spikeHz_biased / spikeHz_baseline
bias_ratio[bias_ratio==np.inf] = 100 #approximation for plotting purposes
log_bias_ratio = np.log10(bias_ratio)
# Define rates for Inhomogeneous Poisson Process (IPP)
# Assign a phase for all points in time
thal_phase0 = 2*np.pi*np.random.rand()
thal_freq = flo_range[0] + (flo_range[1]-flo_range[0])*np.random.rand(1)
thal_phase = t % (1/thal_freq) * 2*np.pi*thal_freq
thal_phase = (thal_phase + thal_phase0) % (2*np.pi)
# Calculate the IPP firing rate at each point in time, dependent on phase
mod_idxs = thal_phase < (frac_bias*2*np.pi)
mod_idxs = mod_idxs + 0
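# mod_idxs is a 0/1 mask over time: 1 where the low-frequency phase falls inside the
# favored window (the first frac_bias of each cycle), 0 elsewhere; it gates the extra
# phase-dependent firing rate added below.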
IPP_t = np.arange(dur*1000)
# Simulate IPP
stg = stgen.StGen()
field_raster = np.zeros(E,dtype=object)
for e in xrange(E):
print e
IPP_r_dep = (spikeHz_biased-spikeHz_baseline[e])*mod_idxs
IPP_r_indep = spikeHz_baseline[e]*np.ones(T)
IPP_rates = IPP_r_dep + IPP_r_indep
tspikes = np.zeros(Nneu,dtype=object)
raster = np.zeros((Nneu,T))
for neu in range(Nneu):
tspikes[neu] = stg.inh_poisson_generator(IPP_rates,IPP_t,dur*1000,array=True)
for tt in IPP_t:
raster[neu,tt] = sum(np.logical_and(tspikes[neu]>tt,tspikes[neu]<tt+1))
field_raster[e] = np.sum(raster,0)
# Define alpha function
alpha_dur = .5
alpha_t = np.arange(0,alpha_dur,.001)
alpha_tau = .001
alpha_gmax = .1
alpha = alpha_gmax * (alpha_t/alpha_tau) * np.exp(-(alpha_t-alpha_tau)/alpha_tau)
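# alpha(t) = gmax * (t/tau) * exp(-(t - tau)/tau), an alpha-function synaptic kernel that
# rises from 0 and peaks at t = tau before decaying; convolving it with the spike train
# below gives a crude LFP proxy.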
# Calculate the LFP by convolving the spike train with the alpha function
lfp = np.zeros(E,dtype=object)
for e in xrange(E):
lfppre = np.convolve(field_raster[e],alpha,'same')
# Normalize LFP
lfppre = lfppre - np.mean(lfppre)
lfp[e] = lfppre / np.std(lfppre)
# Set PAC parameters and calculate PAC
pac_method = 'plv'
filt_method = 'eegfilt'
rate = 1000
#kwargs = {'order' : 2} # for butter
#kwargs = {'transition' : 12, 'ripple' : 50} # for kaiser
kwargs = {'trans' : .15} # for eegfilt
pac_plv = np.zeros(E)
pac_mi = np.zeros(E)
pac_glm = np.zeros(E)
for e in xrange(E):
pac_plv[e] = pacfn(lfp[e], flo_range, fhi_range, rate, 'plv', filt_method, **kwargs) # add **kwargs if desired
pac_mi[e] = pacfn(lfp[e], flo_range, fhi_range, rate, 'mi', filt_method, **kwargs) # add **kwargs if desired
pac_glm[e] = pacfn(lfp[e], flo_range, fhi_range, rate, 'glm', filt_method, **kwargs) # add **kwargs if desired
# Visualize relationships between metrics
plt.figure()
plt.subplot(1,3,1)
plt.plot(pac_plv,pac_mi,'.')
plt.xlabel('plv')
plt.ylabel('mi')
plt.subplot(1,3,2)
plt.plot(pac_glm,pac_mi,'.')
plt.xlabel('glm')
plt.ylabel('mi')
plt.subplot(1,3,3)
plt.plot(pac_plv,pac_glm,'.')
plt.xlabel('plv')
plt.ylabel('glm')
# Visualize relationship between the spike-rate bias and each PAC metric
plt.figure()
plt.subplot(1,3,1)
plt.plot(log_bias_ratio,pac_plv,'.')
plt.xlabel('log_bias_ratio')
plt.ylabel('PAC: plv')
plt.subplot(1,3,2)
plt.plot(log_bias_ratio,pac_mi,'.')
plt.xlabel('log_bias_ratio')
plt.ylabel('PAC: mi')
plt.subplot(1,3,3)
plt.plot(log_bias_ratio,pac_glm,'.')
plt.xlabel('log_bias_ratio')
plt.ylabel('PAC: glm')
| parenthetical-e/neurosrc | spectral/sim_PACoutput.py | Python | mit | 3,746 | ["NEURON"] | 5732e30850c1da9307b53943cb17de4446475908fb7f5d5aa2e6d124c3610632 |
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
from __future__ import absolute_import, division, print_function
import py
import pytest
import json
import os
from os.path import sep as _sep, altsep as _altsep
class Cache(object):
def __init__(self, config):
self.config = config
self._cachedir = Cache.cache_dir_from_config(config)
self.trace = config.trace.root.get("cache")
if config.getoption("cacheclear"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
self._cachedir.mkdir()
@staticmethod
def cache_dir_from_config(config):
cache_dir = config.getini("cache_dir")
cache_dir = os.path.expanduser(cache_dir)
cache_dir = os.path.expandvars(cache_dir)
if os.path.isabs(cache_dir):
return py.path.local(cache_dir)
else:
return config.rootdir.join(cache_dir)
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
        to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
if _sep in name or _altsep is not None and _altsep in name:
raise ValueError("name is not allowed to contain path separators")
return self._cachedir.ensure_dir("d", name)
def _getvaluepath(self, key):
return self._cachedir.join('v', *key.split('/'))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
if path.check():
try:
with path.open("r") as f:
return json.load(f)
except ValueError:
self.trace("cache-invalid at %s" % (path,))
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.dirpath().ensure_dir()
except (py.error.EEXIST, py.error.EACCES):
self.config.warn(
code='I9', message='could not create cache path %s' % (path,)
)
return
try:
f = path.open('w')
except py.error.ENOTDIR:
self.config.warn(
code='I9', message='cache could not write path %s' % (path,))
else:
with f:
self.trace("cache-write %s: %r" % (key, value,))
json.dump(value, f, indent=2, sort_keys=True)
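# Illustrative sketch of how a plugin hook could use the Cache API above once
# ``config.cache`` has been attached (see pytest_configure below); the key
# prefix "myplugin" is a hypothetical example, not an existing plugin.
#
#   def pytest_sessionfinish(session):
#       config = session.config
#       runs = config.cache.get("myplugin/run_count", 0)
#       config.cache.set("myplugin/run_count", runs + 1)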
class LFPlugin(object):
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = 'lf', 'failedfirst'
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
def pytest_report_collectionfinish(self):
if self.active:
if not self._previously_failed_count:
mode = "run all (no recorded failures)"
else:
noun = 'failure' if self._previously_failed_count == 1 else 'failures'
suffix = " first" if self.config.getoption(
"failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == 'call' and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ('passed', 'skipped')
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update(
(item.nodeid, True)
for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active and self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
return
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
'--lf', '--last-failed', action='store_true', dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)")
group.addoption(
'--ff', '--failed-first', action='store_true', dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
group.addoption(
'--cache-show', action='store_true', dest="cacheshow",
help="show cache contents, don't perform collection or tests")
group.addoption(
'--cache-clear', action='store_true', dest="cacheclear",
help="remove all cache contents at start of test run.")
parser.addini(
"cache_dir", default='.pytest_cache',
help="cache directory path.")
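# Typical command-line usage of the options registered above:
#   pytest --lf            # rerun only the tests that failed last time
#   pytest --ff            # run everything, previously failed tests first
#   pytest --cache-show    # print cache contents without collecting/running tests
#   pytest --cache-clear   # wipe the cache directory before the run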
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
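# Sketch of a test using the ``cache`` fixture defined above; the key
# "example/expensive" is an arbitrary illustrative name.
#
#   def test_reuses_expensive_value(cache):
#       value = cache.get("example/expensive", None)
#       if value is None:
#           value = 42  # stand-in for an expensive computation
#           cache.set("example/expensive", value)
#       assert value == 42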
def pytest_report_header(config):
if config.option.verbose:
relpath = py.path.local().bestrelpath(config.cache._cachedir)
return "cachedir: %s" % relpath
def cacheshow(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.check():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir.join("v")
tw.sep("-", "cache values")
for valpath in sorted(vdir.visit(lambda x: x.isfile())):
key = valpath.relto(vdir).replace(valpath.sep, "/")
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, "
"will be ignored" % key)
else:
tw.line("%s contains:" % key)
stream = py.io.TextIO()
pprint(val, stream=stream)
for line in stream.getvalue().splitlines():
tw.line(" " + line)
ddir = basedir.join("d")
if ddir.isdir() and ddir.listdir():
tw.sep("-", "cache directories")
for p in sorted(basedir.join("d").visit()):
# if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.isfile():
key = p.relto(basedir)
tw.line("%s is a file of length %d" % (
key, p.size()))
return 0
|
tareqalayan/pytest
|
_pytest/cacheprovider.py
|
Python
|
mit
| 9,653
|
[
"VisIt"
] |
ab141ea2169c1b8c6f7bf0475df184f677d79a16d6061b312602ae67d84a7253
|
from brian import *
import time
if __name__=='__main__':
input_freqs = arange(200,450,0.5)
n_inputs = 50
numsims = len(input_freqs)
duration = 2000*ms
v_reset = '''
v = 13.65*mV
v_th = 15*mV
'''
v_rest = 0*mV
tau_mem = 10*ms
refr = 2*ms
eqs = Equations('''
dv/dt = (-v+v_rest)/tau_mem : volt
dv_th/dt = (-v_th+15*mV)/msecond : volt
''')
# dv_th/dt = -(exp(-(v_th/mV)**2)*sqrt(exp((v_th/mV)**2))*v_th)/(sqrt(2*pi)*ms) : volt
DV_s = 0.16*mV
nrns = NeuronGroup(numsims,eqs,reset=v_reset,threshold='v>v_th',refractory=refr)
nrns.v_th = 15*mV
rates = array([])
for f_in in input_freqs:
rates = append(rates,ones(n_inputs)*f_in)
inp = PoissonGroup(n_inputs*numsims,rates)
con_matrix = zeros([n_inputs*numsims,numsims])
for i in range(numsims):
con_matrix[i*n_inputs:(i+1)*n_inputs,i] = ones(n_inputs)*DV_s
cons = Connection(source=inp,target=nrns,state='v',weight=con_matrix)
# inp_mon = SpikeMonitor(inp)
# mem_mon = StateMonitor(nrns,'v',record=True)
# thr_mon = StateMonitor(nrns,'v_th',record=True)
out_mon = SpikeMonitor(nrns)
print "Running",numsims,"simulations ..."
run(duration,report='stdout')
# for i in range(numsims):
# hold(True)
# plot(mem_mon.times,mem_mon[i])
# plot(thr_mon.times,thr_mon[i])
# show()
transfer = np.zeros([numsims,2])
variability = np.zeros([numsims,2])
print "Done. Preparing plots ..."
f_in = input_freqs
numspikes = zeros(numsims)
mean_isi = zeros(numsims)
std_isi = zeros(numsims)
for nrn in out_mon.spiketimes.iterkeys():
nrnspikes = out_mon.spiketimes[nrn]
numspikes[nrn] = len(nrnspikes)
if numspikes[nrn] > 2:
isi = diff(nrnspikes)
mean_isi[nrn] = mean(isi)
std_isi[nrn] = std(isi)
f_out = numspikes/duration
cv = std_isi/mean_isi
variability = array([mean_isi, cv])
transfer = array([f_in, f_out])
subplot(2,1,1)
hold(True)
plot(transfer[0,:], transfer[1,:],'.')
xlabel('f_in (Hz)')
ylabel('f_out (Hz)')
hold(False)
subplot(2,1,2)
hold(True)
t = arange(0.002,max(mean_isi),0.0001)
theo_cv = sqrt((t-0.002)/t)
plot(t,theo_cv) # theoretical cv curve
plot(variability[0,:], variability[1,:],'.')
xlabel('mean ISI')
ylabel('CV')
show()
|
achilleas-k/brian-scripts
|
lif_transfer.py
|
Python
|
apache-2.0
| 2,425
|
[
"Brian"
] |
c3438cd962fc7b3caadb809df872426f1cad76f2850470761ed2786c084b5a35
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <phil@canary.md>
# date: 2014/12/03
# copy: (C) Copyright 2014-EOT Canary Health, Inc., All Rights Reserved.
#------------------------------------------------------------------------------
import logging
import requests
from aadict import aadict
import morph
import json
import asset
#------------------------------------------------------------------------------
log = logging.getLogger(__name__)
#------------------------------------------------------------------------------
class Error(Exception): pass
class AuthorizationError(Error): pass
class ProtocolError(Error): pass
#------------------------------------------------------------------------------
class Purpose:
DISCOVER = 'discover'
PREPARE = 'prepare'
AUGMENT = 'augment'
EXTEND = 'extend'
ALL = (DISCOVER, PREPARE, AUGMENT, EXTEND)
#------------------------------------------------------------------------------
class Transport:
SITE = 'site'
EMAIL = 'email'
PAPER = 'paper'
SMS = 'sms'
VOICE = 'voice'
ALL = (SITE, EMAIL, PAPER, SMS, VOICE)
#------------------------------------------------------------------------------
class Environment:
DEV = 'dev'
TEST = 'test'
CI = 'ci'
QA = 'qa'
UAT = 'uat'
PPE = 'ppe'
STG = 'stg'
PROD = 'prod'
ALL = (DEV, TEST, CI, QA, UAT, PPE, STG, PROD)
#------------------------------------------------------------------------------
class Client(object):
# todo: add
#----------------------------------------------------------------------------
def __init__(self, principal, credential, env=Environment.PROD, root=None):
'''
Constructs a new `canarymd.Client` object that is used to
    communicate with the Canary API. The following parameters
are accepted:
:Parameters:
principal : str
The principal (i.e. username) to authenticate with to the Canary
servers.
credential : str
The `principal` 's credential/token (i.e. password) to use to
authenticate with the Canary servers.
env : str, optional, default: canarymd.Environment.PROD
The Canary environment to connect to -- defaults to the
production servers. For testing, the staging environment
should be used, i.e. ``canarymd.Environment.STG``.
'''
if env not in Environment.ALL:
raise ValueError('invalid/unknown environment: %r' % (env,))
self.cookies = {}
self.env = env
self.root = root or {
Environment.PROD : 'https://api.canary.md/api',
Environment.DEV : 'http://api-dev.canary.md:8899/api',
}.get(env, 'https://api-{env}.canary.md/api').format(env=env)
self.principal = None
self.credential = None
self.session = None
self.updateAuth(principal, credential)
self._version = aadict(client=asset.version('canarymd'))
self._checkVersion()
#----------------------------------------------------------------------------
def _checkVersion(self):
res = self.session.get(self.root + '/version').json()
if 'apis' not in res:
sapi = res.get('api', 'UNKNOWN')
if sapi == '1.1.0':
self._version.update(api='v1', server=res.get('server'))
return
raise ProtocolError(
'incompatible client/server versions (1.1.0 != %s)' % (sapi,))
sapi = res.get('apis', [])
for version in ('v2',):
if version in sapi:
self._version.update(api=version, server=res.get('server'))
self.root += '/' + version
return
raise ProtocolError(
'incompatible client/server versions ("v2" not in %r)' % (sapi,))
#----------------------------------------------------------------------------
def _apiError(self, res):
ret = str(res.status_code) + ': '
# todo: handle case where `res.content_type` is not application/json...
res = res.json()
ret += res['message']
if 'field' in res:
ret += ' (' + ', '.join([
key + ': ' + value
for key, value in morph.flatten(res['field']).items()]) + ')'
return ret
#----------------------------------------------------------------------------
def _req(self, method, url, data=None, *args, **kw):
log.debug('sending %r request to %r', method, url)
if data is not None:
data = json.dumps(data)
res = getattr(self.session, method)(self.root + url, data=data, *args, **kw)
if res.status_code != 401:
return res
res = self.session.post(self.root + '/auth/session', json.dumps({
'username' : self.principal,
'password' : self.credential,
}))
if res.status_code != 200:
err = self._apiError(res)
log.error('authentication failure: %s', err)
raise AuthorizationError(err)
res = getattr(self.session, method)(self.root + url, data=data, *args, **kw)
if res.status_code != 401 and res.status_code != 403:
return res
err = self._apiError(res)
log.error('post-authentication authorization failure: %s', err)
raise AuthorizationError(err)
#----------------------------------------------------------------------------
def version(self):
return self._version
#----------------------------------------------------------------------------
def updateAuth(self, principal, credential):
'''
Updates this connection's authentication credentials to
Canary. This is typically used in server context when the
credentials are updated and a reboot of the server is not desired.
'''
# todo: perhaps keep a hash of the auth? but how to provide that
# to the server then? we could force a re-auth immediately...
if self.principal == principal and self.credential == credential:
return self
self.principal = principal
self.credential = credential
# todo: or just de-auth the current session?...
self.session = requests.Session()
self.session.headers['content-type'] = 'application/json'
return self
#----------------------------------------------------------------------------
def select(self, context, peo, timeout=None):
'''
Request a message selection. If no applicable messages are found,
then this returns ``None``, otherwise a
:class:`canarymd.Selection` object is returned.
:Parameters:
context : str, required
The Canary context under which to make this messaging request.
peo : dict, required
The Patient Engagement Opportunity (PEO) description. See the
Canary documentation for a detailed description of all possible
attributes. Among the common ones are:
transport : str, required
How the PEO is being delivered to the `recipient`. Must be one
of the transports defined in `canarymd.Transport`. For the
``paper`` transport, `width` and `height` are required.
purpose : str, required
What the relative purpose of the PEO is to the patient
            circumstance. Must be one of the purposes defined in
`canarymd.Purpose`.
width : int, default: null
Indicative width of the available space in pixels.
*Indicative* means that Canary will attempt to fill the space
completely, but may go over or under by some amount.
For ``paper`` transports, the physical dimensions should be
converted to pixels using a 300 DPI resolution.
height : int, default: null
Indicative height of the available space in pixels. See
`width` for details.
recipient : { dict, str }, required
The recipient of this PEO; either as a dictionary of
attributes or in HL7 serialized form.
appointment : { dict, str }, optional, default: null
If this PEO is for an appointment, the details of the
appointment; either as a dictionary of attributes or in HL7
serialized form.
'''
if peo.get('transport') not in Transport.ALL:
raise ValueError('invalid/unknown transport: %r' % (peo.get('transport'),))
if peo.get('purpose') not in Purpose.ALL:
raise ValueError('invalid/unknown purpose: %r' % (peo.get('purpose'),))
# todo: do a full parameter check?...
params = {
'selection' : {
'context' : context,
'peo' : peo,
},
}
if timeout is not None:
params['timeout'] = timeout
res = self._req('post', '/selection', params)
if res.status_code != 200:
err = self._apiError(res)
log.error('selection failure: %s', err)
raise ProtocolError(err)
jdat = res.json()
if 'selection' not in jdat:
log.error(
'unexpected error: no `selection` attribute in selection response: %r',
res.text)
raise ProtocolError(
'unexpected error: no `selection` attribute in selection response')
if jdat['selection'] is None:
return None
return Selection(jdat)
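  # Illustrative usage sketch; the principal, credential, context and recipient
  # values below are hypothetical placeholders, not real data:
  #
  #   client = Client('acme-principal', 's3cr3t', env=Environment.STG)
  #   selection = client.select('appointment-reminder', {
  #       'transport' : Transport.EMAIL,
  #       'purpose'   : Purpose.PREPARE,
  #       'recipient' : {'id' : 'patient-123'},
  #   })
  #   if selection is not None:
  #       print(selection.content)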
#----------------------------------------------------------------------------
def reasons(self):
'''
Returns the list of all known reasons-for-visit known to Canary.
'''
res = self._req('get', '/reason')
if res.status_code != 200:
err = self._apiError(res)
log.error('reasons fetch failure: %s', err)
raise ProtocolError(err)
return aadict.d2ar(res.json().get('reasons', []))
#------------------------------------------------------------------------------
class Selection(object):
'''
The result of a message selection operation.
:Attributes:
id : uuid
The unique identifier for this selection.
content : str
The transport-specific rendered form of the messages. For example,
for SITE and EMAIL, this will be in HTML format, and for SMS this
will be in plain-text format.
items : list
An itemized list of the messages contained in `content`. (Useful
for forensic purposes.)
'''
#----------------------------------------------------------------------------
def __init__(self, data):
self._data = aadict.d2ar(data)
self.id = self._data.selection.id
self.items = self._data.selectionitems
self.content = self._data.content
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
|
canaryhealth/canarymd-python
|
canarymd/client.py
|
Python
|
mit
| 10,597
|
[
"VisIt"
] |
2c06dafb5756f36d77d2478505c4f92f77e3528868474bfb3d02a8fafe6ba85e
|
from classylss.binding import ClassEngine, Background, Spectra, Perturbs, Primordial, Thermo
from classylss.astropy_compat import AstropyCompat
import numpy
from six import string_types
import os
import functools
def store_user_kwargs():
"""
Decorator that adds the ``_user_kwargs`` attribute to the class to track
which arguments the user actually supplied.
"""
def decorator(function):
@functools.wraps(function)
def inner(self, *args, **kwargs):
self._user_args = args
self._user_kwargs = kwargs
return function(self, *args, **kwargs)
return inner
return decorator
class Cosmology(object):
r"""
A cosmology calculator based on the CLASS binding in :mod:`classylss`.
    It is a collection of all the methods provided by the CLASS interfaces.
The object is immutable. To obtain an instance with a new set of parameters
use :func:`clone` or :func:`match`.
The individual interfaces can be accessed too, such that
`c.Spectra.get_transfer` and `c.get_transfer` are identical.
.. important::
A default set of units is assumed. Those units are:
* temperature: :math:`\mathrm{K}`
* distance: :math:`h^{-1} \mathrm{Mpc}`
* wavenumber: :math:`h \mathrm{Mpc}^{-1}`
* power: :math:`h^{-3} \mathrm{Mpc}^3`
* density: :math:`10^{10} (M_\odot/h) (\mathrm{Mpc}/h)^{-3}`
* neutrino mass: :math:`\mathrm{eV}`
* time: :math:`\mathrm{Gyr}`
* :math:`H_0`: :math:`(\mathrm{km} \ \mathrm{s^{-1}}) / (h^{-1} \ \mathrm{Mpc})`
Notes
-----
* The default configuration assumes a flat cosmology, :math:`\Omega_{0,k}=0`.
Pass ``Omega0_k`` as a keyword to specify the desired non-flat curvature.
* For consistency of variable names, the present day values can be passed
with or without '0' postfix, e.g., ``Omega0_cdm`` is translated to
``Omega_cdm`` as CLASS always uses the names without `0` as input
parameters.
* By default, a cosmological constant (``Omega0_lambda``) is assumed, with
its density value inferred by the curvature condition.
* Non-cosmological constant dark energy can be used by specifying the
``w0_fld``, ``wa_fld``, and/or ``Omega_fld`` values.
* To pass in CLASS parameters that are not valid Python argument names, use
the dictionary/keyword arguments trick, e.g.
``Cosmology(..., **{'temperature contributions': 'y'})``
* ``Cosmology(**dict(c))`` is not supposed to work; use ``Cosmology.from_dict(dict(c))``.
Parameters
----------
h : float
the dimensionless Hubble parameter
T0_cmb : float
the temperature of the CMB in Kelvins
Omega0_b : float
        the current baryon density parameter, :math:`\Omega_{b,0}`. Currently,
        an unrealistic cosmology where Omega_b == 0 is not supported.
Omega0_cdm : float
the current cold dark matter density parameter, :math:`\Omega_{cdm,0}`
N_ur : float
the number of ultra-relativistic (massless neutrino) species; the
default number is inferred based on the number of massive neutrinos
via the following logic: if you have respectively 1,2,3 massive
neutrinos and use the default ``T_ncdm`` value (0.71611 K), designed
to give m/omega of 93.14 eV, and you wish to have ``N_eff=3.046`` in
the early universe, then ``N_ur`` is set to 2.0328, 1.0196, 0.00641,
respectively.
m_ncdm : list, None
the masses (in eV) for all massive neutrino species; an empty list
should be passed for no massive neutrinos. The default is a single
massive neutrino with mass of 0.06 eV
P_k_max : float
the maximum ``k`` value to compute power spectrum results to, in units
of :math:`h/Mpc`
P_z_max : float
the maximum redshift to compute power spectrum results to
gauge : str,
either synchronous or newtonian
n_s : float
the tilt of the primordial power spectrum
nonlinear : bool
whether to compute nonlinear power spectrum results via HaloFit
verbose : bool
whether to turn on the default CLASS logging for all submodules
**kwargs :
extra keyword parameters to pass to CLASS. Mainly used to pass-in
parameter names that are not valid Python function argument names,
e.g. ``temperature contributions``, or ``number count contributions``.
Users should be wary of configuration options that may conflict
with the base set of parameters. To override parameters, chain the
result with :func:`clone`.
"""
# delegate resolve order -- a pun at mro; which in
# this case introduces the meta class bloat and doesn't solve
# the issue. We want delayed initialization of interfaces
# or so-called 'mixins'.
# easier to just use delegates with a customized getattr.
# this doesn't work well with automated documentation tools though,
# unfortunately.
dro = [AstropyCompat, Thermo, Spectra, Perturbs, Primordial, Background, ClassEngine]
dro_dict = dict([(n.__name__, n) for n in dro])
@store_user_kwargs()
def __init__(self,
h=0.67556,
T0_cmb=2.7255,
Omega0_b=0.022032/0.67556**2,
Omega0_cdm=0.12038/0.67556**2,
N_ur=None,
m_ncdm=[0.06],
P_k_max=10.,
P_z_max=100.,
gauge='synchronous',
n_s=0.9667,
nonlinear=False,
verbose=False,
**kwargs # additional arguments to pass to CLASS
):
# quickly copy over all arguments --
# at this point locals only contains the arguments.
args = dict(locals())
# store the extra CLASS params
kwargs = args.pop('kwargs')
# remove some non-CLASS variables
args.pop('self')
# check for deprecated init signature
deprecated_args = check_deprecated_init(self._user_args, self._user_kwargs)
if deprecated_args is not None:
# check for conflicts between named args user passed and
# deprecated args passed via **kwargs
for a in deprecated_args:
if a in self._user_kwargs:
raise ValueError("Parameter conflicts; use '%s' parameter only" %a)
# if we make it here, it is a valid deprecated syntax
import warnings
warnings.warn(("This init signature is deprecated; see the Cosmology "
"docstring for new signature"), FutureWarning)
args = deprecated_args
else:
# merge the kwargs; without resolving conflicts.
args.update(kwargs)
# check for input conflicts (using kwargs user actually input)
check_args(self._user_kwargs)
# verify and set defaults
pars = compile_args(args)
# use set state to de-serialize the object.
self.__setstate__(pars)
def __str__(self):
"""
Return a dict string when printed
"""
return dict(self).__str__()
def __iter__(self):
"""
Allows dict() to be used on class.
Use :func:`from_dict` to reconstruct an instance.
"""
pars = self.pars.copy()
for k in pars:
yield k, pars[k]
def __dir__(self):
""" a list of all members from all delegate classes """
r = []
# first allow tab completion of delegate names; to help resolve conflicts
r.extend([n.__name__ for n in self.dro])
# then allow tab completion of all delegate methods
for i in reversed(self.dro):
r.extend(dir(i))
return sorted(list(set(r)))
def __setattr__(self, key, value):
# do not allow setting of properties of the delegate classes
if any(hasattr(n, key) for n in self.dro):
raise ValueError(("the Cosmology object is immutable; use clone() or "
"match() to update parameters"))
return object.__setattr__(self, key, value)
def __getattr__(self, name):
"""
Find the proper delegate, initialize it, and run the method
"""
# getting a delegate explicitly, e.g. c.Background
if name in self.dro_dict:
iface = self.dro_dict[name]
if iface not in self.delegates:
self.delegates[iface] = iface(self.engine)
return self.delegates[iface]
# resolving a name from the delegates : c.Om0 => c.Background.Om0
for iface in self.dro:
if hasattr(iface, name):
if iface not in self.delegates:
self.delegates[iface] = iface(self.engine)
d = self.delegates[iface]
return getattr(d, name)
else:
raise AttributeError("Attribute `%s` not found in any of the delegate objects" % name)
def __getstate__(self):
return (self.pars)
@property
def sigma8(self):
"""
The amplitude of matter fluctuations at :math:`z=0` in a sphere
of radius :math:`r = 8 \ h^{-1}\mathrm{Mpc}`.
This is not an input CLASS parameter. To scale ``sigma8``, use
:func:`match`, which adjusts scalar amplitude ``A_s`` to
achieve the desired ``sigma8``.
"""
return self.Spectra.sigma8
@property
def Omega0_cb(self):
"""
The total density of CDM and Baryon.
This is not an input CLASS parameter. To scale ``Omega0_cb``, use
:func:`match`.
"""
return self.Background.Omega0_cdm + self.Background.Omega0_b
def match(self, sigma8=None, Omega0_cb=None, Omega0_m=None):
"""
Creates a new cosmology that matches a derived parameter. This is different
from clone, where CLASS parameters are used.
        Note that we only support matching one derived parameter at a time,
        because the matching operations do not commute in general.
Parameters
----------
sigma8 : float or None
We scale the scalar amplitude ``A_s`` to achieve the desired ``sigma8``.
Omega0_cb: float or None
Desired total energy density of CDM and baryon.
Omega0_m: float or None
            Desired total energy density of matter-like components (including ncdm)
Returns
-------
        A new Cosmology object in which the derived parameter matches the given constraint.
"""
if sum(0 if i is None else 1 for i in [sigma8, Omega0_cb, Omega0_m]) != 1:
            raise ValueError("Can only match one derived parameter at a time, but multiple were given.")
if sigma8 is not None:
return self.clone(A_s=self.A_s * (sigma8/self.sigma8)**2)
if Omega0_cb is not None:
rat = Omega0_cb / self.Omega0_cb
return self.clone(Omega_b=rat * self.Omega0_b, Omega_cdm=rat * self.Omega0_cdm)
if Omega0_m is not None:
Omega0_cb = Omega0_m - (self.Omega0_ncdm_tot - self.Omega0_pncdm_tot) - self.Omega0_dcdm
return self.match(Omega0_cb=Omega0_cb)
return self
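    # Sketch (hypothetical target value): rescale A_s so the new cosmology has
    # sigma8 = 0.82 while every other CLASS input stays the same.
    #
    #   c = Cosmology()
    #   c82 = c.match(sigma8=0.82)
    #   # c82.sigma8 is now ~0.82; c is unchanged because Cosmology is immutable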
def to_astropy(self):
"""
Initialize and return a subclass of :class:`astropy.cosmology.FLRW`
from the :class:`Cosmology` class.
Returns
-------
subclass of :class:`astropy.cosmology.FLRW` :
the astropy class holding the cosmology values
"""
import astropy.cosmology as ac
import astropy.units as au
is_flat = True
needs_w0 = False
needs_wa = False
pars = {}
pars['H0'] = 100*self.h
pars['Om0'] = self.Omega0_b + self.Omega0_cdm # exclude massive neutrinos to better match astropy
pars['Tcmb0'] = self.Tcmb0
pars['Neff'] = self.Neff
pars['Ob0'] = self.Ob0
if self.has_massive_nu:
# all massless by default
m_nu = numpy.zeros(int(numpy.floor(self.Neff)))
# then add massive species
m_nu[:len(self.m_ncdm)] = self.m_ncdm[:]
pars['m_nu'] = au.Quantity(m_nu, au.eV)
if self.Ok0 != 0.:
pars['Ode0'] = self.Ode0
is_flat = False
if self.wa_fld != 0:
pars['wa'] = self.wa_fld
            pars['w0'] = self.w0_fld
needs_wa = True
if self.w0_fld != -1:
pars['w0'] = self.w0_fld
needs_w0 = True
# determine class to return
prefix = "" if not is_flat else "Flat"
if needs_wa:
cls = prefix + "w0waCDM"
elif needs_w0:
cls = prefix + "wCDM"
else:
cls = prefix + "LambdaCDM"
cls = getattr(ac, cls)
return cls(**pars)
@classmethod
def from_astropy(kls, cosmo, **kwargs):
"""
Initialize and return a :class:`Cosmology` object from a subclass of
:class:`astropy.cosmology.FLRW`.
Parameters
----------
cosmo : subclass of :class:`astropy.cosmology.FLRW`.
the astropy cosmology instance
**kwargs :
extra keyword parameters to pass when initializing;
they shall not be in conflict with the parameters
inferred from cosmo. To override parameters,
chain the result with :func:`clone`.
Returns
-------
:class:`Cosmology` :
the initialized cosmology object
"""
args = astropy_to_dict(cosmo)
# merge in additional arguments -- this will die if
# there are conflicts.
args.update(kwargs)
# astropy_to_dict creates args, so we can use the 'user-friendly'
# constructor.
return Cosmology(**args)
@classmethod
def from_file(cls, filename, **kwargs):
"""
Initialize a :class:`Cosmology` object from the CLASS parameter file
Parameters
----------
filename : str
the name of the parameter file to read
**kwargs :
extra keyword parameters to pass when initializing;
they shall not be in conflict with the parameters
inferred from cosmo. To override parameters,
chain the result with :func:`clone`.
"""
from classylss import load_ini
# extract dictionary of parameters from the file
pars = load_ini(filename)
# intentionally not using merge; use clone if
# parameters are to modified.
pars.update(kwargs)
return cls.from_dict(pars)
@classmethod
def from_dict(kls, pars):
"""
Creates a Cosmology from a pars dictionary.
This is a rather 'raw' API.
The dictionary must be readable by ClassEngine.
Unlike ``Cosmology(**args)``, ``pars`` must
        not contain any of the convenience names defined here.
"""
self = object.__new__(Cosmology)
self.__setstate__(pars)
return self
def __setstate__(self, state):
pars = state
# initialize the engine as the backup delegate.
self.engine = ClassEngine(pars)
self.delegates = {ClassEngine: self.engine}
self.pars = pars
def clone(self, **kwargs):
"""
Create a new cosmology based on modification of self, with the
input keyword parameters changed.
Parameters
----------
**kwargs :
keyword parameters to adjust
Returns
-------
:class:`Cosmology`
a copy of self, with the input ``kwargs`` adjusted
"""
# this call to merge_args is OK because self.pars is
# a valid set of args
args = merge_args(self.pars, kwargs)
check_args(args)
pars = compile_args(args)
return type(self).from_dict(pars)
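# Illustrative usage sketch for the Cosmology class above (the parameter values
# are arbitrary examples, not recommended defaults):
#
#   c = Cosmology(h=0.7, Omega0_cdm=0.25)      # build a CLASS-backed cosmology
#   c2 = c.clone(n_s=0.96)                     # new immutable object, one input changed
#   flrw = c.to_astropy()                      # convert to an astropy FLRW subclass
#   c3 = Cosmology.from_astropy(flrw)          # and back again
#   c4 = Cosmology.from_dict(dict(c))          # round-trip through raw CLASS pars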
def astropy_to_dict(cosmo):
"""
Convert an astropy cosmology object to a dictionary of parameters
suitable for initializing a Cosmology object.
"""
from astropy import cosmology, units
args = {}
args['h'] = cosmo.h
args['T0_cmb'] = cosmo.Tcmb0.value
if cosmo.Ob0 is not None:
args['Omega0_b'] = cosmo.Ob0
else:
raise ValueError("please specify a value 'Ob0' ")
args['Omega0_cdm'] = cosmo.Om0 - cosmo.Ob0 # should be okay for now
# handle massive neutrinos
if cosmo.has_massive_nu:
# convert to eV
m_nu = cosmo.m_nu
if hasattr(m_nu, 'unit') and m_nu.unit != units.eV:
m_nu = m_nu.to(units.eV)
else:
m_nu = units.eV * m_nu
# from CLASS notes:
# one more remark: if you have respectively 1,2,3 massive neutrinos,
# if you stick to the default value pm equal to 0.71611, designed to give m/omega of
# 93.14 eV, and if you want to use N_ur to get N_eff equal to 3.046 in the early universe,
# then you should pass here respectively 2.0328,1.0196,0.00641
N_ur = [2.0328, 1.0196, 0.00641]
N_massive = (m_nu > 0.).sum()
args['N_ur'] = (cosmo.Neff/3.046) * N_ur[N_massive-1]
args['m_ncdm'] = [k.value for k in sorted(m_nu[m_nu > 0.], reverse=True)]
else:
args['m_ncdm'] = []
args['N_ur'] = cosmo.Neff
# specify the curvature
args['Omega0_k'] = cosmo.Ok0
# handle dark energy
if isinstance(cosmo, cosmology.LambdaCDM):
pass
elif isinstance(cosmo, cosmology.wCDM):
args['w0_fld'] = cosmo.w0
args['wa_fld'] = 0.
args['Omega0_Lambda'] = 0. # use Omega_fld
elif isinstance(cosmo, cosmology.w0waCDM):
args['w0_fld'] = cosmo.w0
args['wa_fld'] = cosmo.wa
args['Omega0_Lambda'] = 0. # use Omega_fld
else:
cls = cosmo.__class__.__name__
valid = ["LambdaCDM", "wCDM", "w0waCDM"]
msg = "dark energy equation of state not recognized for class '%s'; " %cls
msg += "valid classes: %s" %str(valid)
raise ValueError(msg)
return args
def compile_args(args):
"""
Compile the input args of Cosmology object to the input parameters (pars) to
a :class:`Cosmology` object.
A variety of defaults are set to tune CLASS for quantities used in
large scale structures.
Difference between pars and args:
- anything that is valid pars is also valid args.
- after replacing our customizations in args, we get pars.
Note that CLASS will check for additional conflicts.
see :func:`merge_args`
"""
pars = {} # we try to make pars write only.
# set some default parameters
pars.setdefault('output', "vTk dTk mPk")
pars.setdefault('extra metric transfer functions', 'y')
# args and pars are pretty much compatible;
pars.update(args)
def set_alias(pars_name, args_name):
if args_name not in args: return
v = args[args_name]
pars.pop(args_name) # pop because we copied everything.
if pars_name in args:
v = args[pars_name]
pars[pars_name] = v
set_alias('T_cmb', 'T0_cmb')
set_alias('Omega_cdm', 'Omega0_cdm')
set_alias('Omega_b', 'Omega0_b')
set_alias('Omega_k', 'Omega0_k')
set_alias('Omega_ur', 'Omega0_ur')
set_alias('Omega_Lambda', 'Omega_lambda') # classylss variable has lowercase l
set_alias('Omega_Lambda', 'Omega0_lambda') # classylss variable has lowercase l
set_alias('Omega_Lambda', 'Omega0_Lambda')
set_alias('Omega_fld', 'Omega0_fld')
set_alias('Omega_ncdm', 'Omega0_ncdm')
set_alias('Omega_g', 'Omega0_g')
# turn on verbosity
if 'verbose' in args:
pars.pop('verbose')
verbose = args['verbose']
if verbose:
for par in ['input', 'background', 'thermodynamics', 'perturbations',
'transfer', 'primordial', 'spectra', 'nonlinear', 'lensing']:
name = par + '_verbose'
if name not in pars: pars[name] = 1
# no massive neutrinos
if 'm_ncdm' in args:
pars.pop('m_ncdm')
m_ncdm = args['m_ncdm']
if m_ncdm is None:
m_ncdm = []
if numpy.isscalar(m_ncdm):
# a single massive neutrino
m_ncdm = [m_ncdm]
if isinstance(m_ncdm, (list, numpy.ndarray)):
m_ncdm = list(m_ncdm)
else:
raise TypeError("``m_ncdm`` should be a list of mass values in eV")
for m in m_ncdm:
if m == 0:
raise ValueError("A zero mass is specified in the non-cold dark matter list. "
"This is not needed, as we automatically set N_ur based on "
"the number of entries in m_ncdm such that Neff = 3.046.")
# number of massive neutrino species
pars['N_ncdm'] = len(m_ncdm)
# m_ncdm only needed if we have massive neutrinos
if len(m_ncdm) > 0:
pars['m_ncdm'] = m_ncdm
# from CLASS notes:
# one more remark: if you have respectively 1,2,3 massive neutrinos,
# if you stick to the default value pm equal to 0.71611, designed to give m/omega of
# 93.14 eV, and if you want to use N_ur to get N_eff equal to 3.046 in the early universe,
# then you should pass here respectively 2.0328,1.0196,0.00641
N_ur_table = [3.046, 2.0328, 1.0196, 0.00641]
if args['N_ur'] is None:
pars['N_ur'] = N_ur_table[len(m_ncdm)]
if 'N_ur' in args:
if args['N_ur'] is not None:
pars['N_ur'] = args['N_ur']
# check gauge
if 'gauge' in args:
if args['gauge'] not in ['synchronous', 'newtonian']:
raise ValueError("'gauge' should be 'synchronous' or 'newtonian'")
# set cosmological constant to zero if we got fluid w0/wa
if 'w0_fld' in args or 'wa_fld' in args:
if pars.get('Omega_Lambda', 0) > 0:
raise ValueError(("non-zero Omega_Lambda (cosmological constant) specified as "
"well as fluid w0/wa; use Omega_fld instead"))
pars['Omega_Lambda'] = 0.
# maximum k value
set_alias('P_k_max_h/Mpc', 'P_k_max')
# maximum redshift
set_alias('z_max_pk', 'P_z_max')
# nonlinear
set_alias('non linear', 'nonlinear')
    # we accept a boolean here, but CLASS signals this
    # option via the presence of a string value.
if pars.pop('non linear', False):
pars['non linear'] = 'halofit'
# remove None's for remaining parameters -- None means using a default from CLASS
# NOTE: do this last since m_ncdm=None means no massive_neutrinos
for key in list(pars.keys()):
if pars[key] is None: pars.pop(key)
return pars
def merge_args(args, moreargs):
"""
merge moreargs into args.
    Those defined in moreargs take priority over those
defined in args.
see :func:`compile_args`
"""
args = args.copy()
for name in moreargs.keys():
# pop those conflicting with me from the old pars
for eq in find_eqcls(name):
if eq in args: args.pop(eq)
args.update(moreargs)
return args
def check_deprecated_init(args, kwargs):
"""
Check if ``kwargs`` uses the (now deprecated) signature of ``Cosmology``
prior to version 0.2.6.
If using the deprecated syntax, this returns the necessary arguments for
the new signature, and ``None`` otherwise.
"""
from astropy import cosmology, units
defaults = {'H0':67.6, 'Om0':0.31, 'Ob0':0.0486, 'Ode0':0.69, 'w0':-1.,
'Tcmb0':2.7255, 'Neff':3.04, 'm_nu':0., 'flat':False}
# the deprecated kwargs
deprecated_args = [k for k in kwargs if k in defaults]
# all clear; nothing to do
if not len(deprecated_args):
return
# if we got deprecated kwargs, make sure we didn't get any valid kwargs!!
if not all(a in defaults for a in kwargs) or len(kwargs) and len(args):
msg = "mixing deprecated and valid arguments for the Cosmology class; "
msg += 'the following args are deprecated: %s' % str(deprecated_args)
raise ValueError(msg)
# update old defaults with input params
defaults.update(kwargs)
if defaults['m_nu'] is not None:
defaults['m_nu'] = units.Quantity(defaults['m_nu'], 'eV')
# determine the astropy class
if defaults['w0'] == -1.0: # cosmological constant
cls = 'LambdaCDM'
defaults.pop('w0')
else:
cls = 'wCDM'
# use special flat case if Ok0 = 0
if defaults.pop('flat'):
cls = 'Flat' + cls
defaults.pop('Ode0')
# initialize the astropy engine and convert to dict for Cosmology()
astropy_cosmo = getattr(cosmology, cls)(**defaults)
return astropy_to_dict(astropy_cosmo)
def check_args(args):
cf = {}
for name in args.keys():
cf[name] = []
for eq in find_eqcls(name):
if eq == name: continue
if eq in args: cf[name].append(eq)
for name in cf.keys():
if len(cf[name]) > 0:
raise ValueError("Conflicted parameters are given: %s" % str(cf))
# dict that defines input parameters that conflict with each other
CONFLICTS = [('h', 'H0', '100*theta_s'),
('T_cmb', 'Omega_g', 'omega_g', 'Omega0_g'),
('Omega_b', 'omega_b', 'Omega0_b'),
('Omega_fld', 'Omega0_fld'),
('Omega_Lambda', 'Omega0_Lambda'),
('N_ur', 'Omega_ur', 'omega_ur', 'Omega0_ur'),
('Omega_cdm', 'omega_cdm', 'Omega0_cdm'),
('m_ncdm', 'Omega_ncdm', 'omega_ncdm', 'Omega0_ncdm'),
('P_k_max', 'P_k_max_h/Mpc', 'P_k_max_1/Mpc'),
('P_z_max', 'z_max_pk'),
('nonlinear', 'non linear'),
('A_s', 'ln10^{10}A_s'),
]
def find_eqcls(key):
for cls in CONFLICTS:
if key in cls:
return cls
else:
return ()
|
nickhand/nbodykit
|
nbodykit/cosmology/cosmology.py
|
Python
|
gpl-3.0
| 25,992
|
[
"VTK"
] |
e9fc5b3b5188bf1cfae2ba2307d9dea267ba05ca4e14d8c0bca65032fc9021ca
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from django import template
register = template.Library()
@register.inclusion_tag('articles/article.html', takes_context=True)
def show_article(context, article):
context.update({'article':article})
return context
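# Sketch of how this inclusion tag might be used in a template (the variable
# name ``article`` is whatever the calling template has in context):
#
#   {% load articles %}
#   {% show_article article %}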
|
brianboyer/newsmixer
|
pie/articles/templatetags/articles.py
|
Python
|
gpl-3.0
| 1,076
|
[
"Brian"
] |
9f754b15d08aaf7dfeac18810a48bc705093db4b432a89597ce9b40844634992
|
from config import config
if config.IMPORT_PYSAM_PRIMER3:
import pysam
# VCF query
def vcf_query(chrom=None, pos=None, ref=None, alt=None, variant_str=None, individual=None, verbose=False, limit=100, release='mainset_July2016'):
if variant_str:
variant_str=str(variant_str).strip().replace('_','-')
chrom, pos, ref, alt = variant_str.split('-')
tb=pysam.TabixFile('UCLex/%s/%s_chr%s.vcf.gz' % (release, release, chrom,))
#mainset_February2016_chrX_filtered.vcf.gz
region=str('%s:%s-%s'%(chrom, pos, int(pos),))
headers=[h for h in tb.header]
headers=(headers[len(headers)-1]).strip().split('\t')
records=tb.fetch(region=region)
records=[r.split('\t') for r in records]
def response(POS, REF, ALT, index, geno, chrom, pos):
alleles=[geno['REF']]+geno['ALT'].split(',')
homozygous_genotype='/'.join([str(index),str(index)])
heterozygous_genotype='/'.join(['0',str(index)])
variant=dict()
variant['POS']=POS
variant['REF']=REF
variant['ALT']=ALT
variant['index']=index
variant['variant_id']='-'.join([str(chrom),str(POS),variant['REF'],variant['ALT']])
variant['synonym_variant_id']='{}-{}-{}-{}'.format(str(chrom), str(pos), ref, alt,)
variant['hgvs']='chr%s:g.%s%s>%s' % (str(chrom), str(POS), REF, ALT,)
#print [geno[h].split(':')[0].split('/') for h in geno]
variant['hom_samples']=[h for h in geno if geno[h].split(':')[0]==homozygous_genotype][0:limit]
variant['HOM_COUNT']=len(variant['hom_samples'])
variant['het_samples']=[h for h in geno if geno[h].split(':')[0]==heterozygous_genotype][0:limit]
variant['HET_COUNT']=len(variant['het_samples'])
variant['wt_samples']=[h for h in geno if geno[h].split(':')[0]=='0/0'][1:100]
variant['WT_COUNT']=len([h for h in geno if geno[h].split(':')[0]=='0/0'])
variant['MISS_COUNT']=len([h for h in geno if geno[h].split(':')[0]=='./.'])
variant['allele_num']= 2*(variant['HOM_COUNT'] + variant['HET_COUNT']+variant['WT_COUNT'])
variant['allele_count']=2*variant['HOM_COUNT'] + variant['HET_COUNT']
if individual: variant['individual']=geno[individual]
#variant['site_quality'] = variant['QUAL']
#variant['filter'] = variant['FILTER']
if variant['WT_COUNT']==0:
variant['allele_freq'] = None
else:
variant['allele_freq'] = float(variant['HET_COUNT']+2*variant['HOM_COUNT']) / float(2*variant['WT_COUNT'])
samples=variant['het_samples']+variant['hom_samples']
#variant['hpo']=[p for p in get_db(app.config['DB_NAME_PATIENTS']).patients.find({'external_id':{'$in':samples}},{'_id':0,'features':1,'external_id':1})]
return variant
for r in records:
geno=dict(zip(headers, r))
POS=geno['POS']
REF=geno['REF']
if verbose:
print 'POS', POS
print 'REF', REF
for i, ALT, in enumerate(geno['ALT'].split(',')):
if verbose: print 'ALT', ALT
# insertion
if ref=='-' and REF+alt==ALT: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
# deletion
# replace leftmost
elif alt=='-' and ALT==REF.replace(ref,''): return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
# replace rightmost
elif alt=='-' and ALT==REF[::-1].replace(ref[::-1], "", 1)[::-1]: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
#
elif alt=='-' and ref==REF and ALT=='*': return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
elif alt=='0' and ALT=='*' and ref==REF: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
elif alt==ALT and ref==REF: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
continue
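# Illustrative call (the variant string is a made-up example; the matching
# UCLex tabix file for the requested release must exist on disk):
#
#   v = vcf_query(variant_str='1-1234567-A-T', release='mainset_July2016')
#   if v: print v['allele_freq'], v['HET_COUNT'], v['HOM_COUNT']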
# VCF query
def vcf_query2(chrom=None, pos=None, ref=None, alt=None, variant_str=None, individual=None, verbose=False, limit=100):
if variant_str:
variant_str=str(variant_str).strip().replace('_','-')
chrom, pos, ref, alt = variant_str.split('-')
tb=pysam.TabixFile('uclex_files/current/chr%s.vcf.gz' % chrom,)
#mainset_February2016_chrX_filtered.vcf.gz
region=str('%s:%s-%s'%(chrom, pos, int(pos),))
headers=[h for h in tb.header]
headers=(headers[len(headers)-1]).strip().split('\t')
records=tb.fetch(region=region)
records=[r.split('\t') for r in records]
def response(POS, REF, ALT, index, geno, chrom, pos):
alleles=[geno['REF']]+geno['ALT'].split(',')
homozygous_genotype='/'.join([str(index),str(index)])
heterozygous_genotype='/'.join(['0',str(index)])
variant=dict()
variant['POS']=POS
variant['REF']=REF
variant['ALT']=ALT
variant['index']=index
variant['variant_id']='-'.join([str(chrom),str(POS),variant['REF'],variant['ALT']])
variant['synonym_variant_id']='{}-{}-{}-{}'.format(str(chrom), str(pos), ref, alt,)
variant['hgvs']='chr%s:g.%s%s>%s' % (str(chrom), str(POS), REF, ALT,)
#print [geno[h].split(':')[0].split('/') for h in geno]
variant['hom_samples']=[h for h in geno if geno[h].split(':')[0]==homozygous_genotype][0:limit]
variant['HOM_COUNT']=len(variant['hom_samples'])
variant['het_samples']=[h for h in geno if geno[h].split(':')[0]==heterozygous_genotype][0:limit]
variant['HET_COUNT']=len(variant['het_samples'])
variant['wt_samples']=[h for h in geno if geno[h].split(':')[0]=='0/0'][1:100]
variant['WT_COUNT']=len([h for h in geno if geno[h].split(':')[0]=='0/0'])
variant['MISS_COUNT']=len([h for h in geno if geno[h].split(':')[0]=='./.'])
variant['allele_num']= 2*(variant['HOM_COUNT'] + variant['HET_COUNT']+variant['WT_COUNT'])
variant['allele_count']=2*variant['HOM_COUNT'] + variant['HET_COUNT']
if individual: variant['individual']=geno[individual]
#variant['site_quality'] = variant['QUAL']
#variant['filter'] = variant['FILTER']
if variant['WT_COUNT']==0:
variant['allele_freq'] = None
else:
variant['allele_freq'] = float(variant['HET_COUNT']+2*variant['HOM_COUNT']) / float(2*variant['WT_COUNT'])
samples=variant['het_samples']+variant['hom_samples']
#variant['hpo']=[p for p in get_db(app.config['DB_NAME_PATIENTS']).patients.find({'external_id':{'$in':samples}},{'_id':0,'features':1,'external_id':1})]
return variant
for r in records:
geno=dict(zip(headers, r))
POS=geno['POS']
REF=geno['REF']
if verbose:
print 'POS', POS
print 'REF', REF
for i, ALT, in enumerate(geno['ALT'].split(',')):
if verbose: print 'ALT', ALT
# insertion
if ref=='-' and REF+alt==ALT: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
# deletion
# replace leftmost
elif alt=='-' and ALT==REF.replace(ref,''): return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
# replace rightmost
elif alt=='-' and ALT==REF[::-1].replace(ref[::-1], "", 1)[::-1]: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
#
elif alt=='-' and ref==REF and ALT=='*': return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
elif alt=='0' and ALT=='*' and ref==REF: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
elif alt==ALT and ref==REF: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
continue
# VCF query
def vcf_query3(chrom=None, pos=None, ref=None, alt=None, variant_str=None, individual=None, verbose=False, limit=100):
if variant_str:
variant_str=str(variant_str).strip().replace('_','-')
chrom, pos, ref, alt = variant_str.split('-')
tb=pysam.TabixFile('/slms/UGI/vm_exports/vyp/phenotips/uclex_files/current/chr%s.vcf.gz' % chrom,)
#mainset_February2016_chrX_filtered.vcf.gz
region=str('%s:%s-%s'%(chrom, pos, int(pos),))
headers=[h for h in tb.header]
headers=(headers[len(headers)-1]).strip().split('\t')
records=tb.fetch(region=region)
records=[r.split('\t') for r in records]
def response(POS, REF, ALT, index, geno, chrom, pos):
alleles=[geno['REF']]+geno['ALT'].split(',')
homozygous_genotype='/'.join([str(index),str(index)])
heterozygous_genotype='/'.join(['0',str(index)])
variant=dict()
variant['POS']=POS
variant['REF']=REF
variant['ALT']=ALT
variant['index']=index
variant['variant_id']='-'.join([str(chrom),str(POS),variant['REF'],variant['ALT']])
variant['synonym_variant_id']='{}-{}-{}-{}'.format(str(chrom), str(pos), ref, alt,)
variant['hgvs']='chr%s:g.%s%s>%s' % (str(chrom), str(POS), REF, ALT,)
#print [geno[h].split(':')[0].split('/') for h in geno]
variant['hom_samples']=[h for h in geno if geno[h].split(':')[0]==homozygous_genotype][0:limit]
variant['HOM_COUNT']=len(variant['hom_samples'])
variant['het_samples']=[h for h in geno if geno[h].split(':')[0]==heterozygous_genotype][0:limit]
variant['HET_COUNT']=len(variant['het_samples'])
variant['wt_samples']=[h for h in geno if geno[h].split(':')[0]=='0/0'][1:100]
variant['WT_COUNT']=len([h for h in geno if geno[h].split(':')[0]=='0/0'])
variant['MISS_COUNT']=len([h for h in geno if geno[h].split(':')[0]=='./.'])
variant['allele_num']= 2*(variant['HOM_COUNT'] + variant['HET_COUNT']+variant['WT_COUNT'])
variant['allele_count']=2*variant['HOM_COUNT'] + variant['HET_COUNT']
if individual: variant['individual']=geno[individual]
#variant['site_quality'] = variant['QUAL']
#variant['filter'] = variant['FILTER']
if variant['WT_COUNT']==0:
variant['allele_freq'] = None
else:
variant['allele_freq'] = float(variant['HET_COUNT']+2*variant['HOM_COUNT']) / float(2*variant['WT_COUNT'])
samples=variant['het_samples']+variant['hom_samples']
#variant['hpo']=[p for p in get_db(app.config['DB_NAME_PATIENTS']).patients.find({'external_id':{'$in':samples}},{'_id':0,'features':1,'external_id':1})]
return variant
for r in records:
geno=dict(zip(headers, r))
POS=geno['POS']
REF=geno['REF']
if verbose:
print 'POS', POS
print 'REF', REF
for i, ALT, in enumerate(geno['ALT'].split(',')):
if verbose: print 'ALT', ALT
# insertion
if ref=='-' and REF+alt==ALT: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
# deletion
# replace leftmost
elif alt=='-' and ALT==REF.replace(ref,''): return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
# replace rightmost
elif alt=='-' and ALT==REF[::-1].replace(ref[::-1], "", 1)[::-1]: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
#
elif alt=='-' and ref==REF and ALT=='*': return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
elif alt=='0' and ALT=='*' and ref==REF: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
elif alt==ALT and ref==REF: return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
continue
def vcf_query_gene():
tb=pysam.TabixFile('/slms/gee/research/vyplab/UCLex/%s/%s_chr%s.vcf.gz' % (RELEASE, RELEASE, gene.chrom,))
region ='%s:%s-%s' % (str(gene.chrom), str(gene.start), str(gene.stop),)
headers=[h for h in tb.header]
headers=(headers[len(headers)-1]).strip('#').strip().split('\t')
records=[dict(zip(headers,r.strip().split('\t'))) for r in tb.fetch(region)]
print(len(records))
records=dict([('%s-%s-%s-%s' % (r['CHROM'], r['POS'], r['REF'], r['ALT'],),r,) for r in records])
|
logust79/phenopolis
|
vcf/__init__.py
|
Python
|
mit
| 12,599
|
[
"pysam"
] |
2df7e74340a1481d1c419fcdb92df344a42b6945204e2e3974c639a17302f712
|
import requests
#print requests ## DEBUG
TARGET_URL = "https://dirac3.ba.infn.it:5010/DIRAC/"
#r = requests.get(TARGET_URL,verify=False)
#print r ## DEBUG
#print dir(r) ## DEBUG
#print r.text
#r = requests.get(TARGET_URL+"galaxy",verify=False)
#print r.text
LOGIN = "user_servizio"
PASSWORD = "OO31S3r2tRGEoEiUoqv4OuGx6"
#r = requests.get(TARGET_URL+"galaxy",verify=False, auth=('susan', 'bye'))
r = requests.get(TARGET_URL+"galaxy",verify=False, auth=(LOGIN, PASSWORD))
print r.text
|
SuperDIRAC/TESTDIRAC
|
WebServerSystem/tmpdir/xalfonso/ora-test.py
|
Python
|
gpl-3.0
| 491
|
[
"DIRAC",
"Galaxy"
] |
9d03e998ff9abd6f8f38e11656d20018c5f3df975144cd776977358426e57d91
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author: Jim Dalton
options:
state:
description:
- Create or destroy the ELB
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
    required: false
default: None
version_added: "1.6"
health_check:
description:
      - An associative array of health check configuration settings (see example)
    required: false
default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
cross_az_load_balancing:
description:
      - Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
extends_documentation_fragment: aws
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Basic VPC provisioning example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates an ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456, subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
"""
import sys
import os
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
from boto.ec2.elb.healthcheck import HealthCheck
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
cross_az_load_balancing=None, region=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.zones = zones
self.purge_zones = purge_zones
self.security_group_ids = security_group_ids
self.health_check = health_check
self.subnets = subnets
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
self.status = 'gone'
self.elb_conn = self._get_elb_connection()
self.elb = self._get_elb()
def ensure_ok(self):
"""Create the ELB"""
if not self.elb:
# Zones and listeners will be added at creation
self._create_elb()
else:
self._set_zones()
self._set_security_groups()
self._set_elb_listeners()
self._set_subnets()
self._set_health_check()
# boto has introduced support for some ELB attributes in
# different versions, so we check first before trying to
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status
}
else:
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme
}
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [l.get_complex_tuple()
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
return info
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
def _create_elb(self):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
security_groups=self.security_group_ids,
complex_listeners=listeners,
subnets=self.subnets,
scheme=self.scheme)
if self.elb:
self.changed = True
self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
"""
Creates listeners specified by self.listeners; overwrites existing
listeners on these ports; removes extraneous listeners
"""
listeners_to_add = []
listeners_to_remove = []
listeners_to_keep = []
# Check for any listeners we need to create or overwrite
for listener in self.listeners:
listener_as_tuple = self._listener_as_tuple(listener)
# First we loop through existing listeners to see if one is
# already specified for this port
existing_listener_found = None
for existing_listener in self.elb.listeners:
# Since ELB allows only one listener on each incoming port, a
# single match on the incoming port is all we're looking for
if existing_listener[0] == listener['load_balancer_port']:
existing_listener_found = existing_listener.get_complex_tuple()
break
if existing_listener_found:
# Does it match exactly?
if listener_as_tuple != existing_listener_found:
# The ports are the same but something else is different,
# so we'll remove the existing one and add the new one
listeners_to_remove.append(existing_listener_found)
listeners_to_add.append(listener_as_tuple)
else:
# We already have this listener, so we're going to keep it
listeners_to_keep.append(existing_listener_found)
else:
# We didn't find an existing listener, so just add the new one
listeners_to_add.append(listener_as_tuple)
# Check for any extraneous listeners we need to remove, if desired
if self.purge_listeners:
for existing_listener in self.elb.listeners:
existing_listener_tuple = existing_listener.get_complex_tuple()
if existing_listener_tuple in listeners_to_remove:
# Already queued for removal
continue
if existing_listener_tuple in listeners_to_keep:
# Keep this one around
continue
# Since we're not already removing it and we don't need to keep
# it, let's get rid of it
listeners_to_remove.append(existing_listener_tuple)
if listeners_to_remove:
self._delete_elb_listeners(listeners_to_remove)
if listeners_to_add:
self._create_elb_listeners(listeners_to_add)
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
listener['load_balancer_port'],
listener['instance_port'],
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
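# Illustrative examples (not part of the original module) of the tuples
# _listener_as_tuple produces, using hypothetical listener dicts:
#   {'load_balancer_port': 80, 'instance_port': 80, 'protocol': 'http'}
#       -> (80, 80, 'HTTP', 'HTTP')
#   {'load_balancer_port': 443, 'instance_port': 80, 'protocol': 'https',
#    'ssl_certificate_id': 'arn:aws:iam::123456789012:server-certificate/cert'}
#       -> (443, 80, 'HTTPS', 'HTTPS',
#           'arn:aws:iam::123456789012:server-certificate/cert')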
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError, e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError, e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _attach_subnets(self, subnets):
self.elb_conn.attach_lb_to_subnets(self.name, subnets)
self.changed = True
def _detach_subnets(self, subnets):
self.elb_conn.detach_lb_from_subnets(self.name, subnets)
self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
# This just makes it easier to compare each of the attributes
# and look for changes. Keys are attributes of the current
# health_check; values are desired values of new health_check
health_check_config = {
"target": self._get_health_check_target(),
"timeout": self.health_check['response_timeout'],
"interval": self.health_check['interval'],
"unhealthy_threshold": self.health_check['unhealthy_threshold'],
"healthy_threshold": self.health_check['healthy_threshold'],
}
update_health_check = False
# The health_check attribute is *not* set on newly created
# ELBs! So we have to create our own.
if not self.elb.health_check:
self.elb.health_check = HealthCheck()
for attr, desired_value in health_check_config.iteritems():
if getattr(self.elb.health_check, attr) != desired_value:
setattr(self.elb.health_check, attr, desired_value)
update_health_check = True
if update_health_check:
self.elb.configure_health_check(self.elb.health_check)
self.changed = True
def _check_attribute_support(self, attr):
return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
attributes.cross_zone_load_balancing.enabled = True
else:
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
def _set_connection_draining_timeout(self):
attributes = self.elb.get_attributes()
if self.connection_draining_timeout is not None:
attributes.connection_draining.enabled = True
attributes.connection_draining.timeout = self.connection_draining_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
else:
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list'},
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list'},
purge_zones={'default': False, 'required': False, 'type': 'bool'},
security_group_ids={'default': None, 'required': False, 'type': 'list'},
health_check={'default': None, 'required': False, 'type': 'dict'},
subnets={'default': None, 'required': False, 'type': 'list'},
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False},
cross_az_load_balancing={'default': None, 'required': False}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
listeners = module.params['listeners']
purge_listeners = module.params['purge_listeners']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
security_group_ids = module.params['security_group_ids']
health_check = module.params['health_check']
subnets = module.params['subnets']
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
if state == 'present' and not listeners:
module.fail_json(msg="At least one port is required for ELB creation")
if state == 'present' and not (zones or subnets):
module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, cross_az_load_balancing,
region=region, **aws_connect_params)
# check for unsupported attributes for this version of boto
if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':
elb_man.ensure_gone()
ansible_facts = {'ec2_elb': 'info'}
ec2_facts_result = dict(changed=elb_man.changed,
elb=elb_man.get_info(),
ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
0x46616c6b/ansible-modules-core
|
cloud/ec2_elb_lb.py
|
Python
|
gpl-3.0
| 26,567
|
[
"Dalton"
] |
916d36d920953f5152393d312d85e39871613580147ab51bdda0ee0d1ab13964
|
# -*- coding: utf-8 -*-
#
# test_parrot_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the parrot_neuron in NEST.
import nest
import unittest
import math
@nest.check_stack
class ParrotNeuronTestCase(unittest.TestCase):
"""Check parrot_neuron spike repetition properties"""
def setUp(self):
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
# set up source spike generator, as well as parrot neurons
self.spike_time = 1.
self.delay = .2
self.source = nest.Create("spike_generator", 1,
{"spike_times": [self.spike_time]})
self.parrot = nest.Create('parrot_neuron')
self.spikes = nest.Create("spike_detector")
# record source and parrot spikes
nest.Connect(self.source, self.spikes)
nest.Connect(self.parrot, self.spikes)
def test_ParrotNeuronRepeatSpike(self):
"""Check parrot_neuron repeats spikes on port 0"""
# connect with arbitrary delay
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Simulate(self.spike_time + 2 * self.delay)
# get spike from parrot neuron
events = nest.GetStatus(self.spikes)[0]["events"]
post_time = events['times'][events['senders'] == self.parrot[0]]
# assert spike was repeated at correct time
assert post_time, "Parrot neuron failed to repeat spike."
assert self.spike_time + self.delay == post_time, \
"Parrot neuron repeated spike at wrong delay"
def test_ParrotNeuronIgnoreSpike(self):
"""Check parrot_neuron ignores spikes on port 1"""
# connect with arbitrary delay to port 1
nest.Connect(self.source, self.parrot,
syn_spec={"receptor_type": 1, "delay": self.delay})
nest.Simulate(self.spike_time + 2. * self.delay)
# get spike from parrot neuron, assert it was ignored
events = nest.GetStatus(self.spikes)[0]["events"]
post_time = events['times'][events['senders'] == self.parrot[0]]
assert len(post_time) == 0, \
"Parrot neuron failed to ignore spike arriving on port 1"
def test_ParrotNeuronOutgoingMultiplicity(self):
"""
Check parrot_neuron correctly repeats multiple spikes
The parrot_neuron receives two spikes in a single time step.
We check that both spikes are forwarded to the spike_detector.
"""
# connect twice
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Simulate(self.spike_time + 2. * self.delay)
# get spikes from parrot neuron, assert two were transmitted
events = nest.GetStatus(self.spikes)[0]["events"]
post_times = events['times'][events['senders'] == self.parrot[0]]
assert len(post_times) == 2 and post_times[0] == post_times[1], \
"Parrot neuron failed to correctly repeat multiple spikes."
@nest.check_stack
class ParrotNeuronPoissonTestCase(unittest.TestCase):
"""Check parrot_neuron spike repetition properties"""
def test_ParrotNeuronIncomingMultiplicity(self):
"""
Check parrot_neuron heeds multiplicity information in incoming spikes.
This test relies on the fact that poisson_generator transmits
multiple spikes during a time step using multiplicity, and that
these spikes are delivered directly, i.e., without multiplicity-
unrolling in send_remote().
We create a high-rate poisson_generator. If parrot_neuron
ignored multiplicity, it would only transmit one spike per time
step. We chain two parrot_neurons to check against any loss.
"""
# set up source spike generator, as well as parrot neurons
h = 0.1 # ms
rate = 1000000. # spikes / s
delay = 1. # ms
t_base = 1000. # ms
t_sim = t_base + 3 * delay # after t_sim, spikes from t_base arrived
spikes_expected = rate * t_base / 1000.
spikes_std = math.sqrt(spikes_expected)
# if the test is to be meaningful we must expect significantly more
# spikes than time steps
assert spikes_expected - 3 * spikes_std > 10. * t_sim / h, \
"Internal inconsistency: too few spikes."
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
nest.SetKernelStatus({'resolution': h,
'grng_seed': 123,
'rng_seeds': [456]})
source = nest.Create('poisson_generator', params={'rate': rate})
parrots = nest.Create('parrot_neuron', 2)
detect = nest.Create('spike_detector')
nest.Connect(source, parrots[:1], syn_spec={'delay': delay})
nest.Connect(parrots[:1], parrots[1:], syn_spec={'delay': delay})
nest.Connect(parrots[1:], detect)
nest.Simulate(t_sim)
n_spikes = nest.GetStatus(detect)[0]['n_events']
assert n_spikes > spikes_expected - 3 * spikes_std, \
"parrot_neuron loses spikes."
assert n_spikes < spikes_expected + 3 * spikes_std, \
"parrot_neuron adds spikes."
@nest.check_stack
class ParrotNeuronSTDPTestCase(unittest.TestCase):
"""
Check STDP protocol between two parrot_neurons connected by a stdp_synapse.
Exact pre- and post-synaptic spike times are set by spike_generators
connected to each parrot neuron. Additional spikes sent through the
stdp_synapse are explicitly ignored in the postsynaptic parrot_neuron
by setting the stdp_synapse to connect to port 1.
"""
def run_protocol(self, dt):
"""Set up a network with pre-post spike pairings with t_post - t_pre = dt"""
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
# set pre and postsynaptic spike times
delay = 1. # delay for connections
dspike = 100. # ISI
# set the correct real spike times for generators (correcting for delays)
pre_times = [100., 100. + dspike]
post_times = [k+dt for k in pre_times]
# create spike_generators with these times
pre_spikes = nest.Create("spike_generator", 1, {"spike_times": pre_times})
post_spikes = nest.Create("spike_generator", 1, {"spike_times": post_times})
# create parrot neurons and connect spike_generators
pre_parrot = nest.Create("parrot_neuron", 1)
post_parrot = nest.Create("parrot_neuron", 1)
nest.Connect(pre_spikes, pre_parrot, syn_spec={"delay": delay})
nest.Connect(post_spikes, post_parrot, syn_spec={"delay": delay})
# create spike detector
spikes = nest.Create("spike_detector")
nest.Connect(pre_parrot, spikes)
nest.Connect(post_parrot, spikes)
# connect both parrot neurons with a stdp synapse onto port 1
# thereby spikes transmitted through the stdp connection are
# not repeated postsynaptically.
syn_spec = {
"model": "stdp_synapse",
"receptor_type": 1, # set receptor 1 postsynaptically, to not generate extra spikes
}
conn_spec = {
"rule": "one_to_one",
}
nest.Connect(pre_parrot, post_parrot, syn_spec=syn_spec, conn_spec=conn_spec)
# get STDP synapse and weight before protocol
syn = nest.GetConnections(source=pre_parrot, synapse_model="stdp_synapse")
syn_status = nest.GetStatus(syn)[0]
w_pre = syn_status['weight']
last_time = max(pre_times[-1], post_times[-1])
nest.Simulate(last_time + 2 * delay)
# get weight post protocol
syn_status = nest.GetStatus(syn)[0]
w_post = syn_status['weight']
return w_pre, w_post
def test_ParrotNeuronSTDPProtocolPotentiation(self):
"""Check pre-post spike pairings between parrot_neurons increments weights."""
dt = 10.
w_pre, w_post = self.run_protocol(dt)
assert w_pre < w_post, "Parrot neuron STDP potentiation protocol failed to elicit positive weight changes."
def test_ParrotNeuronSTDPProtocolDepression(self):
"""Check post-pre spike pairings between parrot_neurons decrement weights."""
dt = -10.
w_pre, w_post = self.run_protocol(dt)
assert w_pre > w_post, "Parrot neuron STDP depression protocol failed to elicit negative weight changes."
def suite():
# makeSuite is sort of obsolete http://bugs.python.org/issue2721
# using loadTestsFromTestCase instead.
suite1 = unittest.TestLoader().loadTestsFromTestCase(ParrotNeuronTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(ParrotNeuronPoissonTestCase)
suite3 = unittest.TestLoader().loadTestsFromTestCase(ParrotNeuronSTDPTestCase)
return unittest.TestSuite([suite1, suite2, suite3])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
zifeo/nest-simulator
|
pynest/nest/tests/test_parrot_neuron.py
|
Python
|
gpl-2.0
| 9,793
|
[
"NEURON"
] |
e2f6ad739b88f2f384dc49e0ac4ab0c3b6012e3b73df94e5941798be1e7f34f8
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
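# Illustrative usage of replace_vars (assumed config values, not from the
# original file):
#   config = {'TITANIUM_SDK': '/Library/Application Support/Titanium'}
#   replace_vars(config, '$(TITANIUM_SDK)/iphone')
#   -> '/Library/Application Support/Titanium/iphone'
# Tokens whose key is missing from config are left unexpanded, because the
# loop breaks as soon as a lookup fails.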
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file: html})
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.dezinezync.dynamictype.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','ComDezinezyncDynamictypeModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key, value = line.split(':', 1)
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
dezinezync/DZDynamicType
|
build.py
|
Python
|
mit
| 6,816
|
[
"VisIt"
] |
033dc201cb2f1dc0ee17eff56e7a4023ec4a4712f030f50067d45cb61b23cd3b
|
r"""
Incompressible Stokes flow with Navier (slip) boundary conditions, flow driven
by a moving wall and a small diffusion for stabilization.
This example demonstrates the use of `no-penetration` boundary conditions as
well as `edge direction` boundary conditions together with Navier or slip
boundary conditions.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u}
- \int_{\Omega} p\ \nabla \cdot \ul{v}
+ \int_{\Gamma_1} \beta \ul{v} \cdot (\ul{u} - \ul{u}_d)
+ \int_{\Gamma_2} \beta \ul{v} \cdot \ul{u}
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} \mu \nabla q \cdot \nabla p
+ \int_{\Omega} q\ \nabla \cdot \ul{u}
= 0
\;, \quad \forall q \;,
where :math:`\nu` is the fluid viscosity, :math:`\beta` is the slip
coefficient, :math:`\mu` is the (small) numerical diffusion coefficient,
:math:`\Gamma_1` is the top wall that moves with the given driving velocity
:math:`\ul{u}_d` and :math:`\Gamma_2` are the remaining walls. The Navier
conditions are in effect on both :math:`\Gamma_1`, :math:`\Gamma_2` and are
expressed by the corresponding integrals in the equations above.
The `no-penetration` boundary conditions are applied on :math:`\Gamma_1`,
:math:`\Gamma_2`, except the vertices of the block edges, where the `edge
direction` boundary conditions are applied. Optionally, Dirichlet boundary
conditions can be applied on the inlet, see the code below.
The mesh is created by the ``gen_block_mesh()`` function - try different mesh
dimensions and resolutions below. For large meshes use the ``'ls_i'`` linear
solver - PETSc + petsc4py is needed in that case.
See also :ref:`navier_stokes-stokes_slip_bc_penalty`.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.homogenization.utils import define_box_regions
# Mesh dimensions.
dims = nm.array([3, 1, 0.5])
# Mesh resolution: increase to improve accuracy.
shape = [11, 15, 15]
def mesh_hook(mesh, mode):
"""
Generate the block mesh.
"""
if mode == 'read':
mesh = gen_block_mesh(dims, shape, [0, 0, 0], name='user_block',
verbose=False)
return mesh
elif mode == 'write':
pass
filename_mesh = UserMeshIO(mesh_hook)
regions = define_box_regions(3, 0.5 * dims)
regions.update({
'Omega' : 'all',
'Edges_v' : ("""(r.Near *v r.Bottom) +v
(r.Bottom *v r.Far) +v
(r.Far *v r.Top) +v
(r.Top *v r.Near)""", 'edge'),
'Gamma1_f' : ('copy r.Top', 'face'),
'Gamma2_f' : ('r.Near +v r.Bottom +v r.Far', 'face'),
'Gamma_f' : ('r.Gamma1_f +v r.Gamma2_f', 'face'),
'Gamma_v' : ('r.Gamma_f -v r.Edges_v', 'face'),
'Inlet_f' : ('r.Left -v r.Gamma_f', 'face'),
})
fields = {
'velocity' : ('real', 3, 'Omega', 1),
'pressure' : ('real', 1, 'Omega', 1),
}
def get_u_d(ts, coors, region=None):
"""
Given stator velocity.
"""
out = nm.zeros_like(coors)
out[:] = [1.0, 1.0, 0.0]
return out
functions = {
'get_u_d' : (get_u_d,),
}
variables = {
'u' : ('unknown field', 'velocity', 0),
'v' : ('test field', 'velocity', 'u'),
'u_d' : ('parameter field', 'velocity',
{'setter' : 'get_u_d'}),
'p' : ('unknown field', 'pressure', 1),
'q' : ('test field', 'pressure', 'p'),
}
# Try setting the inlet velocity by un-commenting the 'inlet' ebcs.
ebcs = {
## 'inlet' : ('Inlet_f', {'u.0' : 1.0, 'u.[1, 2]' : 0.0}),
}
lcbcs = {
'walls' : ('Gamma_v', {'u.all' : None}, None, 'no_penetration',
'normals_Gamma.vtk'),
'edges' : ('Edges_v', [(-0.5, 1.5)], {'u.all' : None}, None,
'edge_direction', 'edges_Edges.vtk'),
}
materials = {
'm' : ({
'nu' : 1e-3,
'beta' : 1e-2,
'mu' : 1e-10,
},),
}
equations = {
'balance' :
"""dw_div_grad.5.Omega(m.nu, v, u)
- dw_stokes.5.Omega(v, p)
+ dw_surface_dot.5.Gamma1_f(m.beta, v, u)
+ dw_surface_dot.5.Gamma2_f(m.beta, v, u)
=
+ dw_surface_dot.5.Gamma1_f(m.beta, v, u_d)""",
'incompressibility' :
"""dw_laplace.5.Omega(m.mu, q, p)
+ dw_stokes.5.Omega(u, q) = 0""",
}
solvers = {
'ls_d' : ('ls.scipy_direct', {}),
'ls_i' : ('ls.petsc', {
'method' : 'bcgsl', # ksp_type
'precond' : 'bjacobi', # pc_type
'sub_precond' : 'ilu', # sub_pc_type
'eps_a' : 0.0, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e10, # Divergence tolerance.
'i_max' : 2500, # maxits
}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
options = {
'nls' : 'newton',
'ls' : 'ls_d',
}
|
lokik/sfepy
|
examples/navier_stokes/stokes_slip_bc.py
|
Python
|
bsd-3-clause
| 4,819
|
[
"VTK"
] |
7a12e74a9bff3bd437f11bc25253f44d84d18bec633da49cb1694387d9d5157c
|
"""
@name: Modules/House/Family/insteon/insteon_constants.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Apr 19, 2013
@summary: This module is for communicating with Insteon controllers.
Note! This is designed for: 'from Insteon_constants import *'
"""
__updated__ = '2020-02-18'
STX = 0x02
ACK = 0x06
NAK = 0x15
DEVICE_TYPE = ['N/A', 'Lighting', 'HVAC', 'Security'] # documenting it
# PLM Serial Commands
PLM_COMMANDS = {
'insteon_received': 0x50,
'insteon_ext_received': 0x51,
'x10_received': 0x52,
'all_link_complete': 0x53,
'plm_button_event': 0x54,
'user_user_reset': 0x55,
'all_link_clean_failed': 0x56,
'all_link_record': 0x57,
'all_link_clean_status': 0x58,
'plm_info': 0x60,
'all_link_send': 0x61,
'insteon_send': 0x62,
'x10_send': 0x63,
'all_link_start': 0x64,
'plm_reset': 0x67,
'plm_first_all_link': 0x69,
'plm_next_all_link': 0x6A,
'plm_set_config': 0x6B,
'plm_led_on': 0x6D,
'plm_led_off': 0x6E,
'manage_all_link_record': 0x6F,
'insteon_nak': 0x70,
'insteon_ack': 0x71,
'rf_sleep': 0x72,
'plm_get_config': 0x73
}
MESSAGE_TYPES = {
'assign_to_group': 0x01,
'delete_from_group': 0x02,
'product_data_request': 0x03,
'cleanup_success': 0x06,
'linking_mode': 0x09,
'unlinking_mode': 0x0A,
'engine_version': 0x0D,
'ping' : 0x0F,
'id_request': 0x10,
'on': 0x11,
'on_fast': 0x12,
'off': 0x13,
'off_fast': 0x14,
'bright': 0x15,
'dim': 0x16,
'start_manual_change': 0x17,
'stop_manual_change': 0x18,
'status_request': 0x19,
'get_operating_flags': 0x1f,
'set_operating_flags': 0x20,
'do_read_ee': 0x24,
'remote_set_button_tap': 0x25,
'set_led_status': 0x27,
'set_address_msb': 0x28,
'poke': 0x29,
'poke_extended': 0x2a,
'peek': 0x2b,
'peek_internal': 0x2c,
'poke_internal': 0x2d,
'on_at_ramp_rate': 0x2e, # 'extended_set_get'
'off_at_ramp_rate': 0x2f,
'read_write_aldb': 0x2f,
# sprinkler_valve_on => 0x40,
# sprinkler_valve_off => 0x41,
# sprinkler_program_on => 0x42,
# sprinkler_program_off => 0x43,
# sprinkler_control => 0x44,
# sprinkler_timers_request => 0x45,
'thermostat_temp_up': 0x68,
'thermostat_temp_down': 0x69,
'thermostat_status': 0x6a,
'thermostat_control': 0x6b,
'thermostat_setpoint_cool': 0x6c,
'thermostat_setpoint_heat': 0x6d,
'thermostat_report_temperature': 0x6e,
'thermostat_report_humidity': 0x6f,
'thermostat_report_mode': 0x70,
'thermostat_report_cool_setpoint': 0x71,
'thermostat_report_heat_setpoint': 0x72
}
# This is the length of the response from the PLM.
# Wait till we get the proper number of bytes before decoding the response.
# We sometimes only have a partial response when reading async.
MESSAGE_LENGTH = {
0x50: 11,
0x51: 25,
0x52: 4,
0x53: 10,
0x54: 3,
0x55: 2,
0x56: 7,
0x57: 10,
0x58: 3,
0x60: 9,
0x61: 6,
0x62: 9,
0x63: 5,
0x64: 5,
0x65: 3,
0x66: 6,
0x67: 3,
0x68: 4,
0x69: 3,
0x6A: 3,
0x6B: 4,
0x6C: 3,
0x6D: 3,
0x6E: 3,
0x6F: 12,
0x70: 4,
0x71: 5,
0x72: 3,
0x73: 6
}
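# Minimal sketch (not part of the original module) of how MESSAGE_LENGTH
# can gate async decoding. It assumes `frame` is a sequence of integer
# byte values accumulated from the serial port, starting at an STX.
def _is_complete_response(frame):
    """Return True once the full PLM response has arrived (lengths include STX)."""
    if len(frame) < 2 or frame[0] != STX:
        return False
    expected = MESSAGE_LENGTH.get(frame[1])
    # Unknown command byte, or fewer bytes than the table requires: keep reading.
    return expected is not None and len(frame) >= expected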
COMMAND_LENGTH = {
0x60: 2,
0x61: 5,
0x62: 8,
0x63: 4,
0x64: 4,
0x65: 2,
0x66: 5,
0x67: 2,
0x68: 3,
0x69: 2,
0x6A: 2,
0x6B: 3,
0x6C: 2,
0x6D: 2,
0x6E: 2,
0x6F: 11,
0x70: 3,
0x71: 4,
0x72: 2,
0x73: 2
}
X10_HOUSE = {
0x00: 'M',
0x01: 'E',
0x02: 'C',
0x03: 'K',
0x04: 'O',
0x05: 'G',
0x06: 'A',
0x07: 'I',
0x08: 'N',
0x09: 'F',
0x0A: 'D',
0x0B: 'L',
0x0C: 'P',
0x0D: 'H',
0x0E: 'B',
0x0F: 'J'
}
X10_UNIT = {
0x00: '13',
0x01: '5',
0x02: '3',
0x03: '11',
0x04: '15',
0x05: '7',
0x06: '1',
0x07: '9',
0x08: '14',
0x09: '6',
0x0A: '4',
0x0B: '12',
0x0C: '16',
0x0D: '8',
0x0E: '2',
0x0F: '10'
}
X10_COMMAND = {
0x00: 'All Units Off',
0x01: 'All Lights On',
0x02: 'On',
0x03: 'Off',
0x04: 'Dim',
0x05: 'Bright',
0x06: 'All Lights Off',
0x07: 'Extend Code',
0x08: 'Hail Request',
0x09: 'Hail Acknowledge',
0x0A: 'Preset Dim',
0x0B: 'Preset Dim',
0x0C: 'Extended Data (analog)',
0x0D: 'Status = On',
0x0E: 'Status = Off',
0x0F: 'Status Request'
}
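# Illustrative decode (assumed byte layout, not from the original file):
# in a received X10 byte the house code sits in the high nibble and the
# unit or command code in the low nibble, so 0x6A decodes via
# X10_HOUSE[0x06] -> 'A' and X10_UNIT[0x0A] -> '4'.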
class InsteonError(Exception):
"""
General Insteon error.
"""
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/Family/Insteon/insteon_constants.py
|
Python
|
mit
| 4,802
|
[
"Brian"
] |
28e15d033f920d25fce90b06551d125bd7795e66109c081dd59ed3aef60afb61
|
# An implementation of the Scaler interface using CCP4 programs and Aimless.
from __future__ import annotations
import copy
import logging
import math
import os
import re
from xia2.Handlers.CIF import CIF, mmCIF
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Syminfo import Syminfo
from xia2.lib.bits import is_mtz_file, nifty_power_of_ten, transpose_loggraph
from xia2.lib.SymmetryLib import sort_lattices
from xia2.Modules import MtzUtils
from xia2.Modules.Scaler.CCP4ScalerHelpers import (
CCP4ScalerHelper,
SweepInformationHandler,
_prepare_pointless_hklin,
ersatz_resolution,
get_umat_bmat_lattice_symmetry_from_mtz,
)
from xia2.Modules.Scaler.CommonScaler import CommonScaler as Scaler
from xia2.Modules.Scaler.rebatch import rebatch
from xia2.Toolkit.AimlessSurface import (
evaluate_1degree,
generate_map,
scrape_coefficients,
)
from xia2.Wrappers.CCP4.CCP4Factory import CCP4Factory
logger = logging.getLogger("xia2.Modules.Scaler.CCP4ScalerA")
class CCP4ScalerA(Scaler):
"""An implementation of the Scaler interface using CCP4 programs."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sweep_handler = None
self._scalr_scaled_refl_files = {}
self._wavelengths_in_order = []
# flags to keep track of the corrections we will be applying
model = PhilIndex.params.xia2.settings.scale.model
self._scalr_correct_absorption = "absorption" in model
self._scalr_correct_decay = "decay" in model
self._scalr_corrections = True
# useful handles...!
self._prepared_reflections = None
self._reference = None
self._factory = CCP4Factory()
self._helper = CCP4ScalerHelper()
# overloaded from the Scaler interface... to plumb in the factory
def to_dict(self):
obj = super().to_dict()
if self._sweep_handler is not None:
obj["_sweep_handler"] = self._sweep_handler.to_dict()
obj["_prepared_reflections"] = self._prepared_reflections
return obj
@classmethod
def from_dict(cls, obj):
return_obj = super().from_dict(obj)
if return_obj._sweep_handler is not None:
return_obj._sweep_handler = SweepInformationHandler.from_dict(
return_obj._sweep_handler
)
return_obj._prepared_reflections = obj["_prepared_reflections"]
return return_obj
def set_working_directory(self, working_directory):
self._working_directory = working_directory
self._factory.set_working_directory(working_directory)
self._helper.set_working_directory(working_directory)
# this is an overload from the factory - it returns Aimless wrapper set up
# with the desired corrections
def _updated_aimless(self):
"""Generate a correctly configured Aimless..."""
aimless = None
params = PhilIndex.params.ccp4.aimless
if not self._scalr_corrections:
aimless = self._factory.Aimless()
else:
aimless = self._factory.Aimless(
absorption_correction=self._scalr_correct_absorption,
decay_correction=self._scalr_correct_decay,
)
aimless.set_mode(PhilIndex.params.xia2.settings.scale.scales)
aimless.set_spacing(params.rotation.spacing)
aimless.set_bfactor(brotation=params.brotation.spacing)
if PhilIndex.params.xia2.settings.small_molecule:
aimless.set_spacing(15.0)
aimless.set_bfactor(
bfactor=PhilIndex.params.xia2.settings.small_molecule_bfactor
)
aimless.set_surface_tie(params.surface_tie)
aimless.set_surface_link(params.surface_link)
if params.secondary.frame == "camera":
secondary = "secondary"
else:
secondary = "absorption"
lmax = params.secondary.lmax
aimless.set_secondary(secondary, lmax)
if PhilIndex.params.xia2.settings.multi_crystal:
aimless.set_surface_link(False)
# if profile fitting off use summation intensities
if PhilIndex.params.xia2.settings.integration.profile_fitting:
aimless.set_intensities(params.intensities)
else:
aimless.set_intensities("summation")
return aimless
def _pointless_indexer_jiffy(self, hklin, refiner):
return self._helper.pointless_indexer_jiffy(hklin, refiner)
def _pointless_indexer_multisweep(self, hklin, refiners):
return self._helper.pointless_indexer_multisweep(hklin, refiners)
def _scale_prepare(self):
"""Perform all of the preparation required to deliver the scaled
data. This should sort together the reflection files, ensure that
they are correctly indexed (via pointless) and generally tidy
things up."""
# acknowledge all of the programs we are about to use...
Citations.cite("pointless")
Citations.cite("aimless")
Citations.cite("ccp4")
# ---------- GATHER ----------
self._sweep_handler = SweepInformationHandler(self._scalr_integraters)
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
exclude_sweep = False
for sweep in PhilIndex.params.xia2.settings.sweep:
if sweep.id == sname and sweep.exclude:
exclude_sweep = True
break
if exclude_sweep:
self._sweep_handler.remove_epoch(epoch)
logger.debug("Excluding sweep %s", sname)
else:
logger.debug("%-30s %s/%s/%s", "adding data from:", xname, dname, sname)
# gather data for all images which belonged to the parent
# crystal - allowing for the fact that things could go wrong
# e.g. epoch information not available, exposure times not in
# headers etc...
for e in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(e)
assert is_mtz_file(
si.get_reflections()
), f"{si.get_reflections()!r} is not a valid MTZ file"
p, x = self._sweep_handler.get_project_info()
self._scalr_pname = p
self._scalr_xname = x
# verify that the lattices are consistent, calling eliminate if
# they are not N.B. there could be corner cases here
need_to_return = False
multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
# START OF if more than one epoch
if len(self._sweep_handler.get_epochs()) > 1:
# if we have multi-sweep-indexing going on then logic says all should
# share common lattice & UB definition => this is not used here?
# START OF if multi_sweep indexing and not input pg
if multi_sweep_indexing and not self._scalr_input_pointgroup:
pointless_hklins = []
max_batches = 0
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
batches = MtzUtils.batches_from_mtz(hklin)
if 1 + max(batches) - min(batches) > max_batches:
max_batches = max(batches) - min(batches) + 1
logger.debug("Biggest sweep has %d batches", max_batches)
max_batches = nifty_power_of_ten(max_batches)
counter = 0
refiners = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
integrater = si.get_integrater()
refiner = integrater.get_integrater_refiner()
refiners.append(refiner)
hklin = self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_%s_%s_prepointless.mtz"
% (pname, xname, dname, si.get_sweep_name()),
)
# we will want to delete this on exit
FileHandler.record_temporary_file(hklout)
first_batch = min(si.get_batches())
si.set_batch_offset(counter * max_batches - first_batch + 1)
rebatch(
hklin,
hklout,
first_batch=counter * max_batches + 1,
pname=pname,
xname=xname,
dname=dname,
)
pointless_hklins.append(hklout)
# update the counter & recycle
counter += 1
# SUMMARY - have added all sweeps to pointless_hklins
s = self._factory.Sortmtz()
pointless_hklin = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_sorted.mtz"
% (self._scalr_pname, self._scalr_xname),
)
s.set_hklout(pointless_hklin)
for hklin in pointless_hklins:
s.add_hklin(hklin)
s.sort()
# FIXME xia2-51 in here look at running constant scaling on the
# pointless hklin to put the runs on the same scale. Ref=[A]
pointless_const = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_const.mtz"
% (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(pointless_const)
aimless_const = self._factory.Aimless()
aimless_const.set_hklin(pointless_hklin)
aimless_const.set_hklout(pointless_const)
aimless_const.const()
pointless_const = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_const_unmerged.mtz"
% (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(pointless_const)
pointless_hklin = pointless_const
# FIXME xia2-51 in here need to pass all refiners to ensure that the
# information is passed back to all of them not just the last one...
logger.debug(
"Running multisweep pointless for %d sweeps", len(refiners)
)
pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
pointless_hklin, refiners
)
logger.debug("X1698: %s: %s", pointgroup, reindex_op)
lattices = [Syminfo.get_lattice(pointgroup)]
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
intgr = si.get_integrater()
hklin = si.get_reflections()
refiner = intgr.get_integrater_refiner()
if ntr:
intgr.integrater_reset_reindex_operator()
need_to_return = True
# SUMMARY - added all sweeps together into an mtz, ran
# _pointless_indexer_multisweep on this, made a list of one lattice
# and potentially reset reindex op?
# END OF if multi_sweep indexing and not input pg
# START OF if not multi_sweep, or input pg given
else:
lattices = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
intgr = si.get_integrater()
hklin = si.get_reflections()
refiner = intgr.get_integrater_refiner()
if self._scalr_input_pointgroup:
pointgroup = self._scalr_input_pointgroup
reindex_op = "h,k,l"
ntr = False
else:
pointless_hklin = self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
pointless_hklin, refiner
)
logger.debug("X1698: %s: %s", pointgroup, reindex_op)
lattice = Syminfo.get_lattice(pointgroup)
if lattice not in lattices:
lattices.append(lattice)
if ntr:
intgr.integrater_reset_reindex_operator()
need_to_return = True
# SUMMARY do pointless_indexer on each sweep, get lattices and make a list
# of unique lattices, potentially reset reindex op.
# END OF if not multi_sweep, or input pg given
# SUMMARY - still within if more than one epoch, now have a list of number
# of lattices
# START OF if multiple-lattices
if len(lattices) > 1:
# why not using pointless indexer jiffy??!
correct_lattice = sort_lattices(lattices)[0]
logger.info("Correct lattice asserted to be %s", correct_lattice)
# transfer this information back to the indexers
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
refiner = si.get_integrater().get_integrater_refiner()
sname = si.get_sweep_name()
state = refiner.set_refiner_asserted_lattice(correct_lattice)
if state == refiner.LATTICE_CORRECT:
logger.info(
"Lattice %s ok for sweep %s", correct_lattice, sname
)
elif state == refiner.LATTICE_IMPOSSIBLE:
raise RuntimeError(
f"Lattice {correct_lattice} impossible for {sname}"
)
elif state == refiner.LATTICE_POSSIBLE:
logger.info(
"Lattice %s assigned for sweep %s", correct_lattice, sname
)
need_to_return = True
# END OF if multiple-lattices
# SUMMARY - forced all lattices to be the same and hope it's okay.
# END OF if more than one epoch
# if one or more of them was not in the lowest lattice,
# need to return here to allow reprocessing
if need_to_return:
self.set_scaler_done(False)
self.set_scaler_prepare_done(False)
return
# ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------
# all should share the same pointgroup, unless twinned... in which
# case force them to be...
pointgroups = {}
reindex_ops = {}
probably_twinned = False
need_to_return = False
multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
# START OF if multi-sweep and not input pg
if multi_sweep_indexing and not self._scalr_input_pointgroup:
pointless_hklins = []
max_batches = 0
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
batches = MtzUtils.batches_from_mtz(hklin)
if 1 + max(batches) - min(batches) > max_batches:
max_batches = max(batches) - min(batches) + 1
logger.debug("Biggest sweep has %d batches", max_batches)
max_batches = nifty_power_of_ten(max_batches)
counter = 0
refiners = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
integrater = si.get_integrater()
refiner = integrater.get_integrater_refiner()
refiners.append(refiner)
hklin = self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_%s_%s_prepointless.mtz"
% (pname, xname, dname, si.get_sweep_name()),
)
# we will want to delete this on exit
FileHandler.record_temporary_file(hklout)
first_batch = min(si.get_batches())
si.set_batch_offset(counter * max_batches - first_batch + 1)
rebatch(
hklin,
hklout,
first_batch=counter * max_batches + 1,
pname=pname,
xname=xname,
dname=dname,
)
pointless_hklins.append(hklout)
# update the counter & recycle
counter += 1
# FIXME related to xia2-51 - this looks very very similar to the logic
# in [A] above - is this duplicated logic?
s = self._factory.Sortmtz()
pointless_hklin = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_sorted.mtz"
% (self._scalr_pname, self._scalr_xname),
)
s.set_hklout(pointless_hklin)
for hklin in pointless_hklins:
s.add_hklin(hklin)
s.sort()
pointless_const = os.path.join(
self.get_working_directory(),
f"{self._scalr_pname}_{self._scalr_xname}_prepointless_const.mtz",
)
FileHandler.record_temporary_file(pointless_const)
aimless_const = self._factory.Aimless()
aimless_const.set_hklin(pointless_hklin)
aimless_const.set_hklout(pointless_const)
aimless_const.const()
pointless_const = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_const_unmerged.mtz"
% (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(pointless_const)
pointless_hklin = pointless_const
pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
pointless_hklin, refiners
)
for epoch in self._sweep_handler.get_epochs():
pointgroups[epoch] = pointgroup
reindex_ops[epoch] = reindex_op
# SUMMARY ran pointless multisweep on combined mtz and made a dict
# of pointgroups and reindex_ops (all same)
# END OF if multi-sweep and not input pg
# START OF if not multi-sweep or pg given
else:
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
integrater = si.get_integrater()
refiner = integrater.get_integrater_refiner()
if self._scalr_input_pointgroup:
logger.debug(
"Using input pointgroup: %s", self._scalr_input_pointgroup
)
pointgroup = self._scalr_input_pointgroup
reindex_op = "h,k,l"
pt = False
else:
pointless_hklin = self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
pointless_hklin, refiner
)
logger.debug("X1698: %s: %s", pointgroup, reindex_op)
if ntr:
integrater.integrater_reset_reindex_operator()
need_to_return = True
if pt and not probably_twinned:
probably_twinned = True
logger.debug("Pointgroup: %s (%s)", pointgroup, reindex_op)
pointgroups[epoch] = pointgroup
reindex_ops[epoch] = reindex_op
# SUMMARY - for each sweep, run indexer jiffy and get reindex operators
# and pointgroups dictionaries (could be different between sweeps)
# END OF if not multi-sweep or pg given
overall_pointgroup = None
pointgroup_set = {pointgroups[e] for e in pointgroups}
if len(pointgroup_set) > 1 and not probably_twinned:
raise RuntimeError(
"non uniform pointgroups: %s" % str(list(pointgroup_set))
)
if len(pointgroup_set) > 1:
logger.debug(
"Probably twinned, pointgroups: %s",
" ".join(p.replace(" ", "") for p in pointgroup_set),
)
numbers = (Syminfo.spacegroup_name_to_number(ps) for ps in pointgroup_set)
overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
self._scalr_input_pointgroup = overall_pointgroup
logger.info("Twinning detected, assume pointgroup %s", overall_pointgroup)
need_to_return = True
else:
overall_pointgroup = pointgroup_set.pop()
# SUMMARY - Have handled if different pointgroups & chosen an overall_pointgroup
# which is the lowest symmetry
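# Illustrative example with hypothetical values: pointgroups {"P 4 2 2",
# "P 2 2 2"} map to spacegroup numbers {89, 16}; min() picks 16, so the
# overall_pointgroup becomes "P 2 2 2" (the lower-symmetry choice).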
# Now go through sweeps and do reindexing
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
integrater = si.get_integrater()
integrater.set_integrater_spacegroup_number(
Syminfo.spacegroup_name_to_number(overall_pointgroup)
)
integrater.set_integrater_reindex_operator(
reindex_ops[epoch], reason="setting point group"
)
# This will give us the reflections in the correct point group
si.set_reflections(integrater.get_integrater_intensities())
if need_to_return:
self.set_scaler_done(False)
self.set_scaler_prepare_done(False)
return
# in here now optionally work through the data files which should be
# indexed with a consistent point group, and transform the orientation
# matrices by the lattice symmetry operations (if possible) to get a
# consistent definition of U matrix modulo fixed rotations
if PhilIndex.params.xia2.settings.unify_setting:
self.unify_setting()
if self.get_scaler_reference_reflection_file():
self._reference = self.get_scaler_reference_reflection_file()
logger.debug("Using HKLREF %s", self._reference)
elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
self._reference = (
PhilIndex.params.xia2.settings.scale.reference_reflection_file
)
logger.debug("Using HKLREF %s", self._reference)
params = PhilIndex.params
use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
self.brehm_diederichs_reindexing()
# If not Brehm-Diederichs, set reference as first sweep
elif len(self._sweep_handler.get_epochs()) > 1 and not self._reference:
first = self._sweep_handler.get_epochs()[0]
si = self._sweep_handler.get_sweep_information(first)
self._reference = si.get_reflections()
# Now reindex to be consistent with first dataset - run pointless on each
# dataset with reference
if self._reference:
md = self._factory.Mtzdump()
md.set_hklin(self._reference)
md.dump()
datasets = md.get_datasets()
# then get the unit cell, lattice etc.
reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
reference_cell = md.get_dataset_info(datasets[0])["cell"]
# then compute the pointgroup from this...
# ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------
for epoch in self._sweep_handler.get_epochs():
# if we are working with unified UB matrix then this should not
# be a problem here (note, *if*; *should*)
# what about e.g. alternative P1 settings?
# see JIRA MXSW-904
if PhilIndex.params.xia2.settings.unify_setting:
continue
pl = self._factory.Pointless()
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
pl.set_hklin(
self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
)
hklout = os.path.join(
self.get_working_directory(),
"%s_rdx2.mtz" % os.path.split(hklin)[-1][:-4],
)
# we will want to delete this on exit
FileHandler.record_temporary_file(hklout)
# now set the initial reflection set as a reference...
pl.set_hklref(self._reference)
# https://github.com/xia2/xia2/issues/115 - should ideally iteratively
# construct a reference or a tree of correlations to ensure correct
# reference setting - however if small molecule assume has been
# multi-sweep-indexed so can ignore "fatal errors" - temporary hack
pl.decide_pointgroup(
ignore_errors=PhilIndex.params.xia2.settings.small_molecule
)
logger.debug("Reindexing analysis of %s", pl.get_hklin())
pointgroup = pl.get_pointgroup()
reindex_op = pl.get_reindex_operator()
logger.debug("Operator: %s", reindex_op)
# apply this...
integrater = si.get_integrater()
integrater.set_integrater_reindex_operator(
reindex_op, reason="match reference"
)
integrater.set_integrater_spacegroup_number(
Syminfo.spacegroup_name_to_number(pointgroup)
)
si.set_reflections(integrater.get_integrater_intensities())
md = self._factory.Mtzdump()
md.set_hklin(si.get_reflections())
md.dump()
datasets = md.get_datasets()
if len(datasets) > 1:
raise RuntimeError(
"more than one dataset in %s" % si.get_reflections()
)
# then get the unit cell, lattice etc.
lattice = Syminfo.get_lattice(md.get_spacegroup())
cell = md.get_dataset_info(datasets[0])["cell"]
if lattice != reference_lattice:
raise RuntimeError(
"lattices differ in %s and %s"
% (self._reference, si.get_reflections())
)
logger.debug("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
logger.debug("Ref: %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)
for j in range(6):
if (
math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
> 0.1
):
raise RuntimeError(
"unit cell parameters differ in %s and %s"
% (self._reference, si.get_reflections())
)
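# Illustrative numbers (assumption) for the 10% relative tolerance above:
#   reference a = 78.2, sweep a = 79.0 -> |79.0 - 78.2| / 78.2 ~ 0.010 -> ok
#   reference a = 78.2, sweep a = 90.0 -> |90.0 - 78.2| / 78.2 ~ 0.151 -> raise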
# ---------- SORT TOGETHER DATA ----------
self._sort_together_data_ccp4()
self._scalr_resolution_limits = {}
# store central resolution limit estimates
batch_ranges = [
self._sweep_handler.get_sweep_information(epoch).get_batch_range()
for epoch in self._sweep_handler.get_epochs()
]
self._resolution_limit_estimates = ersatz_resolution(
self._prepared_reflections, batch_ranges
)
def _scale(self):
"Perform all of the operations required to deliver the scaled data."
epochs = self._sweep_handler.get_epochs()
sc = self._updated_aimless()
sc.set_hklin(self._prepared_reflections)
sc.set_chef_unmerged(True)
sc.set_new_scales_file("%s.scales" % self._scalr_xname)
user_resolution_limits = {}
for epoch in epochs:
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
intgr = si.get_integrater()
if intgr.get_integrater_user_resolution():
dmin = intgr.get_integrater_high_resolution()
if (dname, sname) not in user_resolution_limits:
user_resolution_limits[(dname, sname)] = dmin
elif dmin < user_resolution_limits[(dname, sname)]:
user_resolution_limits[(dname, sname)] = dmin
start, end = si.get_batch_range()
if (dname, sname) in self._scalr_resolution_limits:
resolution, _ = self._scalr_resolution_limits[(dname, sname)]
sc.add_run(start, end, exclude=False, resolution=resolution, name=sname)
else:
sc.add_run(start, end, name=sname)
sc.set_hklout(
os.path.join(
self.get_working_directory(),
f"{self._scalr_pname}_{self._scalr_xname}_scaled_test.mtz",
)
)
if self.get_scaler_anomalous():
sc.set_anomalous()
# what follows, sucks
failover = PhilIndex.params.xia2.settings.failover
if failover:
try:
sc.scale()
except RuntimeError as e:
es = str(e)
if (
"bad batch" in es
or "negative scales run" in es
or "no observations" in es
):
# first ID the sweep from the batch no
batch = int(es.split()[-1])
epoch = self._identify_sweep_epoch(batch)
sweep = self._scalr_integraters[epoch].get_integrater_sweep()
# then remove it from my parent xcrystal
self.get_scaler_xcrystal().remove_sweep(sweep)
# then remove it from the scaler list of integraters
# - this should really be a scaler interface method
del self._scalr_integraters[epoch]
# then tell the user what is happening
logger.info(
"Sweep %s gave negative scales - removing", sweep.get_name()
)
# then reset the prepare, do, finish flags
self.set_scaler_prepare_done(False)
self.set_scaler_done(False)
self.set_scaler_finish_done(False)
# and return
return
else:
raise e
else:
sc.scale()
# then gather up all of the resulting reflection files
# and convert them into the required formats (.sca, .mtz)
loggraph = sc.parse_ccp4_loggraph()
resolution_info = {}
reflection_files = sc.get_scaled_reflection_files()
for dataset in reflection_files:
FileHandler.record_temporary_file(reflection_files[dataset])
for key in loggraph:
if "Analysis against resolution" in key:
dataset = key.split(",")[-1].strip()
resolution_info[dataset] = transpose_loggraph(loggraph[key])
# check in here that there is actually some data to scale..!
if not resolution_info:
raise RuntimeError("no resolution info")
highest_suggested_resolution = self.assess_resolution_limits(
sc.get_unmerged_reflection_file(), user_resolution_limits
)
if not self.get_scaler_done():
logger.debug("Returning as scaling not finished...")
return
batch_info = {}
for key in loggraph:
if "Analysis against Batch" in key:
dataset = key.split(",")[-1].strip()
batch_info[dataset] = transpose_loggraph(loggraph[key])
sc = self._updated_aimless()
FileHandler.record_log_file(
f"{self._scalr_pname} {self._scalr_xname} aimless", sc.get_log_file()
)
sc.set_hklin(self._prepared_reflections)
sc.set_new_scales_file("%s_final.scales" % self._scalr_xname)
for epoch in epochs:
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
start, end = si.get_batch_range()
resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]
sc.add_run(
start, end, exclude=False, resolution=resolution_limit, name=xname
)
sc.set_hklout(
os.path.join(
self.get_working_directory(),
f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
)
)
if self.get_scaler_anomalous():
sc.set_anomalous()
sc.scale()
FileHandler.record_xml_file(
f"{self._scalr_pname} {self._scalr_xname} aimless", sc.get_xmlout()
)
data = sc.get_summary()
scales_file = sc.get_new_scales_file()
loggraph = sc.parse_ccp4_loggraph()
standard_deviation_info = {}
for key in loggraph:
if "standard deviation v. Intensity" in key:
dataset = key.split(",")[-1].strip()
standard_deviation_info[dataset] = transpose_loggraph(loggraph[key])
resolution_info = {}
for key in loggraph:
if "Analysis against resolution" in key:
dataset = key.split(",")[-1].strip()
resolution_info[dataset] = transpose_loggraph(loggraph[key])
batch_info = {}
for key in loggraph:
if "Analysis against Batch" in key:
dataset = key.split(",")[-1].strip()
batch_info[dataset] = transpose_loggraph(loggraph[key])
# finally put all of the results "somewhere useful"
self._scalr_statistics = data
self._scalr_scaled_refl_files = copy.deepcopy(sc.get_scaled_reflection_files())
sc = self._updated_aimless()
sc.set_hklin(self._prepared_reflections)
sc.set_scales_file(scales_file)
self._wavelengths_in_order = []
for epoch in epochs:
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
start, end = si.get_batch_range()
resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]
sc.add_run(
start, end, exclude=False, resolution=resolution_limit, name=sname
)
if dname not in self._wavelengths_in_order:
self._wavelengths_in_order.append(dname)
sc.set_hklout(
os.path.join(
self.get_working_directory(),
f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
)
)
sc.set_scalepack()
if self.get_scaler_anomalous():
sc.set_anomalous()
sc.scale()
self._update_scaled_unit_cell()
self._scalr_scaled_reflection_files = {}
self._scalr_scaled_reflection_files["sca"] = {}
self._scalr_scaled_reflection_files["sca_unmerged"] = {}
self._scalr_scaled_reflection_files["mtz_unmerged"] = {}
for key in self._scalr_scaled_refl_files:
hklout = self._scalr_scaled_refl_files[key]
scaout = "%s.sca" % hklout[:-4]
self._scalr_scaled_reflection_files["sca"][key] = scaout
FileHandler.record_data_file(scaout)
scalepack = os.path.join(
os.path.split(hklout)[0],
os.path.split(hklout)[1]
.replace("_scaled", "_scaled_unmerged")
.replace(".mtz", ".sca"),
)
self._scalr_scaled_reflection_files["sca_unmerged"][key] = scalepack
FileHandler.record_data_file(scalepack)
mtz_unmerged = os.path.splitext(scalepack)[0] + ".mtz"
self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
FileHandler.record_data_file(mtz_unmerged)
if self._scalr_cell_esd is not None:
# patch .mtz and overwrite unit cell information
import xia2.Modules.Scaler.tools as tools
override_cell = self._scalr_cell_dict.get(
f"{self._scalr_pname}_{self._scalr_xname}_{key}"
)[0]
tools.patch_mtz_unit_cell(mtz_unmerged, override_cell)
tools.patch_mtz_unit_cell(hklout, override_cell)
self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
FileHandler.record_data_file(mtz_unmerged)
if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
for key in self._scalr_scaled_refl_files:
stats = self._compute_scaler_statistics(
self._scalr_scaled_reflection_files["mtz_unmerged"][key],
selected_band=(highest_suggested_resolution, None),
wave=key,
)
self._scalr_statistics[
(self._scalr_pname, self._scalr_xname, key)
] = stats
sc = self._updated_aimless()
sc.set_hklin(self._prepared_reflections)
sc.set_scales_file(scales_file)
self._wavelengths_in_order = []
for epoch in epochs:
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
start, end = si.get_batch_range()
resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]
sc.add_run(
start, end, exclude=False, resolution=resolution_limit, name=sname
)
if dname not in self._wavelengths_in_order:
self._wavelengths_in_order.append(dname)
sc.set_hklout(
os.path.join(
self.get_working_directory(),
f"{self._scalr_pname}_{self._scalr_xname}_chef.mtz",
)
)
sc.set_chef_unmerged(True)
if self.get_scaler_anomalous():
sc.set_anomalous()
sc.scale()
if not PhilIndex.params.dials.fast_mode:
try:
self._generate_absorption_map(sc)
except Exception as e:
# Map generation may fail for a number of reasons, e.g. matplotlib broken
logger.debug("Could not generate absorption map (%s)", e)
def _generate_absorption_map(self, scaler):
output = scaler.get_all_output()
aimless = "AIMLESS, CCP4"
pattern = re.compile(" +#+ *CCP4.*#+")
for line in output:
if pattern.search(line):
aimless = re.sub(r"\s\s+", ", ", line.strip("\t\n #"))
break
coefficients = scrape_coefficients(log=output)
if coefficients:
absmap = evaluate_1degree(coefficients)
absmin, absmax = absmap.min(), absmap.max()
else:
absmin, absmax = 1.0, 1.0
block = CIF.get_block("xia2")
mmblock = mmCIF.get_block("xia2")
mmblock["_exptl.entry_id"] = "xia2"
mmblock["_exptl.method"] = "X-RAY DIFFRACTION"
block["_exptl_absorpt_correction_T_min"] = mmblock[
"_exptl.absorpt_correction_T_min"
] = (
absmin / absmax
) # = scaled
block["_exptl_absorpt_correction_T_max"] = mmblock[
"_exptl.absorpt_correction_T_max"
] = (
absmax / absmax
) # = 1
block["_exptl_absorpt_correction_type"] = mmblock[
"_exptl.absorpt_correction_type"
] = "empirical"
block["_exptl_absorpt_process_details"] = mmblock[
"_exptl.absorpt_process_details"
] = (
"""
%s
Scaling & analysis of unmerged intensities, absorption correction using spherical harmonics
"""
% aimless
)
log_directory = self._base_path / "LogFiles"
if absmax - absmin > 0.000001:
log_directory.mkdir(parents=True, exist_ok=True)
mapfile = log_directory / "absorption_surface.png"
generate_map(absmap, str(mapfile))
else:
logger.debug(
"Cannot create absorption surface: map is too flat (min: %f, max: %f)",
absmin,
absmax,
)
def _identify_sweep_epoch(self, batch):
"""Identify the sweep epoch a given batch came from - N.B.
this assumes that the data are rebatched, will raise an exception if
more than one candidate is present."""
epochs = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
if batch in si.get_batches():
epochs.append(epoch)
if len(epochs) > 1:
raise RuntimeError("batch %d found in multiple sweeps" % batch)
return epochs[0]
def _prepare_pointless_hklin(self, hklin, phi_width):
return _prepare_pointless_hklin(self.get_working_directory(), hklin, phi_width)
def get_batch_to_dose(self):
batch_to_dose = {}
epoch_to_dose = {}
for xsample in self.get_scaler_xcrystal()._samples.values():
epoch_to_dose.update(xsample.get_epoch_to_dose())
for e0 in self._sweep_handler._sweep_information:
si = self._sweep_handler._sweep_information[e0]
batch_offset = si.get_batch_offset()
printed = False
for b in range(si.get_batches()[0], si.get_batches()[1] + 1):
if epoch_to_dose:
# when handling Eiger data this table appears to be somewhat broken
# see https://github.com/xia2/xia2/issues/90 - proper fix should be
# to work out why the epochs are not set correctly in the first place...
if si._image_to_epoch[b - batch_offset] in epoch_to_dose:
if not printed:
logger.debug("Epoch found; all good")
printed = True
batch_to_dose[b] = epoch_to_dose[
si._image_to_epoch[b - batch_offset]
]
else:
if not printed:
logger.debug("Epoch not found; using offset %f", e0)
printed = True
batch_to_dose[b] = epoch_to_dose[
si._image_to_epoch[b - batch_offset] - e0
]
else:
# backwards compatibility 2015-12-11
batch_to_dose[b] = b
return batch_to_dose
def get_UBlattsymm_from_sweep_info(self, sweep_info):
"""Return U, B, lattice symmetry from the data (i.e. mtz file)."""
return get_umat_bmat_lattice_symmetry_from_mtz(sweep_info.get_reflections())
def apply_reindex_operator_to_sweep_info(self, sweep_info, reindex_op, reason):
"""Apply the reindex operator to the data.
Delegate to the integrater reindex operator method."""
intgr = sweep_info.get_integrater()
intgr.set_integrater_reindex_operator(reindex_op, reason=reason)
sweep_info.set_reflections(intgr.get_integrater_intensities())
def get_mtz_data_from_sweep_info(self, sweep_info):
"""Get the data in mtz form.
Trivial for CCP4ScalerA, as always use the integrator to
generate a new mtz when reindexing, so just return this."""
return sweep_info.get_reflections()
| xia2/xia2 | src/xia2/Modules/Scaler/CCP4ScalerA.py | Python | bsd-3-clause | 45,928 | ["CRYSTAL"] | 3da192c102ba91391fa308eea8b510fc5badc15e0b022c0385bcd1ff74baf4b8 |
#!/usr/bin/env python
"""script to concatenate the dirac.cfg file's Systems sections with the content of the ConfigTemplate.cfg files."""
import sys
from diracdoctools.cmd.concatcfg import run
from diracdoctools.Config import CLParser
sys.exit(run(**(CLParser().optionDict())))
| fstagni/DIRAC | docs/diracdoctools/scripts/dirac-docs-concatenate-diraccfg.py | Python | gpl-3.0 | 279 | ["DIRAC"] | 04a0cb20d7df79ac13749191a58ebbf898430fdcc2eb4245572b3be5dca3168f |
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAdsplit(RPackage):
"""This package implements clustering of microarray gene expression
profiles according to functional annotations. For each term genes
are annotated to, splits into two subclasses are computed and a
significance of the supporting gene set is determined."""
homepage = "https://www.bioconductor.org/packages/adSplit/"
url = "https://git.bioconductor.org/packages/adSplit"
version('1.46.0', git='https://git.bioconductor.org/packages/adSplit', commit='7e81a83f34d371447f491b3a146bf6851e260c7c')
depends_on('r@3.4.0:3.4.9', when='@1.46.0')
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-kegg-db', type=('build', 'run'))
depends_on('r-multtest', type=('build', 'run'))
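# Illustrative usage (assumption, not part of the recipe): with this file in
# a Spack package repository the package would typically be built with
#   spack install r-adsplit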
| skosukhin/spack | var/spack/repos/builtin/packages/r-adsplit/package.py | Python | lgpl-2.1 | 2,170 | ["Bioconductor"] | 75d40917b18fc885833e816130b4b7a0b8b964605fc572c5213d17d17a570860 |
|
#! /usr/bin/python
######
# Copyright 2007-2009 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
#
# This code is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# only, as published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License version 2 for more details (a copy is
# included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU General Public License
# version 2 along with this work; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# Please contact Sun Microsystems, Inc., 16 Network Circle, Menlo
# Park, CA 94025 or visit www.sun.com if you need additional
# information or have any questions.
#####
import os
import pyaura.bridge as B
import pyaura.timestats as TS
import cPickle as C
def do_dump(regHost, output_folder):
aB = B.AuraBridge(regHost=regHost)
tS = TS.TimeStats(100000, 500)
cnt = 0
idx = 0
tag_dump = []
for artist in aB.get_all_iterator("ARTIST"):
tag_map = {}
for tag in aB.mdb.get_tags(artist):
tag_map[tag.name] = tag.count
tag_dump.append( (artist.getName(), artist.getKey(), tag_map) )
cnt += 1
tS.next()
if cnt > 20000:
C.dump(tag_dump, open(os.path.join(output_folder, "all-artist-dump-%d.dump" % idx), "w"))
cnt = 0
tag_dump = []
idx += 1
# do final dump when we're done
C.dump(tag_dump, open(os.path.join(output_folder, "all-artist-dump-%d.dump" % idx), "w"))
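# Illustrative sketch (assumption): each chunk written above can be read back
# under Python 2 with
#   import cPickle as C
#   tag_dump = C.load(open("all-artist-dump-0.dump"))
# giving a list of (artist_name, artist_key, {tag_name: count}) tuples.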
| SunLabsAST/AURA | Bridge/pyaura/var/dump_artists.py | Python | gpl-2.0 | 1,933 | ["VisIt"] | 3f1c35ee271495b06c22d67f710df1019880d99f52ca34ebee359d9bfecf8145 |
|
"""Metrics to assess performance on classification task given class prediction.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# Shangwu Yao <shangwuyao@gmail.com>
# Michal Karbownik <michakarbownik@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from ._base import _check_pos_label_consistency
def _check_zero_division(zero_division):
if isinstance(zero_division, str) and zero_division == "warn":
return
elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:
return
raise ValueError(
'Got zero_division={0}. Must be one of ["warn", 0, 1]'.format(zero_division)
)
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task.
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``.
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = {type_true, type_pred}
if y_type == {"binary", "multiclass"}:
y_type = {"multiclass"}
if len(y_type) > 1:
raise ValueError(
"Classification metrics can't handle a mix of {0} and {1} targets".format(
type_true, type_pred
)
)
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if y_type not in ["binary", "multiclass", "multilabel-indicator"]:
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == "binary":
try:
unique_values = np.union1d(y_true, y_pred)
except TypeError as e:
# We expect y_true and y_pred to be of the same data type.
# If `y_true` was provided to the classifier as strings,
# `y_pred` given by the classifier will also be encoded with
# strings. So we raise a meaningful error
raise TypeError(
"Labels in y_true and y_pred should be of the same type. "
f"Got y_true={np.unique(y_true)} and "
f"y_pred={np.unique(y_pred)}. Make sure that the "
"predictions provided by the classifier coincides with "
"the true labels."
) from e
if len(unique_values) > 2:
y_type = "multiclass"
if y_type.startswith("multilabel"):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = "multilabel-indicator"
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
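# Illustrative example (not part of the library source):
#   sample_score = np.array([1, 0, 1]), sample_weight = np.array([2, 1, 1])
#   normalize=True                    -> np.average(...) = 3/4 = 0.75
#   normalize=False, weights given    -> np.dot(...)     = 3
#   normalize=False, weights omitted  -> sample_score.sum() = 2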
def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, default=True
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly
classified samples (float), else returns the number of correctly
classified samples (int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See Also
--------
jaccard_score, hamming_loss, zero_one_loss
Notes
-----
In binary classification, this function is equal to the `jaccard_score`
function.
Examples
--------
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type.startswith("multilabel"):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
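# Worked numbers for the multilabel docstring example (illustrative only):
#   y_true = [[0, 1], [1, 1]], y_pred = [[1, 1], [1, 1]]
#   differing_labels = [1, 0] -> score = [False, True] -> weighted mean = 0.5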
def confusion_matrix(
y_true, y_pred, *, labels=None, sample_weight=None, normalize=None
):
"""Compute confusion matrix to evaluate the accuracy of a classification.
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` and
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated targets as returned by a classifier.
labels : array-like of shape (n_classes), default=None
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If ``None`` is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.18
normalize : {'true', 'pred', 'all'}, default=None
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix will not be
normalized.
Returns
-------
C : ndarray of shape (n_classes, n_classes)
Confusion matrix whose i-th row and j-th
column entry indicates the number of
samples with true label being i-th class
and predicted label being j-th class.
See Also
--------
ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix
given an estimator, the data, and the label.
ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix
given the true and predicted labels.
ConfusionMatrixDisplay : Confusion Matrix visualization.
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
(Wikipedia and other references may use a different
convention for axes).
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
if n_labels == 0:
raise ValueError("'labels' should contains at least one label.")
elif y_true.size == 0:
return np.zeros((n_labels, n_labels), dtype=int)
elif len(np.intersect1d(y_true, labels)) == 0:
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if normalize not in ["true", "pred", "all", None]:
raise ValueError("normalize must be one of {'true', 'pred', 'all', None}")
n_labels = labels.size
# If labels are not consecutive integers starting from zero, then
# y_true and y_pred must be converted into index form
need_index_conversion = not (
labels.dtype.kind in {"i", "u", "b"}
and np.all(labels == np.arange(n_labels))
and y_true.min() >= 0
and y_pred.min() >= 0
)
if need_index_conversion:
label_to_ind = {y: x for x, y in enumerate(labels)}
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
if not np.all(ind):
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
# Choose the accumulator dtype to always have high precision
if sample_weight.dtype.kind in {"i", "u", "b"}:
dtype = np.int64
else:
dtype = np.float64
cm = coo_matrix(
(sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels),
dtype=dtype,
).toarray()
with np.errstate(all="ignore"):
if normalize == "true":
cm = cm / cm.sum(axis=1, keepdims=True)
elif normalize == "pred":
cm = cm / cm.sum(axis=0, keepdims=True)
elif normalize == "all":
cm = cm / cm.sum()
cm = np.nan_to_num(cm)
return cm
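# Illustrative example (assumption) of the normalize options:
#   y_true = [0, 0, 1], y_pred = [0, 1, 1] -> cm = [[1, 1], [0, 1]]
#   normalize='true' -> [[0.5, 0.5], [0.0, 1.0]]   (each row sums to 1)
#   normalize='pred' -> [[1.0, 0.5], [0.0, 0.5]]   (each column sums to 1)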
def multilabel_confusion_matrix(
y_true, y_pred, *, sample_weight=None, labels=None, samplewise=False
):
"""Compute a confusion matrix for each class or sample.
.. versionadded:: 0.21
Compute class-wise (default) or sample-wise (samplewise=True) multilabel
confusion matrix to evaluate the accuracy of a classification, and output
confusion matrices for each class or sample.
In multilabel confusion matrix :math:`MCM`, the count of true negatives
is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`,
true positives is :math:`MCM_{:,1,1}` and false positives is
:math:`MCM_{:,0,1}`.
Multiclass data will be treated as if binarized under a one-vs-rest
transformation. Returned confusion matrices will be in the order of
sorted unique labels in the union of (y_true, y_pred).
Read more in the :ref:`User Guide <multilabel_confusion_matrix>`.
Parameters
----------
y_true : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \
(n_samples,)
Ground truth (correct) target values.
y_pred : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \
(n_samples,)
Estimated targets as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like of shape (n_classes,), default=None
A list of classes or column indices to select some (or to force
inclusion of classes absent from the data).
samplewise : bool, default=False
In the multilabel case, this calculates a confusion matrix per sample.
Returns
-------
multi_confusion : ndarray of shape (n_outputs, 2, 2)
A 2x2 confusion matrix corresponding to each output in the input.
When calculating class-wise multi_confusion (default), then
n_outputs = n_labels; when calculating sample-wise multi_confusion
(samplewise=True), n_outputs = n_samples. If ``labels`` is defined,
the results will be returned in the order specified in ``labels``,
otherwise the results will be returned in sorted order by default.
See Also
--------
confusion_matrix : Compute confusion matrix to evaluate the accuracy of a
classifier.
Notes
-----
The `multilabel_confusion_matrix` calculates class-wise or sample-wise
multilabel confusion matrices, and in multiclass tasks, labels are
binarized under a one-vs-rest way; while
:func:`~sklearn.metrics.confusion_matrix` calculates one confusion matrix
for confusion between every two classes.
Examples
--------
Multilabel-indicator case:
>>> import numpy as np
>>> from sklearn.metrics import multilabel_confusion_matrix
>>> y_true = np.array([[1, 0, 1],
... [0, 1, 0]])
>>> y_pred = np.array([[1, 0, 0],
... [0, 1, 1]])
>>> multilabel_confusion_matrix(y_true, y_pred)
array([[[1, 0],
[0, 1]],
<BLANKLINE>
[[1, 0],
[0, 1]],
<BLANKLINE>
[[0, 1],
[1, 0]]])
Multiclass case:
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> multilabel_confusion_matrix(y_true, y_pred,
... labels=["ant", "bird", "cat"])
array([[[3, 1],
[0, 2]],
<BLANKLINE>
[[5, 0],
[1, 0]],
<BLANKLINE>
[[2, 1],
[1, 2]]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in ("binary", "multiclass", "multilabel-indicator"):
raise ValueError("%s is not supported" % y_type)
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack(
[labels, np.setdiff1d(present_labels, labels, assume_unique=True)]
)
if y_true.ndim == 1:
if samplewise:
raise ValueError(
"Samplewise metrics are not available outside of "
"multilabel classification."
)
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = np.bincount(
tp_bins, weights=tp_bins_weights, minlength=len(labels)
)
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels))
if len(y_true):
true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
else:
sum_axis = 1 if samplewise else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.array_equal(labels, present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError(
"All labels must be in [0, n labels) for "
"multilabel targets. "
"Got %d > %d" % (np.max(labels), np.max(present_labels))
)
if np.min(labels) < 0:
raise ValueError(
"All labels must be in [0, n labels) for "
"multilabel targets. "
"Got %d < 0"
% np.min(labels)
)
if n_labels is not None:
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(
true_and_pred, axis=sum_axis, sample_weight=sample_weight
)
pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight)
fp = pred_sum - tp_sum
fn = true_sum - tp_sum
tp = tp_sum
if sample_weight is not None and samplewise:
sample_weight = np.array(sample_weight)
tp = np.array(tp)
fp = np.array(fp)
fn = np.array(fn)
tn = sample_weight * y_true.shape[1] - tp - fp - fn
elif sample_weight is not None:
tn = sum(sample_weight) - tp - fp - fn
elif samplewise:
tn = y_true.shape[1] - tp - fp - fn
else:
tn = y_true.shape[0] - tp - fp - fn
return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
def cohen_kappa_score(y1, y2, *, labels=None, weights=None, sample_weight=None):
r"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array of shape (n_samples,)
Labels assigned by the first annotator.
y2 : array of shape (n_samples,)
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array-like of shape (n_classes,), default=None
List of labels to index the matrix. This may be used to select a
subset of labels. If `None`, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : {'linear', 'quadratic'}, default=None
Weighting type to calculate the score. `None` means not weighted;
"linear" means linear weighted; "quadratic" means quadratic weighted.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596
<https://www.mitpressjournals.org/doi/pdf/10.1162/coli.07-034-R2>`_.
.. [3] `Wikipedia entry for the Cohen's kappa
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_.
"""
confusion = confusion_matrix(y1, y2, labels=labels, sample_weight=sample_weight)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
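# Worked example (illustrative, not from the docstring):
#   y1 = [0, 1, 1, 0], y2 = [0, 1, 1, 1]
#   observed agreement p_o = 3/4 = 0.75
#   chance agreement   p_e = 0.5 * 0.25 + 0.5 * 0.75 = 0.5
#   kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5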
def jaccard_score(
y_true,
y_pred,
*,
labels=None,
pos_label=1,
average="binary",
sample_weight=None,
zero_division="warn",
):
"""Jaccard similarity coefficient score.
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array-like of shape (n_classes,), default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', \
'binary'} or None, default='binary'
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", {0.0, 1.0}, default="warn"
Sets the value to return when there is a zero division, i.e. when there
are no negative values in predictions and labels. If set to
"warn", this acts like 0, but a warning is also raised.
Returns
-------
score : float (if average is not None) or array of floats, shape =\
[n_unique_labels]
See Also
--------
accuracy_score, f1_score, multilabel_confusion_matrix
Notes
-----
:func:`jaccard_score` may be a poor metric if there are no
positives for some samples or classes. Jaccard is undefined if there are
no true or predicted labels, and our implementation will return a score
of 0 with a warning.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_score
>>> y_true = np.array([[0, 1, 1],
... [1, 1, 0]])
>>> y_pred = np.array([[1, 1, 1],
... [1, 0, 0]])
In the binary case:
>>> jaccard_score(y_true[0], y_pred[0])
0.6666...
In the multilabel case:
>>> jaccard_score(y_true, y_pred, average='samples')
0.5833...
>>> jaccard_score(y_true, y_pred, average='macro')
0.6666...
>>> jaccard_score(y_true, y_pred, average=None)
array([0.5, 0.5, 1. ])
In the multiclass case:
>>> y_pred = [0, 2, 1, 2]
>>> y_true = [0, 1, 2, 2]
>>> jaccard_score(y_true, y_pred, average=None)
array([1. , 0. , 0.33...])
"""
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
samplewise = average == "samples"
MCM = multilabel_confusion_matrix(
y_true,
y_pred,
sample_weight=sample_weight,
labels=labels,
samplewise=samplewise,
)
numerator = MCM[:, 1, 1]
denominator = MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0]
if average == "micro":
numerator = np.array([numerator.sum()])
denominator = np.array([denominator.sum()])
jaccard = _prf_divide(
numerator,
denominator,
"jaccard",
"true or predicted",
average,
("jaccard",),
zero_division=zero_division,
)
if average is None:
return jaccard
if average == "weighted":
weights = MCM[:, 1, 0] + MCM[:, 1, 1]
if not np.any(weights):
# numerator is 0, and warning should have already been issued
weights = None
elif average == "samples" and sample_weight is not None:
weights = sample_weight
else:
weights = None
return np.average(jaccard, weights=weights)
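# Worked numbers for the multilabel docstring example (illustrative only):
#   y_true = [[0, 1, 1], [1, 1, 0]], y_pred = [[1, 1, 1], [1, 0, 0]]
#   label 0: tp=1, fp=1, fn=0 -> 1/2;  label 1: tp=1, fp=0, fn=1 -> 1/2
#   label 2: tp=1, fp=0, fn=0 -> 1/1
#   average=None -> [0.5, 0.5, 1.0];  average='macro' -> 2/3 ~ 0.6667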
def matthews_corrcoef(y_true, y_pred, *, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC).
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Binary and multiclass labels are supported. Only in the binary case does
this relate to information about true and false positives and negatives.
See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.18
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<https://doi.org/10.1093/bioinformatics/16.5.412>`_.
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_.
.. [3] `Gorodkin, (2004). Comparing two K-category assignments by a
K-category correlation coefficient
<https://www.sciencedirect.com/science/article/pii/S1476927104000799>`_.
.. [4] `Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN
Error Measures in MultiClass Prediction
<https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0041882>`_.
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred)
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in {"binary", "multiclass"}:
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
t_sum = C.sum(axis=1, dtype=np.float64)
p_sum = C.sum(axis=0, dtype=np.float64)
n_correct = np.trace(C, dtype=np.float64)
n_samples = p_sum.sum()
cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)
cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)
cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)
if cov_ypyp * cov_ytyt == 0:
return 0.0
else:
return cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
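# Worked numbers for the docstring example (illustrative only):
#   y_true = [+1, +1, +1, -1], y_pred = [+1, -1, +1, +1]
#   C = [[0, 1], [1, 2]], t_sum = p_sum = [1, 3], n_correct = 2, n = 4
#   cov_ytyp = 2*4 - (1*1 + 3*3) = -2; cov_ypyp = cov_ytyt = 16 - 10 = 6
#   MCC = -2 / sqrt(6 * 6) = -1/3 ~ -0.33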
def zero_one_loss(y_true, y_pred, *, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, default=True
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See Also
--------
accuracy_score, hamming_loss, jaccard_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(
y_true, y_pred, normalize=normalize, sample_weight=sample_weight
)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(
y_true,
y_pred,
*,
labels=None,
pos_label=1,
average="binary",
sample_weight=None,
zero_division="warn",
):
"""Compute the F1 score, also known as balanced F-score or F-measure.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the average of
the F1 score of each class with weighting depending on the ``average``
parameter.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples','weighted', 'binary'} or None, \
default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division, i.e. when all
predictions and labels are negative. If set to "warn", this acts as 0,
but warnings are also raised.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
See Also
--------
fbeta_score, precision_recall_fscore_support, jaccard_score,
multilabel_confusion_matrix
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro')
0.26...
>>> f1_score(y_true, y_pred, average='micro')
0.33...
>>> f1_score(y_true, y_pred, average='weighted')
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([0.8, 0. , 0. ])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> f1_score(y_true, y_pred, zero_division=1)
1.0...
>>> # multilabel classification
>>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]
>>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]]
>>> f1_score(y_true, y_pred, average=None)
array([0.66666667, 1. , 0.66666667])
Notes
-----
When ``true positive + false positive == 0``, precision is undefined.
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
"""
return fbeta_score(
y_true,
y_pred,
beta=1,
labels=labels,
pos_label=pos_label,
average=average,
sample_weight=sample_weight,
zero_division=zero_division,
)
def fbeta_score(
y_true,
y_pred,
*,
beta,
labels=None,
pos_label=1,
average="binary",
sample_weight=None,
zero_division="warn",
):
"""Compute the F-beta score.
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of recall in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> +inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Determines the weight of recall in the combined score.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None \
default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division, i.e. when all
predictions and labels are negative. If set to "warn", this acts as 0,
but warnings are also raised.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
See Also
--------
precision_recall_fscore_support, multilabel_confusion_matrix
Notes
-----
When ``true positive + false positive == 0`` or
``true positive + false negative == 0``, f-score returns 0 and raises
``UndefinedMetricWarning``. This behavior can be
modified with ``zero_division``.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
array([0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(
y_true,
y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=("f-score",),
sample_weight=sample_weight,
zero_division=zero_division,
)
return f
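# A minimal sketch, not part of the library itself: the docstring above
# describes F-beta as a weighted harmonic mean of precision and recall.  The
# helper below checks that identity numerically on made-up binary data;
# beta=2 favours recall, as stated above.
def _illustrate_fbeta_identity():
    from sklearn.metrics import fbeta_score, precision_score, recall_score
    y_true = [0, 1, 1, 0, 1, 1]
    y_pred = [0, 1, 0, 0, 1, 1]
    beta = 2.0
    p = precision_score(y_true, y_pred)
    r = recall_score(y_true, y_pred)
    # F_beta = (1 + beta**2) * P * R / (beta**2 * P + R)
    manual = (1 + beta ** 2) * p * r / (beta ** 2 * p + r)
    assert abs(manual - fbeta_score(y_true, y_pred, beta=beta)) < 1e-12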
def _prf_divide(
numerator, denominator, metric, modifier, average, warn_for, zero_division="warn"
):
"""Performs division and handles divide-by-zero.
    On zero-division, sets the corresponding result elements equal to
    0 or 1 (according to ``zero_division``). In addition, if
    ``zero_division == "warn"``, a warning is raised.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1 # avoid infs/nans
result = numerator / denominator
if not np.any(mask):
return result
# if ``zero_division=1``, set those with denominator == 0 equal to 1
result[mask] = 0.0 if zero_division in ["warn", 0] else 1.0
    # warnings are suppressed when the user sets ``zero_division`` to anything
    # other than its default value ("warn"). If we are computing only f-score,
    # the warning is raised only if precision and recall are ill-defined
if zero_division != "warn" or metric not in warn_for:
return result
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples. Use ``zero_division`` parameter to
# control this behavior."
if metric in warn_for and "f-score" in warn_for:
msg_start = "{0} and F-score are".format(metric.title())
elif metric in warn_for:
msg_start = "{0} is".format(metric.title())
elif "f-score" in warn_for:
msg_start = "F-score is"
else:
return result
_warn_prf(average, modifier, msg_start, len(result))
return result
def _warn_prf(average, modifier, msg_start, result_size):
axis0, axis1 = "sample", "label"
if average == "samples":
axis0, axis1 = axis1, axis0
msg = (
"{0} ill-defined and being set to 0.0 {{0}} "
"no {1} {2}s. Use `zero_division` parameter to control"
" this behavior.".format(msg_start, modifier, axis0)
)
if result_size == 1:
msg = msg.format("due to")
else:
msg = msg.format("in {0}s with".format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
def _check_set_wise_labels(y_true, y_pred, average, labels, pos_label):
"""Validation associated with set-wise metrics.
Returns identified labels.
"""
average_options = (None, "micro", "macro", "weighted", "samples")
if average not in average_options and average != "binary":
raise ValueError("average has to be one of " + str(average_options))
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
# Convert to Python primitive type to avoid NumPy type / Python str
# comparison. See https://github.com/numpy/numpy/issues/6784
present_labels = unique_labels(y_true, y_pred).tolist()
if average == "binary":
if y_type == "binary":
if pos_label not in present_labels:
if len(present_labels) >= 2:
raise ValueError(
f"pos_label={pos_label} is not a valid label. It "
f"should be one of {present_labels}"
)
labels = [pos_label]
else:
average_options = list(average_options)
if y_type == "multiclass":
average_options.remove("samples")
raise ValueError(
"Target is %s but average='binary'. Please "
"choose another average setting, one of %r." % (y_type, average_options)
)
elif pos_label not in (None, 1):
warnings.warn(
"Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average),
UserWarning,
)
return labels
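# A minimal sketch, not part of the library itself: the validation above is
# what makes the public metrics reject ``average='binary'`` on non-binary
# targets.  The target below is deliberately multiclass, so a ValueError with
# the message built above is expected.
def _illustrate_binary_average_validation():
    from sklearn.metrics import precision_score
    try:
        precision_score([0, 1, 2], [0, 2, 1], average="binary")
    except ValueError as exc:
        # e.g. "Target is multiclass but average='binary'. Please choose ..."
        print(exc)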
def precision_recall_fscore_support(
y_true,
y_pred,
*,
beta=1.0,
labels=None,
pos_label=1,
average=None,
warn_for=("precision", "recall", "f-score"),
sample_weight=None,
zero_division="warn",
):
"""Compute precision, recall, F-measure and support for each class.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
    If ``pos_label is None`` and the problem is binary classification, this
    function returns the average precision, recall and F-measure if
    ``average`` is one of ``'micro'``, ``'macro'``, ``'weighted'`` or
    ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, default=1.0
The strength of recall versus precision in the F-score.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
    average : {'binary', 'micro', 'macro', 'samples', 'weighted'}, \
default=None
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division:
- recall: when there are no positive labels
- precision: when there are no positive predictions
- f-score: both
If set to "warn", this acts as 0, but warnings are also raised.
Returns
-------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Precision score.
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Recall score.
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        F-beta score.
    support : None (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.
Notes
-----
When ``true positive + false positive == 0``, precision is undefined.
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
(array([0. , 0. , 0.66...]),
array([0., 0., 1.]), array([0. , 0. , 0.8]),
array([2, 2, 2]))
"""
_check_zero_division(zero_division)
if beta < 0:
raise ValueError("beta should be >=0 in the F-beta score")
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
# Calculate tp_sum, pred_sum, true_sum ###
samplewise = average == "samples"
MCM = multilabel_confusion_matrix(
y_true,
y_pred,
sample_weight=sample_weight,
labels=labels,
samplewise=samplewise,
)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == "micro":
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores and/or warn according to
# zero_division:
precision = _prf_divide(
tp_sum, pred_sum, "precision", "predicted", average, warn_for, zero_division
)
recall = _prf_divide(
tp_sum, true_sum, "recall", "true", average, warn_for, zero_division
)
# warn for f-score only if zero_division is warn, it is in warn_for
# and BOTH prec and rec are ill-defined
if zero_division == "warn" and ("f-score",) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, "true nor predicted", "F-score is", len(true_sum))
    # if tp == 0, F will be 1 only if all predictions are zero, all labels are
    # zero, and zero_division=1. In all other cases it is 0.
if np.isposinf(beta):
f_score = recall
else:
denom = beta2 * precision + recall
denom[denom == 0.0] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == "weighted":
weights = true_sum
if weights.sum() == 0:
zero_division_value = np.float64(1.0)
if zero_division in ["warn", 0]:
zero_division_value = np.float64(0.0)
# precision is zero_division if there are no positive predictions
# recall is zero_division if there are no positive labels
# fscore is zero_division if all labels AND predictions are
# negative
if pred_sum.sum() == 0:
return (
zero_division_value,
zero_division_value,
zero_division_value,
None,
)
else:
return (np.float64(0.0), zero_division_value, np.float64(0.0), None)
elif average == "samples":
weights = sample_weight
else:
weights = None
if average is not None:
assert average != "binary" or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
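# A minimal sketch, not part of the library itself: the micro average above
# pools tp / fp / fn over all labels before dividing.  The helper recomputes
# micro precision and recall by hand from multilabel_confusion_matrix on
# made-up multiclass data and checks that the results agree.
def _illustrate_micro_average_pooling():
    import numpy as np
    from sklearn.metrics import (
        multilabel_confusion_matrix,
        precision_recall_fscore_support,
    )
    y_true = [0, 1, 2, 0, 1, 2]
    y_pred = [0, 2, 1, 0, 0, 1]
    mcm = multilabel_confusion_matrix(y_true, y_pred)
    tp = mcm[:, 1, 1].sum()
    fp = mcm[:, 0, 1].sum()
    fn = mcm[:, 1, 0].sum()
    p, r, _, _ = precision_recall_fscore_support(y_true, y_pred, average="micro")
    assert np.isclose(p, tp / (tp + fp))
    assert np.isclose(r, tp / (tp + fn))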
def precision_score(
y_true,
y_pred,
*,
labels=None,
pos_label=1,
average="binary",
sample_weight=None,
zero_division="warn",
):
"""Compute the precision.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None \
default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float of shape
(n_unique_labels,)
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
See Also
--------
precision_recall_fscore_support, multilabel_confusion_matrix
Notes
-----
When ``true positive + false positive == 0``, precision returns 0 and
raises ``UndefinedMetricWarning``. This behavior can be
modified with ``zero_division``.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro')
0.22...
>>> precision_score(y_true, y_pred, average='micro')
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
0.22...
>>> precision_score(y_true, y_pred, average=None)
array([0.66..., 0. , 0. ])
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> precision_score(y_true, y_pred, average=None)
array([0.33..., 0. , 0. ])
>>> precision_score(y_true, y_pred, average=None, zero_division=1)
array([0.33..., 1. , 1. ])
>>> # multilabel classification
>>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]
>>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]]
>>> precision_score(y_true, y_pred, average=None)
array([0.5, 1. , 1. ])
"""
p, _, _, _ = precision_recall_fscore_support(
y_true,
y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=("precision",),
sample_weight=sample_weight,
zero_division=zero_division,
)
return p
def recall_score(
y_true,
y_pred,
*,
labels=None,
pos_label=1,
average="binary",
sample_weight=None,
zero_division="warn",
):
"""Compute the recall.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None \
default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall. Weighted recall
is equal to accuracy.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
recall : float (if average is not None) or array of float of shape
(n_unique_labels,)
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
See Also
--------
precision_recall_fscore_support, balanced_accuracy_score,
multilabel_confusion_matrix
Notes
-----
When ``true positive + false negative == 0``, recall returns 0 and raises
``UndefinedMetricWarning``. This behavior can be modified with
``zero_division``.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro')
0.33...
>>> recall_score(y_true, y_pred, average='micro')
0.33...
>>> recall_score(y_true, y_pred, average='weighted')
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([1., 0., 0.])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> recall_score(y_true, y_pred, average=None)
array([0.5, 0. , 0. ])
>>> recall_score(y_true, y_pred, average=None, zero_division=1)
array([0.5, 1. , 1. ])
>>> # multilabel classification
>>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]
>>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]]
>>> recall_score(y_true, y_pred, average=None)
array([1. , 1. , 0.5])
"""
_, r, _, _ = precision_recall_fscore_support(
y_true,
y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=("recall",),
sample_weight=sample_weight,
zero_division=zero_division,
)
return r
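# A minimal sketch, not part of the library itself: the docstring above notes
# that support-weighted recall equals accuracy.  Each true sample contributes
# exactly once to the weighted sum, so the two quantities must agree, as the
# assertion below checks on made-up multiclass data.
def _illustrate_weighted_recall_is_accuracy():
    import numpy as np
    from sklearn.metrics import accuracy_score, recall_score
    y_true = [0, 1, 2, 2, 1, 0, 2]
    y_pred = [0, 2, 2, 1, 1, 0, 2]
    assert np.isclose(
        recall_score(y_true, y_pred, average="weighted"),
        accuracy_score(y_true, y_pred),
    )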
def balanced_accuracy_score(y_true, y_pred, *, sample_weight=None, adjusted=False):
"""Compute the balanced accuracy.
    The balanced accuracy in binary and multiclass classification problems is
    used to deal with imbalanced datasets. It is defined as the average of
    recall obtained on each class.
The best value is 1 and the worst value is 0 when ``adjusted=False``.
Read more in the :ref:`User Guide <balanced_accuracy_score>`.
.. versionadded:: 0.20
Parameters
----------
y_true : 1d array-like
Ground truth (correct) target values.
y_pred : 1d array-like
Estimated targets as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
adjusted : bool, default=False
When true, the result is adjusted for chance, so that random
performance would score 0, while keeping perfect performance at a score
of 1.
Returns
-------
balanced_accuracy : float
See Also
--------
recall_score, roc_auc_score
Notes
-----
Some literature promotes alternative definitions of balanced accuracy. Our
definition is equivalent to :func:`accuracy_score` with class-balanced
sample weights, and shares desirable properties with the binary case.
See the :ref:`User Guide <balanced_accuracy_score>`.
References
----------
.. [1] Brodersen, K.H.; Ong, C.S.; Stephan, K.E.; Buhmann, J.M. (2010).
The balanced accuracy and its posterior distribution.
Proceedings of the 20th International Conference on Pattern
Recognition, 3121-24.
.. [2] John. D. Kelleher, Brian Mac Namee, Aoife D'Arcy, (2015).
`Fundamentals of Machine Learning for Predictive Data Analytics:
Algorithms, Worked Examples, and Case Studies
<https://mitpress.mit.edu/books/fundamentals-machine-learning-predictive-data-analytics>`_.
Examples
--------
>>> from sklearn.metrics import balanced_accuracy_score
>>> y_true = [0, 1, 0, 0, 1, 0]
>>> y_pred = [0, 1, 0, 0, 0, 1]
>>> balanced_accuracy_score(y_true, y_pred)
0.625
"""
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide="ignore", invalid="ignore"):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn("y_pred contains classes not in y_true")
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
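# A minimal sketch, not part of the library itself: as defined above,
# balanced accuracy is the macro-average of per-class recall, and
# ``adjusted=True`` rescales it so that chance performance maps to 0.
# The data values are made up.
def _illustrate_balanced_accuracy():
    import numpy as np
    from sklearn.metrics import balanced_accuracy_score, recall_score
    y_true = [0, 0, 0, 0, 1, 1]
    y_pred = [0, 1, 0, 0, 1, 0]
    bas = balanced_accuracy_score(y_true, y_pred)
    assert np.isclose(bas, recall_score(y_true, y_pred, average="macro"))
    # adjusted = (score - chance) / (1 - chance) with chance = 1 / n_classes
    adjusted = (bas - 0.5) / (1 - 0.5)
    assert np.isclose(adjusted, balanced_accuracy_score(y_true, y_pred, adjusted=True))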
def classification_report(
y_true,
y_pred,
*,
labels=None,
target_names=None,
sample_weight=None,
digits=2,
output_dict=False,
zero_division="warn",
):
"""Build a text report showing the main classification metrics.
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like of shape (n_labels,), default=None
Optional list of label indices to include in the report.
target_names : list of str of shape (n_labels,), default=None
Optional display names matching the labels (same order).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
digits : int, default=2
Number of digits for formatting output floating point values.
When ``output_dict`` is ``True``, this will be ignored and the
returned values will not be rounded.
output_dict : bool, default=False
If True, return output as dict.
.. versionadded:: 0.20
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
report : string / dict
Text summary of the precision, recall, F1 score for each class.
Dictionary returned if output_dict is True. Dictionary has the
following structure::
{'label 1': {'precision':0.5,
'recall':1.0,
'f1-score':0.67,
'support':1},
'label 2': { ... },
...
}
The reported averages include macro average (averaging the unweighted
mean per label), weighted average (averaging the support-weighted mean
per label), and sample average (only for multilabel classification).
Micro average (averaging the total true positives, false negatives and
false positives) is only shown for multi-label or multi-class
with a subset of classes, because it corresponds to accuracy
otherwise and would be the same for all metrics.
See also :func:`precision_recall_fscore_support` for more details
on averages.
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
See Also
--------
precision_recall_fscore_support, confusion_matrix,
multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
accuracy 0.60 5
macro avg 0.50 0.56 0.49 5
weighted avg 0.70 0.60 0.61 5
<BLANKLINE>
>>> y_pred = [1, 1, 0]
>>> y_true = [1, 1, 1]
>>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
precision recall f1-score support
<BLANKLINE>
1 1.00 0.67 0.80 3
2 0.00 0.00 0.00 0
3 0.00 0.00 0.00 0
<BLANKLINE>
micro avg 1.00 0.67 0.80 3
macro avg 0.33 0.22 0.27 3
weighted avg 1.00 0.67 0.80 3
<BLANKLINE>
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
labels_given = False
else:
labels = np.asarray(labels)
labels_given = True
# labelled micro average
micro_is_accuracy = (y_type == "multiclass" or y_type == "binary") and (
not labels_given or (set(labels) == set(unique_labels(y_true, y_pred)))
)
if target_names is not None and len(labels) != len(target_names):
if labels_given:
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}".format(
len(labels), len(target_names)
)
)
else:
raise ValueError(
"Number of classes, {0}, does not match size of "
"target_names, {1}. Try specifying the labels "
"parameter".format(len(labels), len(target_names))
)
if target_names is None:
        target_names = ["%s" % label for label in labels]
headers = ["precision", "recall", "f1-score", "support"]
# compute per-class results without averaging
p, r, f1, s = precision_recall_fscore_support(
y_true,
y_pred,
labels=labels,
average=None,
sample_weight=sample_weight,
zero_division=zero_division,
)
rows = zip(target_names, p, r, f1, s)
if y_type.startswith("multilabel"):
average_options = ("micro", "macro", "weighted", "samples")
else:
average_options = ("micro", "macro", "weighted")
if output_dict:
report_dict = {label[0]: label[1:] for label in rows}
for label, scores in report_dict.items():
report_dict[label] = dict(zip(headers, [i.item() for i in scores]))
else:
longest_last_line_heading = "weighted avg"
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(longest_last_line_heading), digits)
head_fmt = "{:>{width}s} " + " {:>9}" * len(headers)
report = head_fmt.format("", *headers, width=width)
report += "\n\n"
row_fmt = "{:>{width}s} " + " {:>9.{digits}f}" * 3 + " {:>9}\n"
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += "\n"
# compute all applicable averages
for average in average_options:
if average.startswith("micro") and micro_is_accuracy:
line_heading = "accuracy"
else:
line_heading = average + " avg"
# compute averages with specified averaging method
avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(
y_true,
y_pred,
labels=labels,
average=average,
sample_weight=sample_weight,
zero_division=zero_division,
)
avg = [avg_p, avg_r, avg_f1, np.sum(s)]
if output_dict:
report_dict[line_heading] = dict(zip(headers, [i.item() for i in avg]))
else:
if line_heading == "accuracy":
row_fmt_accuracy = (
"{:>{width}s} "
+ " {:>9.{digits}}" * 2
+ " {:>9.{digits}f}"
+ " {:>9}\n"
)
report += row_fmt_accuracy.format(
line_heading, "", "", *avg[2:], width=width, digits=digits
)
else:
report += row_fmt.format(line_heading, *avg, width=width, digits=digits)
if output_dict:
if "accuracy" in report_dict.keys():
report_dict["accuracy"] = report_dict["accuracy"]["precision"]
return report_dict
else:
return report
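# A minimal sketch, not part of the library itself: with ``output_dict=True``
# the report becomes a nested dict whose values are unrounded, which is
# convenient for post-processing.  The data mirror the docstring example.
def _illustrate_classification_report_dict():
    from sklearn.metrics import classification_report
    y_true = [0, 1, 2, 2, 2]
    y_pred = [0, 0, 2, 2, 1]
    report = classification_report(y_true, y_pred, output_dict=True)
    # per-label entries plus "accuracy", "macro avg" and "weighted avg" keys
    print(report["macro avg"]["f1-score"], report["accuracy"])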
def hamming_loss(y_true, y_pred, *, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.18
Returns
-------
    loss : float or int
        The average Hamming loss between the elements of ``y_true`` and
        ``y_pred``.
See Also
--------
accuracy_score, jaccard_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function, when `normalize` parameter is set to
True.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upperbounded by the subset zero-one loss, when
`normalize` parameter is set to True. It is always between 0 and 1,
lower being better.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_.
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if sample_weight is None:
weight_average = 1.0
else:
weight_average = np.mean(sample_weight)
if y_type.startswith("multilabel"):
n_differences = count_nonzero(y_true - y_pred, sample_weight=sample_weight)
return n_differences / (y_true.shape[0] * y_true.shape[1] * weight_average)
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
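# A minimal sketch, not part of the library itself: for multilabel indicator
# targets the Hamming loss is simply the fraction of label cells that
# disagree, which the assertion below checks directly on made-up data.
def _illustrate_hamming_loss_multilabel():
    import numpy as np
    from sklearn.metrics import hamming_loss
    y_true = np.array([[0, 1, 1], [1, 0, 1]])
    y_pred = np.array([[0, 0, 1], [1, 1, 1]])
    assert np.isclose(hamming_loss(y_true, y_pred), np.mean(y_true != y_pred))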
def log_loss(
y_true, y_pred, *, eps=1e-15, normalize=True, sample_weight=None, labels=None
):
r"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of a logistic model that returns ``y_pred`` probabilities
for its training data ``y_true``.
The log loss is only defined for two or more labels.
    For a single sample with true label :math:`y \in \{0,1\}`
    and a probability estimate :math:`p = \operatorname{Pr}(y = 1)`, the log
loss is:
.. math::
L_{\log}(y, p) = -(y \log (p) + (1 - y) \log (1 - p))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float, default=1e-15
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, default=True
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Notes
-----
The logarithm used is the natural logarithm (base-e).
Examples
--------
>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true, sample_weight)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError(
"y_true contains only one label ({0}). Please "
"provide the true labels explicitly through the "
"labels argument.".format(lb.classes_[0])
)
else:
raise ValueError(
"The labels array needs to contain at least two "
"labels for log_loss, "
"got {0}.".format(lb.classes_)
)
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(
1 - transformed_labels, transformed_labels, axis=1
)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError(
"y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(
transformed_labels.shape[1], y_pred.shape[1], lb.classes_
)
)
else:
raise ValueError(
"The number of classes in labels is different "
"from that in y_pred. Classes found in "
"labels: {0}".format(lb.classes_)
)
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
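# A minimal sketch, not part of the library itself: for a binary problem the
# per-sample loss is ``-(y * log(p) + (1 - y) * log(1 - p))``.  The helper
# recomputes the mean loss by hand on made-up probabilities that sit well
# inside the clipping range, so clipping does not affect the comparison.
def _illustrate_log_loss_by_hand():
    import numpy as np
    from sklearn.metrics import log_loss
    y_true = np.array([0, 1, 1, 0])
    p = np.array([0.1, 0.8, 0.6, 0.3])  # predicted P(y = 1) for each sample
    manual = -np.mean(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))
    assert np.isclose(log_loss(y_true, p), manual)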
def hinge_loss(y_true, pred_decision, *, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized).
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array of shape (n_samples,)
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array of shape (n_samples,) or (n_samples, n_classes)
Predicted decisions, as output by decision_function (floats).
labels : array-like, default=None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_.
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292.
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(random_state=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision)
0.30...
In the multiclass case:
>>> import numpy as np
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC()
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels=labels)
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(labels if labels is not None else y_true)
if y_true_unique.size > 2:
if pred_decision.ndim <= 1:
            raise ValueError(
                "The shape of pred_decision cannot be a 1d array "
                "with a multiclass target. pred_decision shape "
"must be (n_samples, n_classes), that is "
f"({y_true.shape[0]}, {y_true_unique.size})."
f" Got: {pred_decision.shape}"
)
# pred_decision.ndim > 1 is true
if y_true_unique.size != pred_decision.shape[1]:
if labels is None:
raise ValueError(
"Please include all labels in y_true "
"or pass labels as third argument"
)
else:
raise ValueError(
"The shape of pred_decision is not "
"consistent with the number of classes. "
"With a multiclass target, pred_decision "
"shape must be "
"(n_samples, n_classes), that is "
f"({y_true.shape[0]}, {y_true_unique.size}). "
f"Got: {pred_decision.shape}"
)
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1), axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
np.clip(losses, 0, None, out=losses)
return np.average(losses, weights=sample_weight)
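# A minimal sketch, not part of the library itself: in the binary case the
# per-sample loss is ``max(0, 1 - y * decision)`` with labels encoded as
# +1 / -1.  The assertion recomputes the average by hand for made-up
# decision values.
def _illustrate_binary_hinge_loss():
    import numpy as np
    from sklearn.metrics import hinge_loss
    y_true = np.array([-1, 1, 1, -1])
    decision = np.array([-2.0, 1.5, -0.3, 0.4])
    manual = np.mean(np.maximum(0.0, 1.0 - y_true * decision))
    assert np.isclose(hinge_loss(y_true, decision), manual)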
def brier_score_loss(y_true, y_prob, *, sample_weight=None, pos_label=None):
"""Compute the Brier score loss.
The smaller the Brier score loss, the better, hence the naming with "loss".
The Brier score measures the mean squared difference between the predicted
probability and the actual outcome. The Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
    of only 0 and 1). It can be decomposed as the sum of refinement loss and
calibration loss.
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter `pos_label`, which defaults to
the greater label unless `y_true` is all 0 or all -1, in which case
`pos_label` defaults to 1.
Read more in the :ref:`User Guide <brier_score_loss>`.
Parameters
----------
y_true : array of shape (n_samples,)
True targets.
y_prob : array of shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
pos_label : int or str, default=None
        Label of the positive class. `pos_label` will be inferred in the
        following manner:
        * if `y_true` in {-1, 1} or {0, 1}, `pos_label` defaults to 1;
        * else if `y_true` contains string labels, an error will be raised
          and `pos_label` should be explicitly specified;
* otherwise, `pos_label` defaults to the greater label,
i.e. `np.unique(y_true)[-1]`.
Returns
-------
score : float
Brier score loss.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob)
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0)
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, pos_label="ham")
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score
<https://en.wikipedia.org/wiki/Brier_score>`_.
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
check_consistent_length(y_true, y_prob, sample_weight)
y_type = type_of_target(y_true)
if y_type != "binary":
raise ValueError(
"Only binary classification is supported. The type of the target "
f"is {y_type}."
)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
try:
pos_label = _check_pos_label_consistency(pos_label, y_true)
except ValueError:
classes = np.unique(y_true)
if classes.dtype.kind not in ("O", "U", "S"):
# for backward compatibility, if classes are not string then
# `pos_label` will correspond to the greater label
pos_label = classes[-1]
else:
raise
y_true = np.array(y_true == pos_label, int)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
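# A minimal sketch, not part of the library itself: the Brier score is the
# mean squared difference between the predicted probability of the positive
# class and the 0/1 outcome, as the assertion below checks on made-up data.
def _illustrate_brier_score():
    import numpy as np
    from sklearn.metrics import brier_score_loss
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    assert np.isclose(
        brier_score_loss(y_true, y_prob), np.mean((y_true - y_prob) ** 2)
    )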
| huzq/scikit-learn | sklearn/metrics/_classification.py | Python | bsd-3-clause | 96,977 | ["Brian"] | 2b47f5a77f1d817df855ea841ec46695d61c47c8e6d075851d1d856ebc13b6f5 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Jason Swails
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import mdtraj as md
from mdtraj.testing import get_fn, eq, DocStringFormatTester, skipif
from mdtraj.formats import psf
from mdtraj.utils import enter_temp_directory
from distutils.spawn import find_executable
doc = DocStringFormatTester(psf)
VMD = find_executable('vmd')
VMD_MSG = 'This test requires the VMD executable: http://www.ks.uiuc.edu/Research/vmd/'
def test_load_psf():
top = psf.load_psf(get_fn('ala_ala_ala.psf'))
ref_top = md.load(get_fn('ala_ala_ala.pdb')).topology
eq(top, ref_top)
# Test the XPLOR psf format parsing
top2 = psf.load_psf(get_fn('ala_ala_ala.xpsf'))
eq(top2, ref_top)
def test_multichain_psf():
top = psf.load_psf(get_fn('3pqr_memb.psf'))
# Check that each segment was turned into a chain
eq(top.n_chains, 11)
chain_lengths = [5162, 185, 97, 28, 24, 24, 45, 35742, 72822, 75, 73]
for i, n in enumerate(chain_lengths):
eq(top.chain(i).n_atoms, n)
# There are some non-sequentially numbered residues across chain
# boundaries... make sure resSeq is properly taken from the PSF
eq(top.chain(0).residue(0).resSeq, 1)
eq(top.chain(0).residue(-1).resSeq, 326)
eq(top.chain(1).residue(0).resSeq, 340)
eq(top.chain(1).residue(-1).resSeq, 350)
eq(top.chain(2).residue(0).resSeq, 1)
eq(top.chain(2).residue(-1).resSeq, 4)
eq(top.chain(3).residue(0).resSeq, 1)
eq(top.chain(3).residue(-1).resSeq, 1)
eq(top.chain(4).residue(0).resSeq, 1)
eq(top.chain(4).residue(-1).resSeq, 1)
eq(top.chain(5).residue(0).resSeq, 1)
eq(top.chain(5).residue(-1).resSeq, 1)
eq(top.chain(6).residue(0).resSeq, 1)
eq(top.chain(6).residue(-1).resSeq, 2)
eq(top.chain(7).residue(0).resSeq, 1)
eq(top.chain(7).residue(-1).resSeq, 276)
eq(top.chain(8).residue(0).resSeq, 1)
eq(top.chain(8).residue(-1).resSeq, 24274)
eq(top.chain(9).residue(0).resSeq, 1)
eq(top.chain(9).residue(-1).resSeq, 75)
eq(top.chain(10).residue(0).resSeq, 1)
eq(top.chain(10).residue(-1).resSeq, 73)
def test_load_mdcrd_with_psf():
traj = md.load(get_fn('ala_ala_ala.mdcrd'), top=get_fn('ala_ala_ala.psf'))
ref_traj = md.load(get_fn('ala_ala_ala.pdb'))
eq(traj.topology, ref_traj.topology)
eq(traj.xyz, ref_traj.xyz)
@skipif(not VMD, VMD_MSG)
def test_vmd_psf():
yield lambda: _test_against_vmd(get_fn('1vii.pdb'))
yield lambda: _test_against_vmd(get_fn('2EQQ.pdb'))
def _test_against_vmd(pdb):
# this is probably not cross-platform compatible. I assume that the exact
# path to this CHARMM topology that is included with VMD depends on
# the install mechanism, especially for bundled mac or windows installers
VMD_ROOT = os.path.join(os.path.dirname(os.path.realpath(VMD)), '..')
top_paths = [os.path.join(r, f) for (r, _, fs) in os.walk(VMD_ROOT) for f in fs
if 'top_all27_prot_lipid_na.inp' in f]
    assert len(top_paths) > 0
top = os.path.abspath(top_paths[0]).replace(" ", "\\ ")
TEMPLATE = '''
package require psfgen
topology %(top)s
pdbalias residue HIS HSE
pdbalias atom ILE CD1 CD
segment U {pdb %(pdb)s}
coordpdb %(pdb)s U
guesscoord
writepdb out.pdb
writepsf out.psf
exit
''' % {'top': top, 'pdb' : pdb}
with enter_temp_directory():
with open('script.tcl', 'w') as f:
f.write(TEMPLATE)
os.system(' '.join([VMD, '-e', 'script.tcl', '-dispdev', 'none']))
out_pdb = md.load('out.pdb')
out_psf = md.load_psf('out.psf')
# make sure the two topologies are equal
eq(out_pdb.top, out_psf)
| kyleabeauchamp/mdtraj | mdtraj/tests/test_psf.py | Python | lgpl-2.1 | 4,591 | ["CHARMM", "MDTraj", "VMD"] | 455335f18b7a0112a25fc8e476c35509465d152e504a7a1f39de766242c5075a |
#!@PYTHON_EXECUTABLE@
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import json
import argparse
from argparse import RawTextHelpFormatter
parser = argparse.ArgumentParser(description="Psi4: Open-Source Quantum Chemistry", formatter_class=RawTextHelpFormatter)
parser.add_argument("-i", "--input", default="input.dat", help="Input file name. Default: input.dat.")
parser.add_argument("-o", "--output", help="""\
Redirect output elsewhere.
Default: when input filename is 'input.dat', 'output.dat'.
Otherwise, output filename defaults to input filename with
any '.in' or '.dat' extension replaced by '.out'""")
parser.add_argument("-v", "--verbose", action='store_true', help="Print a lot of information.")
parser.add_argument("-V", "--version", action='store_true', help="Print version information.")
# parser.add_argument("-d", "--debug", action='store_true', help="Flush the outfile at every print statement.")
parser.add_argument("-k", "--skip-preprocessor", action='store_true', help="Skips input preprocessing. Expert mode.")
parser.add_argument("-m", "--messy", action='store_true', help="Leave temporary files after the run is completed.")
# parser.add_argument("-r", "--restart", action='store_true', help="Number to be used instead of process id.")
parser.add_argument("-s", "--scratch", help="Psi4 scratch directory to use.")
parser.add_argument("-a", "--append", action='store_true', help="Append results to output file. Default: Truncate first")
parser.add_argument("-l", "--psidatadir",
help="Specify where to look for the Psi data directory. Overrides PSIDATADIR.")
parser.add_argument("-n", "--nthread", default=1, help="Number of threads to use (overrides OMP_NUM_THREADS)")
parser.add_argument("-p", "--prefix", help="Prefix name for psi files. Default psi")
parser.add_argument("--inplace", action='store_true', help="Runs psi4 from the source directory. "
"!Warning! expert option.")
parser.add_argument("--json", action='store_true', help="Runs a JSON input file. !Warning! experimental option.")
# For plugins
parser.add_argument("--plugin-name", help="""\
Creates a new directory with files for writing a new plugin.
You can specify an additional argument that specifies a
template to use, for example
>>> psi4 --plugin-name mygreatcode --plugin-template mointegrals""")
parser.add_argument('--plugin-template', default='basic',
choices=['aointegrals', 'basic', 'dfmp2', 'mointegrals', 'scf', 'sointegrals', 'wavefunction'],
help='New plugin template to use.')
parser.add_argument('--plugin-compile', action='store_true', help="""\
Generates a CMake command for building a plugin against this psi4 installation.
>>> cd <plugin_directory>
>>> `psi4 --plugin-compile`
>>> make
>>> psi4""")
# print("Environment Variables\n");
# print(" PSI_SCRATCH Directory where scratch files are written.")
# print(" Default: $TMPDIR (or /tmp/ when not set)")
# print(" This should be a local, not network, disk")
# parser.print_help()
args, unknown = parser.parse_known_args()
args = args.__dict__ # Namespace object seems silly
# Figure out pythonpath
cmake_install_prefix = os.path.normpath(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + '..')
lib_dir = os.path.sep.join([cmake_install_prefix, "@CMAKE_INSTALL_LIBDIR@", "@PYMOD_INSTALL_LIBDIR@"])
if args["inplace"]:
if "CMAKE_INSTALL_LIBDIR" not in lib_dir:
        raise ImportError("Cannot run inplace from an installed directory.")
core_location = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + "core.so"
if not os.path.isfile(core_location):
raise ImportError("A compiled Psi4 core.so needs to be symlinked to the %s folder" % os.path.dirname(__file__))
lib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if ("PSIDATADIR" not in os.environ.keys()) and (not args["psidatadir"]):
data_dir = os.path.sep.join([os.path.abspath(os.path.dirname(__file__)), "share", "psi4"])
os.environ["PSIDATADIR"] = data_dir
elif "CMAKE_INSTALL_LIBDIR" in lib_dir:
raise ImportError("Psi4 was not installed correctly!")
# Replace input/output if unknown kwargs
if len(unknown) > 0:
args["input"] = unknown[0]
if len(unknown) > 1:
args["output"] = unknown[1]
if len(unknown) > 2:
raise KeyError("Too many unknown arguments: %s" % str(unknown))
# Figure out output arg
if args["output"] is None:
if args["input"] == "input.dat":
args["output"] = "output.dat"
elif args["input"].endswith(".in"):
args["output"] = args["input"].replace(".in", ".out")
elif args["input"].endswith(".dat"):
args["output"] = args["input"].replace(".dat", ".out")
else:
args["output"] = args["input"] + ".dat"
# Plugin compile line
if args['plugin_compile']:
share_cmake_dir = os.path.sep.join([cmake_install_prefix, 'share', 'cmake', 'psi4'])
print("""cmake -C {}/psi4PluginCache.cmake -DCMAKE_PREFIX_PATH={} .""".format(share_cmake_dir, cmake_install_prefix))
sys.exit()
# Transmit any argument psidatadir through environ
if args["psidatadir"] is not None:
data_dir = os.path.abspath(os.path.expanduser(args["psidatadir"]))
os.environ["PSIDATADIR"] = data_dir
### Actually import psi4 and apply setup ###
# Import installed psi4
sys.path.insert(1, lib_dir)
import psi4
if args["version"]:
print(psi4.__version__)
sys.exit()
if args['plugin_name']:
# This call does not return.
psi4.plugin.create_plugin(args['plugin_name'], args['plugin_template'])
if not os.path.isfile(args["input"]):
raise KeyError("The file %s does not exist." % args["input"])
args["input"] = os.path.normpath(args["input"])
# Setup outfile
if args["append"] is None:
args["append"] = False
if args["output"] != "stdout":
psi4.core.set_output_file(args["output"], args["append"])
# Set a few options
if args["prefix"] is not None:
psi4.core.set_psi_file_prefix(args["prefix"])
psi4.core.set_nthread(int(args["nthread"]))
psi4.core.set_memory(524288000, True)
psi4.extras._input_dir_ = os.path.dirname(os.path.abspath(args["input"]))
psi4.print_header()
if args["scratch"] is not None:
if not os.path.isdir(args["scratch"]):
raise Exception("Passed in scratch is not a directory (%s)." % args["scratch"])
psi4.core.set_environment("PSI_SCRATCH", args["scratch"])
# If this is a json call, compute and stop
if args["json"]:
with open(args["input"], 'r') as f:
json_data = json.load(f)
psi4.extras._success_flag_ = True
psi4.extras.exit_printing()
psi4.json_wrapper.run_json(json_data)
with open(args["input"], 'w') as f:
json.dump(json_data, f)
if args["output"] != "stdout":
os.unlink(args["output"])
sys.exit()
# Read input
with open(args["input"]) as f:
content = f.read()
# Preprocess
if not args["skip_preprocessor"]:
# PSI_SCRATCH must be set before this call!
content = psi4.process_input(content)
# Handle Verbose
if args["verbose"]:
psi4.core.print_out('\nParsed Psithon:')
psi4.core.print_out(content)
psi4.core.print_out('-' * 75)
# Handle Messy
if args["messy"]:
import atexit
for handler in atexit._exithandlers:
if handler[0] == psi4.core.clean:
atexit._exithandlers.remove(handler)
# Register exit printing, failure GOTO coffee ELSE beer
import atexit
atexit.register(psi4.extras.exit_printing)
# Run the program!
try:
exec(content)
psi4.extras._success_flag_ = True
# Capture _any_ python error message
except Exception as exception:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
tb_str = "Traceback (most recent call last):\n"
tb_str += ''.join(traceback.format_tb(exc_traceback))
tb_str += '\n'
tb_str += type(exception).__name__
tb_str += ': '
tb_str += str(exception)
psi4.core.print_out("\n")
psi4.core.print_out(tb_str)
psi4.core.print_out("\n")
if psi4.core.get_output_file() != "stdout":
print(tb_str)
sys.exit(1)
# elif '***HDF5 library version mismatched error***' in str(err):
# raise ImportError("{0}\nLikely cause: HDF5 used in compilation not prominent enough in RPATH/[DY]LD_LIBRARY_PATH".format(err))
| kannon92/psi4 | psi4/run_psi4.py | Python | gpl-2.0 | 9,299 | ["Psi4"] | 8ef3d1405816bc2d9dc3b7139ee0079e1910ec9db054525a1f101fa772c02fd8 |
#!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
## Convert single amber crd into Trajectory object
## $Revision$
## last $Date$
## last $Author$
import sys
from Biskit.tools import *
from Biskit import AmberCrdParser
def _use():
print """
Convert single amber crd into Trajectory object
amber2traj.py -i sim.crd -o traj_0.dat -r ref.pdb [-b -wat -hyd -rnres
-code PDBC ]
-i input amber trajectory
-o output file with pickled biskit Trajectory object
-r reference PDB, must have identical atom content+order as sim.crd
-b traj has box info (3 additional coordinates per frame)
-wat delete WAT, Cl-, Na+ residues (after parsing)
-hyd delete all hydrogens (after parsing)
-rnres rename amber residues HIE/HID/HIP, CYX to HIS and CYS
-code PDB code of molecule [first 4 letters of ref file name]
"""
sys.exit( 0 )
if __name__ == '__main__':
if len( sys.argv ) < 2:
_use()
o = cmdDict( {'o':'traj_0.dat', 'i':'sim.crd'} )
fcrd = o['i']
fpdb = o['r']
fout = o['o']
box = o.has_key( 'b' )
wat = o.has_key('wat')
hyd = o.has_key('hyd')
rnres = o.has_key('rnres')
code = o.get('code', None)
p = AmberCrdParser( fcrd, fpdb, box, rnres, pdbCode=code )
t = p.crd2traj()
if wat:
t.removeAtoms( lambda a: a['residue_name'] in ['WAT', 'Na+', 'Cl-'] )
if hyd:
t.ref.addChainId( keep_old=1 ) ## preserve chain-delimiters
t.removeAtoms( lambda a: a['element'] == 'H' )
print "Dumping result to ", fout
dump( t, absfile(fout) )
print "Done"
| ostrokach/biskit | scripts/Biskit/amber2traj.py | Python | gpl-3.0 | 2,421 | ["Amber"] | 00483b65d4f568bc75001a1d08b582ce795fc348fd0ca00cfb80c8e68ecb6159 |
from __future__ import print_function
__author__ = 'Tom Schaul, tom@idsia.ch; Justin Bayer, bayerj@in.tum.de'
import gc
import pickle
import logging
import threading
import os
import operator
from itertools import count
from math import sqrt
from random import random, choice
from scipy import where, array, exp, zeros, size, mat, median
from functools import reduce
# file extension for load/save protocol mapping
known_extensions = {
'mat': 'matlab',
'txt': 'ascii',
'svm': 'libsvm',
'pkl': 'pickle',
'nc' : 'netcdf' }
def abstractMethod():
""" This should be called when an abstract method is called that should have been
implemented by a subclass. It should not be called in situations where no implementation
(i.e. a 'pass' behavior) is acceptable. """
raise NotImplementedError('Method not implemented!')
def drawIndex(probs, tolerant=False):
""" Draws an index given an array of probabilities.
:key tolerant: if set to True, the array is normalized to sum to 1. """
if not sum(probs) < 1.00001 or not sum(probs) > 0.99999:
if tolerant:
probs /= sum(probs)
else:
print((probs, 1 - sum(probs)))
raise ValueError()
r = random()
s = 0
for i, p in enumerate(probs):
s += p
if s > r:
return i
return choice(list(range(len(probs))))
def drawGibbs(vals, temperature=1.):
""" Return the index of the sample drawn by a softmax (Gibbs). """
if temperature == 0:
# randomly pick one of the values with the max value.
m = max(vals)
best = []
for i, v in enumerate(vals):
if v == m:
best.append(i)
return choice(best)
else:
temp = vals / temperature
# make sure we keep the exponential bounded (between +20 and -20)
temp += 20 - max(temp)
if min(temp) < -20:
for i, v in enumerate(temp):
if v < -20:
temp[i] = -20
temp = exp(temp)
temp /= sum(temp)
return drawIndex(temp)
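# Illustrative behaviour of drawGibbs (a sketch, not part of the original module;
# the argument values are made up):
#   drawGibbs([1., 2., 3.], temperature=0)    -> always 2 (index of the maximum)
#   drawGibbs([1., 2., 3.], temperature=1e6)  -> close to uniform over 0, 1, 2
# Lower temperatures concentrate probability mass on the largest values.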
def iterCombinations(tup):
""" all possible of integer tuples of the same dimension than tup, and each component being
positive and strictly inferior to the corresponding entry in tup. """
if len(tup) == 1:
for i in range(tup[0]):
yield (i,)
elif len(tup) > 1:
for prefix in iterCombinations(tup[:-1]):
for i in range(tup[-1]):
yield tuple(list(prefix) + [i])
def setAllArgs(obj, argdict):
""" set all those internal variables which have the same name than an entry in the
given object's dictionary.
This function can be useful for quick initializations. """
xmlstore = isinstance(obj, XMLBuildable)
for n in list(argdict.keys()):
if hasattr(obj, n):
setattr(obj, n, argdict[n])
if xmlstore:
obj.argdict[n] = argdict[n]
else:
print(('Warning: parameter name', n, 'not found!'))
if xmlstore:
if not hasattr(obj, '_unknown_argdict'):
obj._unknown_argdict = {}
obj._unknown_argdict[n] = argdict[n]
def linscale(d, lim):
""" utility function to linearly scale array d to the interval defined by lim """
return (d - d.min())*(lim[1] - lim[0]) + lim[0]
def percentError(out, true):
""" return percentage of mismatch between out and target values (lists and arrays accepted) """
arrout = array(out).flatten()
wrong = where(arrout != array(true).flatten())[0].size
return 100. * float(wrong) / float(arrout.size)
def formatFromExtension(fname):
"""Tries to infer a protocol from the file extension."""
_base, ext = os.path.splitext(fname)
if not ext:
return None
try:
format = known_extensions[ext.replace('.', '')]
except KeyError:
format = None
return format
class XMLBuildable(object):
""" subclasses of this can be losslessly stored in XML, and
automatically reconstructed on reading. For this they need to store
their construction arguments in the variable <argdict>. """
argdict = None
def setArgs(self, **argdict):
if not self.argdict:
self.argdict = {}
setAllArgs(self, argdict)
class Serializable(object):
"""Class that implements shortcuts to serialize an object.
Serialization is done by various formats. At the moment, only 'pickle' is
supported.
"""
def saveToFileLike(self, flo, format=None, **kwargs):
"""Save the object to a given file like object in the given format.
"""
format = 'pickle' if format is None else format
save = getattr(self, "save_%s" % format, None)
if save is None:
raise ValueError("Unknown format '%s'." % format)
save(flo, **kwargs)
@classmethod
def loadFromFileLike(cls, flo, format=None):
"""Load the object to a given file like object with the given protocol.
"""
format = 'pickle' if format is None else format
load = getattr(cls, "load_%s" % format, None)
if load is None:
raise ValueError("Unknown format '%s'." % format)
return load(flo)
def saveToFile(self, filename, format=None, **kwargs):
"""Save the object to file given by filename."""
if format is None:
# try to derive protocol from file extension
format = formatFromExtension(filename)
with open(filename, 'wb') as fp:
self.saveToFileLike(fp, format, **kwargs)
@classmethod
def loadFromFile(cls, filename, format=None):
"""Return an instance of the class that is saved in the file with the
given filename in the specified format."""
if format is None:
# try to derive protocol from file extension
format = formatFromExtension(filename)
with open(filename, 'rb') as fp:
obj = cls.loadFromFileLike(fp, format)
obj.filename = filename
return obj
def save_pickle(self, flo, protocol=0):
pickle.dump(self, flo, protocol)
@classmethod
def load_pickle(cls, flo):
return pickle.load(flo)
class Named(XMLBuildable):
"""Class whose objects are guaranteed to have a unique name."""
_nameIds = count(0)
def getName(self):
logging.warning("Deprecated, use .name property instead.")
return self.name
def setName(self, newname):
logging.warning("Deprecated, use .name property instead.")
self.name = newname
def _getName(self):
"""Returns the name, which is generated if it has not been already."""
if self._name is None:
self._name = self._generateName()
return self._name
def _setName(self, newname):
"""Change name to newname. Uniqueness is not guaranteed anymore."""
self._name = newname
_name = None
name = property(_getName, _setName)
def _generateName(self):
"""Return a unique name for this object."""
return "%s-%i" % (self.__class__.__name__, next(self._nameIds))
def __repr__(self):
""" The default representation of a named object is its name. """
return "<%s '%s'>" % (self.__class__.__name__, self.name)
def fListToString(a_list, a_precision=3):
""" Returns a string representing a list of floats with a given precision """
from numpy import around
s_list = ", ".join(("%g" % around(x, a_precision)).ljust(a_precision+3)
for x in a_list)
return "[%s]" % s_list
def tupleRemoveItem(tup, index):
""" remove the item at position index of the tuple and return a new tuple. """
l = list(tup)
return tuple(l[:index] + l[index + 1:])
def confidenceIntervalSize(stdev, nbsamples):
""" Determine the size of the confidence interval, given the standard deviation and the number of samples.
t-test-percentile: 97.5%, infinitely many degrees of freedom,
therefore on the two-sided interval: 95% """
# CHECKME: for better precision, maybe get the percentile dynamically, from the scipy library?
return 2 * 1.98 * stdev / sqrt(nbsamples)
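# Worked example (illustrative numbers): with stdev = 0.5 and nbsamples = 100,
# confidenceIntervalSize returns 2 * 1.98 * 0.5 / sqrt(100) = 0.198,
# i.e. roughly +/- 0.1 around the mean at the ~95% level.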
def trace(func):
def inner(*args, **kwargs):
print(("%s: %s, %s" % (func.__name__, args, kwargs)))
return func(*args, **kwargs)
return inner
def threaded(callback=lambda * args, **kwargs: None, daemonic=False):
"""Decorate a function to run in its own thread and report the result
by calling callback with it."""
def innerDecorator(func):
def inner(*args, **kwargs):
target = lambda: callback(func(*args, **kwargs))
t = threading.Thread(target=target)
t.setDaemon(daemonic)
t.start()
return inner
return innerDecorator
def garbagecollect(func):
"""Decorate a function to invoke the garbage collector after each execution.
"""
def inner(*args, **kwargs):
result = func(*args, **kwargs)
gc.collect()
return result
return inner
def memoize(func):
"""Decorate a function to 'memoize' results by holding it in a cache that
maps call arguments to returns."""
cache = {}
def inner(*args, **kwargs):
# Dictionaries and lists are unhashable
args = tuple(args)
# Make a set for checking in the cache, since the order of
# .iteritems() is undefined
kwargs_set = frozenset(iter(kwargs.items()))
if (args, kwargs_set) in cache:
result = cache[args, kwargs_set]
else:
result = func(*args, **kwargs)
cache[args, kwargs_set] = result
return result
return inner
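# Minimal usage sketch (the function below is hypothetical, not part of this module):
#
#   @memoize
#   def slow_fib(n):
#       return n if n < 2 else slow_fib(n - 1) + slow_fib(n - 2)
#
# Repeated calls with the same positional and keyword arguments are answered
# from the cache; arguments must be hashable for the cache key to work.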
def storeCallResults(obj, verbose=False):
"""Pseudo-decorate an object to store all evaluations of the function in the returned list."""
results = []
oldcall = obj.__class__.__call__
def newcall(*args, **kwargs):
result = oldcall(*args, **kwargs)
results.append(result)
if verbose:
print(result)
return result
obj.__class__.__call__ = newcall
return results
def multiEvaluate(repeat):
"""Decorate a function to evaluate repeatedly with the same arguments, and return the average result """
def decorator(func):
def inner(*args, **kwargs):
result = 0.
for dummy in range(repeat):
result += func(*args, **kwargs)
return result / repeat
return inner
return decorator
def _import(name):
"""Return module from a package.
These two are equivalent:
> from package import module as bar
> bar = _import('package.module')
"""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
try:
mod = getattr(mod, comp)
except AttributeError:
raise ImportError("No module named %s" % mod)
return mod
# tools for binary Gray code manipulation:
def int2gray(i):
""" Returns the value of an integer in Gray encoding."""
return i ^ (i >> 1)
def gray2int(g, size):
""" Transforms a Gray code back into an integer. """
res = 0
for i in reversed(list(range(size))):
gi = (g >> i) % 2
if i == size - 1:
bi = gi
else:
bi = bi ^ gi
res += bi * 2 ** i
return res
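# Round-trip example (illustrative): int2gray(5) == 0b101 ^ 0b010 == 0b111 == 7,
# and gray2int(7, size=3) recovers 5. The first few codes are
# 0 -> 0, 1 -> 1, 2 -> 3, 3 -> 2, so consecutive integers differ in exactly one bit.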
def asBinary(i):
""" Produces a string from an integer's binary representation.
(leading zeros removed). """
if i > 1:
if i % 2 == 1:
return asBinary(i >> 1) + '1'
else:
return asBinary(i >> 1) + '0'
else:
return str(i)
def one_to_n(val, maxval):
""" Returns a 1-in-n binary encoding of a non-negative integer. """
a = zeros(maxval, float)
a[val] = 1.
return a
def n_to_one(arr):
""" Returns the reverse of a 1-in-n binary encoding. """
return where(arr == 1)[0][0]
def canonicClassString(x):
""" the __class__ attribute changed from old-style to new-style classes... """
if isinstance(x, object):
return repr(x.__class__).split("'")[1]
else:
return repr(x.__class__)
def decrementAny(tup):
""" the closest tuples to tup: decrementing by 1 along any dimension.
Never go into negatives though. """
res = []
for i, x in enumerate(tup):
if x > 0:
res.append(tuple(list(tup[:i]) + [x - 1] + list(tup[i + 1:])))
return res
def reachable(stepFunction, start, destinations, _alreadyseen=None):
""" Determines the subset of destinations that can be reached from a set of starting positions,
while using stepFunction (which produces a list of neighbor states) to navigate.
Uses breadth-first search.
Returns a dictionary with reachable destinations and their distances.
"""
if len(start) == 0 or len(destinations) == 0:
return {}
if _alreadyseen is None:
_alreadyseen = []
_alreadyseen.extend(start)
# dict with distances to destinations
res = {}
for s in list(start):  # iterate over a copy; 'start' is mutated below
if s in destinations:
res[s] = 0
start.remove(s)
# do one step
new = set()
for s in start:
new.update(stepFunction(s))
new.difference_update(_alreadyseen)
ndestinations = list(destinations)
for s in list(new):
if s in destinations:
res[s] = 1
new.remove(s)
ndestinations.remove(s)
_alreadyseen.append(s)
# recursively do the rest
deeper = reachable(stepFunction, new, ndestinations, _alreadyseen)
# adjust distances
for k, val in list(deeper.items()):
res[k] = val + 1
return res
def flood(stepFunction, fullSet, initSet, relevant=None):
""" Returns a list of elements of fullSet linked to some element of initSet
through the neighborhood-setFunction (which must be defined on all elements of fullSet).
:key relevant: (optional) list of relevant elements: stop once all relevant elements are found.
"""
if fullSet is None:
flooded = set(initSet)
else:
full = set(fullSet)
flooded = full.intersection(set(initSet))
if relevant is None:
relevant = full.copy()
if relevant:
relevant = set(relevant)
change = flooded.copy()
while len(change)>0:
new = set()
for m in change:
if fullSet is None:
new.update(stepFunction(m))
else:
new.update(full.intersection(stepFunction(m)))
change = new.difference(flooded)
flooded.update(change)
if relevant is not None and relevant.issubset(flooded):
break
return list(flooded)
def crossproduct(ss, row=None, level=0):
"""Returns the cross-product of the sets given in `ss`."""
if row is None:
row = []
if len(ss) > 1:
return reduce(operator.add,
[crossproduct(ss[1:], row + [i], level + 1) for i in ss[0]])
else:
return [row + [i] for i in ss[0]]
def permute(arr, permutation):
"""Return an array like arr but with elements permuted.
Only the first dimension is permuted, which makes it possible to permute
blocks of the input.
arr can be anything as long as it's indexable."""
return array([arr[i] for i in permutation])
def permuteToBlocks(arr, blockshape):
"""Permute an array so that it consists of linearized blocks.
Example: A two-dimensional array of the form
0 1 2 3
4 5 6 7
8 9 10 11
12 13 14 15
would be turned into an array like this with (2, 2) blocks:
0 1 4 5 2 3 6 7 8 9 12 13 10 11 14 15
"""
if len(blockshape) < 2:
raise ValueError("Need more than one dimension.")
elif len(blockshape) == 2:
blockheight, blockwidth = blockshape
return permuteToBlocks2d(arr, blockheight, blockwidth)
elif len(blockshape) == 3:
blockdepth, blockheight, blockwidth = blockshape
return permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth)
else:
raise NotImplementedError("Only for dimensions 2 and 3.")
def permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth):
depth, height, width = arr.shape
arr_ = arr.reshape(height * depth, width)
arr_ = permuteToBlocks2d(arr_, blockheight, blockwidth)
arr_.shape = depth, height * width
return permuteToBlocks2d(arr_, blockdepth, blockwidth * blockheight)
def permuteToBlocks2d(arr, blockheight, blockwidth):
_height, width = arr.shape
arr = arr.flatten()
new = zeros(size(arr))
for i in range(size(arr)):
# use floor division so the index arithmetic stays integral on Python 3 as well
blockx = (i % width) // blockwidth
blocky = i // width // blockheight
blockoffset = blocky * width // blockwidth + blockx
blockoffset *= blockwidth * blockheight
inblockx = i % blockwidth
inblocky = (i // width) % blockheight
j = blockoffset + inblocky * blockwidth + inblockx
new[j] = arr[i]
return new
def triu2flat(m):
""" Flattens an upper triangular matrix, returning a vector of the
non-zero elements. """
dim = m.shape[0]
res = zeros(dim * (dim + 1) / 2)
index = 0
for row in range(dim):
res[index:index + dim - row] = m[row, row:]
index += dim - row
return res
def flat2triu(a, dim):
""" Produces an upper triangular matrix of dimension dim from the elements of the given vector. """
res = zeros((dim, dim))
index = 0
for row in range(dim):
res[row, row:] = a[index:index + dim - row]
index += dim - row
return res
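# Round-trip example (illustrative values): for the 3x3 upper triangular matrix
#   [[1, 2, 3],
#    [0, 4, 5],
#    [0, 0, 6]]
# triu2flat returns [1, 2, 3, 4, 5, 6], and flat2triu of that vector with
# dim=3 rebuilds the original matrix.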
def blockList2Matrix(l):
""" Converts a list of matrices into a corresponding big block-diagonal one. """
dims = [m.shape[0] for m in l]
s = sum(dims)
res = zeros((s, s))
index = 0
for i in range(len(l)):
d = dims[i]
m = l[i]
res[index:index + d, index:index + d] = m
index += d
return res
def blockCombine(l):
""" Produce a matrix from a list of lists of its components. """
l = [list(map(mat, row)) for row in l]
hdims = [m.shape[1] for m in l[0]]
hs = sum(hdims)
vdims = [row[0].shape[0] for row in l]
vs = sum(vdims)
res = zeros((hs, vs))
vindex = 0
for i, row in enumerate(l):
hindex = 0
for j, m in enumerate(row):
res[vindex:vindex + vdims[i], hindex:hindex + hdims[j]] = m
hindex += hdims[j]
vindex += vdims[i]
return res
def avgFoundAfter(decreasingTargetValues, listsOfActualValues, batchSize=1, useMedian=False):
""" Determine the average number of steps to reach a certain value (for the first time),
given a list of value sequences.
If a value is not always encountered, the length of the longest sequence is used.
Returns an array. """
from scipy import sum
numLists = len(listsOfActualValues)
longest = max(list(map(len, listsOfActualValues)))
# gather a list of indices of first encounters
res = [[0] for _ in range(numLists)]
for tval in decreasingTargetValues:
for li, l in enumerate(listsOfActualValues):
lres = res[li]
found = False
for i in range(lres[-1], len(l)):
if l[i] <= tval:
lres.append(i)
found = True
break
if not found:
lres.append(longest)
tmp = array(res)
if useMedian:
resx = median(tmp, axis=0)[1:]
else:
resx = sum(tmp, axis=0)[1:] / float(numLists)
return resx * batchSize
class DivergenceError(Exception):
""" Raised when an algorithm diverges. """
def matchingDict(d, selection, require_existence=False):
""" Determines if the dictionary d conforms to the specified selection,
i.e. if a (key, x) is in the selection, then if key is in d as well it must be x
or contained in x (if x is a list). """
for k, v in list(selection.items()):
if k in d:
if isinstance(v, list):
if d[k] not in v:
return False
else:
if d[k] != v:
return False
elif require_existence:
return False
return True
def subDict(d, allowedkeys, flip=False):
""" Returns a new dictionary with a subset of the entries of d
that have one of the (dis-)allowed keys."""
res = {}
for k, v in list(d.items()):
if (k in allowedkeys) ^ flip:
res[k] = v
return res
def dictCombinations(listdict):
""" Iterates over dictionaries that go through every possible combination
of key-value pairs as specified in the lists of values for each key in listdict."""
listdict = listdict.copy()
if len(listdict) == 0:
return [{}]
k, vs = listdict.popitem()
res = dictCombinations(listdict)
if isinstance(vs, list) or isinstance(vs, tuple):
res = [dict(d, **{k:v}) for d in res for v in sorted(set(vs))]
else:
res = [dict(d, **{k:vs}) for d in res]
return res
def r_argmax(v):
""" Acts like scipy argmax, but break ties randomly. """
if len(v) == 1:
return 0
maxbid = max(v)
maxbidders = [i for (i, b) in enumerate(v) if b==maxbid]
return choice(maxbidders)
def all_argmax(x):
""" Return the indices of all values that are equal to the maximum: no breaking ties. """
m = max(x)
return [i for i, v in enumerate(x) if v == m]
def dense_orth(dim):
""" Constructs a dense orthogonal matrix. """
from scipy import rand
from scipy.linalg import orth
return orth(rand(dim, dim))
def sparse_orth(d):
""" Constructs a sparse orthogonal matrix.
The method is described in:
Gi-Sang Cheon et al., Constructions for the sparsest orthogonal matrices,
Bull. Korean Math. Soc 36 (1999) No.1 pp.199-129
"""
from scipy.sparse import eye
from scipy import r_, pi, sin, cos
if d%2 == 0:
seq = r_[0:d:2,1:d-1:2]
else:
seq = r_[0:d-1:2,1:d:2]
Q = eye(d,d).tocsc()
for i in seq:
theta = random() * 2 * pi
flip = (random() - 0.5)>0;
Qi = eye(d,d).tocsc()
Qi[i,i] = cos(theta)
Qi[(i+1),i] = sin(theta)
if flip > 0:
Qi[i,(i+1)] = -sin(theta)
Qi[(i+1),(i+1)] = cos(theta)
else:
Qi[i,(i+1)] = sin(theta)
Qi[(i+1),(i+1)] = -cos(theta)
Q = Q*Qi;
return Q
def xhash(arr):
""" Hashing function for arrays. Use with care. """
import hashlib
return hashlib.sha1(arr).hexdigest()
def binArr2int(arr):
""" Convert a binary array into its (long) integer representation. """
from numpy import packbits
tmp2 = packbits(arr.astype(int))
return sum(val * 256 ** i for i, val in enumerate(tmp2[::-1]))
def uniqueArrays(vs):
""" create a set of arrays """
resdic = {}
for v in vs:
resdic[xhash(v)] = v
return list(resdic.values())
def seedit(seed=0):
""" Fixed seed makes for repeatability, but there may be two different
random number generators involved. """
import random
import numpy
random.seed(seed)
numpy.random.seed(seed)
def weightedUtest(g1, w1, g2, w2):
""" Determines the confidence level of the assertion:
'The values of g2 are higher than those of g1'.
(adapted from the scipy.stats version)
Twist: here the elements of each group have associated weights,
corresponding to how often they are present (i.e. two identical entries with
weight w are equivalent to one entry with weight 2w).
Reference: "Studies in Continuous Black-box Optimization", Schaul, 2011 [appendix B].
TODO: make more efficient for large sets.
"""
from scipy.stats.distributions import norm
import numpy
n1 = sum(w1)
n2 = sum(w2)
u1 = 0.
for x1, wx1 in zip(g1, w1):
for x2, wx2 in zip(g2, w2):
if x1 == x2:
u1 += 0.5 * wx1 * wx2
elif x1 > x2:
u1 += wx1 * wx2
mu = n1*n2/2.
sigu = numpy.sqrt(n1*n2*(n1+n2+1)/12.)
z = (u1 - mu) / sigu
conf = norm.cdf(z)
return conf
| comepradz/pybrain | pybrain/utilities.py | Python | bsd-3-clause | 24,382 | ["NetCDF"] | 872975aef280a71fbd4df825f1c71316b52e09bbee2d712f21d3179e0d95ed08 |
# (C) British Crown Copyright 2010 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Classes for representing multi-dimensional data with metadata.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import collections
import copy
from copy import deepcopy
import datetime
from functools import reduce
import operator
import warnings
from xml.dom.minidom import Document
import zlib
import dask.array as da
import numpy as np
import numpy.ma as ma
from iris._cube_coord_common import CFVariableMixin
import iris._concatenate
import iris._constraints
from iris._data_manager import DataManager
import iris._lazy_data as _lazy
import iris._merge
import iris.analysis
from iris.analysis.cartography import wrap_lons
import iris.analysis.maths
import iris.aux_factory
import iris.coord_systems
import iris.coords
import iris.exceptions
import iris.util
__all__ = ['Cube', 'CubeList', 'CubeMetadata']
class CubeMetadata(collections.namedtuple('CubeMetadata',
['standard_name',
'long_name',
'var_name',
'units',
'attributes',
'cell_methods'])):
"""
Represents the phenomenon metadata for a single :class:`Cube`.
"""
__slots__ = ()
def name(self, default='unknown'):
"""
Returns a human-readable name.
First it tries self.standard_name, then it tries the 'long_name'
attribute, then the 'var_name' attribute, before falling back to
the value of `default` (which itself defaults to 'unknown').
"""
return self.standard_name or self.long_name or self.var_name or default
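# Illustrative fallback (assumed values): metadata with standard_name=None,
# long_name='sea surface temperature' and var_name='sst' reports
# 'sea surface temperature'; with all three unset, name() returns the
# `default` argument, i.e. 'unknown'.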
# The XML namespace to use for CubeML documents
XML_NAMESPACE_URI = "urn:x-iris:cubeml-0.2"
class _CubeFilter(object):
"""
A constraint, paired with a list of cubes matching that constraint.
"""
def __init__(self, constraint, cubes=None):
self.constraint = constraint
if cubes is None:
cubes = CubeList()
self.cubes = cubes
def __len__(self):
return len(self.cubes)
def add(self, cube):
"""
Adds the appropriate (sub)cube to the list of cubes where it
matches the constraint.
"""
sub_cube = self.constraint.extract(cube)
if sub_cube is not None:
self.cubes.append(sub_cube)
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilter` by merging the list of
cubes.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilter(self.constraint, self.cubes.merge(unique))
class _CubeFilterCollection(object):
"""
A list of _CubeFilter instances.
"""
@staticmethod
def from_cubes(cubes, constraints=None):
"""
Creates a new collection from an iterable of cubes, and some
optional constraints.
"""
constraints = iris._constraints.list_of_constraints(constraints)
pairs = [_CubeFilter(constraint) for constraint in constraints]
collection = _CubeFilterCollection(pairs)
for cube in cubes:
collection.add_cube(cube)
return collection
def __init__(self, pairs):
self.pairs = pairs
def add_cube(self, cube):
"""
Adds the given :class:`~iris.cube.Cube` to all of the relevant
constraint pairs.
"""
for pair in self.pairs:
pair.add(cube)
def cubes(self):
"""
Returns all the cubes in this collection concatenated into a
single :class:`CubeList`.
"""
result = CubeList()
for pair in self.pairs:
result.extend(pair.cubes)
return result
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilterCollection` by merging all the cube
lists of this collection.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilterCollection([pair.merged(unique) for pair in
self.pairs])
class CubeList(list):
"""
All the functionality of a standard :class:`list` with added "Cube"
context.
"""
def __new__(cls, list_of_cubes=None):
"""Given a :class:`list` of cubes, return a CubeList instance."""
cube_list = list.__new__(cls, list_of_cubes)
# Check that all items in the incoming list are cubes. Note that this
# checking does not guarantee that a CubeList instance *always* has
# just cubes in its list as the append & __getitem__ methods have not
# been overridden.
if not all([isinstance(cube, Cube) for cube in cube_list]):
raise ValueError('All items in list_of_cubes must be Cube '
'instances.')
return cube_list
def __str__(self):
"""Runs short :meth:`Cube.summary` on every cube."""
result = ['%s: %s' % (i, cube.summary(shorten=True)) for i, cube in
enumerate(self)]
if result:
result = '\n'.join(result)
else:
result = '< No cubes >'
return result
def __repr__(self):
"""Runs repr on every cube."""
return '[%s]' % ',\n'.join([repr(cube) for cube in self])
# TODO #370 Which operators need overloads?
def __add__(self, other):
return CubeList(list.__add__(self, other))
def __getitem__(self, keys):
"""x.__getitem__(y) <==> x[y]"""
result = super(CubeList, self).__getitem__(keys)
if isinstance(result, list):
result = CubeList(result)
return result
def __getslice__(self, start, stop):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
result = super(CubeList, self).__getslice__(start, stop)
result = CubeList(result)
return result
def xml(self, checksum=False, order=True, byteorder=True):
"""Return a string of the XML that this list of cubes represents."""
doc = Document()
cubes_xml_element = doc.createElement("cubes")
cubes_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
for cube_obj in self:
cubes_xml_element.appendChild(
cube_obj._xml_element(
doc, checksum=checksum, order=order, byteorder=byteorder))
doc.appendChild(cubes_xml_element)
# return our newly created XML string
return doc.toprettyxml(indent=" ")
def extract(self, constraints, strict=False):
"""
Filter each of the cubes which can be filtered by the given
constraints.
This method iterates over each constraint given, and subsets each of
the cubes in this CubeList where possible. Thus, a CubeList of length
**n** when filtered with **m** constraints can generate a maximum of
**m * n** cubes.
Keywords:
* strict - boolean
If strict is True, then there must be exactly one cube which is
filtered per constraint.
"""
return self._extract_and_merge(self, constraints, strict,
merge_unique=None)
@staticmethod
def _extract_and_merge(cubes, constraints, strict, merge_unique=False):
# * merge_unique - if None: no merging, if false: non unique merging,
# else unique merging (see merge)
constraints = iris._constraints.list_of_constraints(constraints)
# group the resultant cubes by constraints in a dictionary
constraint_groups = dict([(constraint, CubeList()) for constraint in
constraints])
for cube in cubes:
for constraint, cube_list in six.iteritems(constraint_groups):
sub_cube = constraint.extract(cube)
if sub_cube is not None:
cube_list.append(sub_cube)
if merge_unique is not None:
for constraint, cubelist in six.iteritems(constraint_groups):
constraint_groups[constraint] = cubelist.merge(merge_unique)
result = CubeList()
for constraint in constraints:
constraint_cubes = constraint_groups[constraint]
if strict and len(constraint_cubes) != 1:
msg = 'Got %s cubes for constraint %r, ' \
'expecting 1.' % (len(constraint_cubes), constraint)
raise iris.exceptions.ConstraintMismatchError(msg)
result.extend(constraint_cubes)
if strict and len(constraints) == 1:
result = result[0]
return result
def extract_strict(self, constraints):
"""
Calls :meth:`CubeList.extract` with the strict keyword set to True.
"""
return self.extract(constraints, strict=True)
def extract_overlapping(self, coord_names):
"""
Returns a :class:`CubeList` of cubes extracted over regions
where the coordinates overlap, for the coordinates
in coord_names.
Args:
* coord_names:
A string or list of strings of the names of the coordinates
over which to perform the extraction.
"""
if isinstance(coord_names, six.string_types):
coord_names = [coord_names]
def make_overlap_fn(coord_name):
def overlap_fn(cell):
return all(cell in cube.coord(coord_name).cells()
for cube in self)
return overlap_fn
coord_values = {coord_name: make_overlap_fn(coord_name)
for coord_name in coord_names}
return self.extract(iris.Constraint(coord_values=coord_values))
def merge_cube(self):
"""
Return the merged contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to merge the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.MergeError` will be raised
describing the reason for the failure.
For example:
>>> cube_1 = iris.cube.Cube([1, 2])
>>> cube_1.add_aux_coord(iris.coords.AuxCoord(0, long_name='x'))
>>> cube_2 = iris.cube.Cube([3, 4])
>>> cube_2.add_aux_coord(iris.coords.AuxCoord(1, long_name='x'))
>>> cube_2.add_dim_coord(
... iris.coords.DimCoord([0, 1], long_name='z'), 0)
>>> single_cube = iris.cube.CubeList([cube_1, cube_2]).merge_cube()
Traceback (most recent call last):
...
iris.exceptions.MergeError: failed to merge into a single cube.
Coordinates in cube.dim_coords differ: z.
Coordinate-to-dimension mapping differs for cube.dim_coords.
"""
if not self:
raise ValueError("can't merge an empty CubeList")
# Register each of our cubes with a single ProtoCube.
proto_cube = iris._merge.ProtoCube(self[0])
for cube in self[1:]:
proto_cube.register(cube, error_on_mismatch=True)
# Extract the merged cube from the ProtoCube.
merged_cube, = proto_cube.merge()
return merged_cube
def merge(self, unique=True):
"""
Returns the :class:`CubeList` resulting from merging this
:class:`CubeList`.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
This combines cubes with different values of an auxiliary scalar
coordinate, by constructing a new dimension.
.. testsetup::
import iris
c1 = iris.cube.Cube([0,1,2], long_name='some_parameter')
xco = iris.coords.DimCoord([11, 12, 13], long_name='x_vals')
c1.add_dim_coord(xco, 0)
c1.add_aux_coord(iris.coords.AuxCoord([100], long_name='y_vals'))
c2 = c1.copy()
c2.coord('y_vals').points = [200]
For example::
>>> print(c1)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 100
>>> print(c2)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 200
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.merge()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 2; x_vals: 3)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[100 200]
>>>
Contrast this with :meth:`iris.cube.CubeList.concatenate`, which joins
cubes along an existing dimension.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be merged. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be merged.
"""
# Register each of our cubes with its appropriate ProtoCube.
proto_cubes_by_name = {}
for cube in self:
name = cube.standard_name
proto_cubes = proto_cubes_by_name.setdefault(name, [])
proto_cube = None
for target_proto_cube in proto_cubes:
if target_proto_cube.register(cube):
proto_cube = target_proto_cube
break
if proto_cube is None:
proto_cube = iris._merge.ProtoCube(cube)
proto_cubes.append(proto_cube)
# Emulate Python 2 behaviour.
def _none_sort(item):
return (item is not None, item)
# Extract all the merged cubes from the ProtoCubes.
merged_cubes = CubeList()
for name in sorted(proto_cubes_by_name, key=_none_sort):
for proto_cube in proto_cubes_by_name[name]:
merged_cubes.extend(proto_cube.merge(unique=unique))
return merged_cubes
def concatenate_cube(self, check_aux_coords=True):
"""
Return the concatenated contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to concatenate the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.ConcatenateError` will be raised
describing the reason for the failure.
Kwargs:
* check_aux_coords
Checks that the auxiliary coordinates of the cubes match. This check
is not applied to auxiliary coordinates that span the dimension
the concatenation is occurring along. Defaults to True.
.. note::
Concatenation cannot occur along an anonymous dimension.
"""
if not self:
raise ValueError("can't concatenate an empty CubeList")
names = [cube.metadata.name() for cube in self]
unique_names = list(collections.OrderedDict.fromkeys(names))
if len(unique_names) == 1:
res = iris._concatenate.concatenate(
self, error_on_mismatch=True,
check_aux_coords=check_aux_coords)
n_res_cubes = len(res)
if n_res_cubes == 1:
return res[0]
else:
msgs = []
msgs.append('An unexpected problem prevented concatenation.')
msgs.append('Expected only a single cube, '
'found {}.'.format(n_res_cubes))
raise iris.exceptions.ConcatenateError(msgs)
else:
msgs = []
msgs.append('Cube names differ: {} != {}'.format(names[0],
names[1]))
raise iris.exceptions.ConcatenateError(msgs)
def concatenate(self, check_aux_coords=True):
"""
Concatenate the cubes over their common dimensions.
Kwargs:
* check_aux_coords
Checks that the auxiliary coordinates of the cubes match. This check
is not applied to auxiliary coordinates that span the dimension
the concatenation is occurring along. Defaults to True.
Returns:
A new :class:`iris.cube.CubeList` of concatenated
:class:`iris.cube.Cube` instances.
This combines cubes with a common dimension coordinate, but occupying
different regions of the coordinate value. The cubes are joined across
that dimension.
.. testsetup::
import iris
import numpy as np
xco = iris.coords.DimCoord([11, 12, 13, 14], long_name='x_vals')
yco1 = iris.coords.DimCoord([4, 5], long_name='y_vals')
yco2 = iris.coords.DimCoord([7, 9, 10], long_name='y_vals')
c1 = iris.cube.Cube(np.zeros((2,4)), long_name='some_parameter')
c1.add_dim_coord(xco, 1)
c1.add_dim_coord(yco1, 0)
c2 = iris.cube.Cube(np.zeros((3,4)), long_name='some_parameter')
c2.add_dim_coord(xco, 1)
c2.add_dim_coord(yco2, 0)
For example::
>>> print(c1)
some_parameter / (unknown) (y_vals: 2; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c1.coord('y_vals').points)
[4 5]
>>> print(c2)
some_parameter / (unknown) (y_vals: 3; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c2.coord('y_vals').points)
[ 7 9 10]
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.concatenate()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 5; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[ 4 5 7 9 10]
>>>
Contrast this with :meth:`iris.cube.CubeList.merge`, which makes a new
dimension from values of an auxiliary scalar coordinate.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be concatenated. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be concatenated.
.. note::
Concatenation cannot occur along an anonymous dimension.
"""
return iris._concatenate.concatenate(self,
check_aux_coords=check_aux_coords)
def realise_data(self):
"""
Fetch 'real' data for all cubes, in a shared calculation.
This computes any lazy data, equivalent to accessing each `cube.data`.
However, lazy calculations and data fetches can be shared between the
computations, improving performance.
For example::
# Form stats.
a_std = cube_a.collapsed(['x', 'y'], iris.analysis.STD_DEV)
b_std = cube_b.collapsed(['x', 'y'], iris.analysis.STD_DEV)
ab_mean_diff = (cube_b - cube_a).collapsed(['x', 'y'],
iris.analysis.MEAN)
std_err = (a_std * a_std + b_std * b_std) ** 0.5
# Compute these stats together (avoiding multiple data passes).
CubeList([a_std, b_std, ab_mean_diff, std_err]).realise_data()
.. Note::
Cubes with non-lazy data are not affected.
"""
_lazy.co_realise_cubes(*self)
def _is_single_item(testee):
"""
Return whether this is a single item, rather than an iterable.
We count string types as 'single', also.
"""
return (isinstance(testee, six.string_types) or
not isinstance(testee, collections.Iterable))
class Cube(CFVariableMixin):
"""
A single Iris cube of data and metadata.
Typically obtained from :func:`iris.load`, :func:`iris.load_cube`,
:func:`iris.load_cubes`, or from the manipulation of existing cubes.
For example:
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube)
air_temperature / (K) (latitude: 73; longitude: 96)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours
forecast_reference_time: 1998-03-01 03:00:00
pressure: 1000.0 hPa
time: 1998-12-01 00:00:00, \
bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00)
Attributes:
STASH: m01s16i203
source: Data from Met Office Unified Model
Cell methods:
mean within years: time
mean over years: time
See the :doc:`user guide</userguide/index>` for more information.
"""
#: Indicates to client code that the object supports
#: "orthogonal indexing", which means that slices that are 1d arrays
#: or lists slice along each dimension independently. This behavior
#: is similar to Fortran or Matlab, but different from numpy.
__orthogonal_indexing__ = True
def __init__(self, data, standard_name=None, long_name=None,
var_name=None, units=None, attributes=None,
cell_methods=None, dim_coords_and_dims=None,
aux_coords_and_dims=None, aux_factories=None,
cell_measures_and_dims=None):
"""
Creates a cube with data and optional metadata.
Not typically used - normally cubes are obtained by loading data
(e.g. :func:`iris.load`) or from manipulating existing cubes.
Args:
* data
This object defines the shape of the cube and the phenomenon
value in each cell.
``data`` can be a dask array, a NumPy array, a NumPy array
subclass (such as :class:`numpy.ma.MaskedArray`), or
array_like (as described in :func:`numpy.asarray`).
See :attr:`Cube.data<iris.cube.Cube.data>`.
Kwargs:
* standard_name
The standard name for the Cube's data.
* long_name
An unconstrained description of the cube.
* var_name
The netCDF variable name for the cube.
* units
The unit of the cube, e.g. ``"m s-1"`` or ``"kelvin"``.
* attributes
A dictionary of cube attributes
* cell_methods
A tuple of CellMethod objects, generally set by Iris, e.g.
``(CellMethod("mean", coords='latitude'), )``.
* dim_coords_and_dims
A list of coordinates with scalar dimension mappings, e.g
``[(lat_coord, 0), (lon_coord, 1)]``.
* aux_coords_and_dims
A list of coordinates with dimension mappings,
e.g ``[(lat_coord, 0), (lon_coord, (0, 1))]``.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
* aux_factories
A list of auxiliary coordinate factories. See
:mod:`iris.aux_factory`.
* cell_measures_and_dims
A list of CellMeasures with dimension mappings.
For example::
>>> from iris.coords import DimCoord
>>> from iris.cube import Cube
>>> latitude = DimCoord(np.linspace(-90, 90, 4),
... standard_name='latitude',
... units='degrees')
>>> longitude = DimCoord(np.linspace(45, 360, 8),
... standard_name='longitude',
... units='degrees')
>>> cube = Cube(np.zeros((4, 8), np.float32),
... dim_coords_and_dims=[(latitude, 0),
... (longitude, 1)])
"""
# Temporary error while we transition the API.
if isinstance(data, six.string_types):
raise TypeError('Invalid data type: {!r}.'.format(data))
# Initialise the cube data manager.
self._data_manager = DataManager(data)
#: The "standard name" for the Cube's phenomenon.
self.standard_name = standard_name
#: An instance of :class:`cf_units.Unit` describing the Cube's data.
self.units = units
#: The "long name" for the Cube's phenomenon.
self.long_name = long_name
#: The netCDF variable name for the Cube.
self.var_name = var_name
self.cell_methods = cell_methods
#: A dictionary, with a few restricted keys, for arbitrary
#: Cube metadata.
self.attributes = attributes
# Coords
self._dim_coords_and_dims = []
self._aux_coords_and_dims = []
self._aux_factories = []
# Cell Measures
self._cell_measures_and_dims = []
identities = set()
if dim_coords_and_dims:
dims = set()
for coord, dim in dim_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities and dim not in dims:
self._add_unique_dim_coord(coord, dim)
else:
self.add_dim_coord(coord, dim)
identities.add(identity)
dims.add(dim)
if aux_coords_and_dims:
for coord, dims in aux_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities:
self._add_unique_aux_coord(coord, dims)
else:
self.add_aux_coord(coord, dims)
identities.add(identity)
if aux_factories:
for factory in aux_factories:
self.add_aux_factory(factory)
if cell_measures_and_dims:
for cell_measure, dims in cell_measures_and_dims:
self.add_cell_measure(cell_measure, dims)
@property
def metadata(self):
"""
An instance of :class:`CubeMetadata` describing the phenomenon.
This property can be updated with any of:
- another :class:`CubeMetadata` instance,
- a tuple/dict which can be used to make a :class:`CubeMetadata`,
- or any object providing the attributes exposed by
:class:`CubeMetadata`.
"""
return CubeMetadata(self.standard_name, self.long_name, self.var_name,
self.units, self.attributes, self.cell_methods)
@metadata.setter
def metadata(self, value):
try:
value = CubeMetadata(**value)
except TypeError:
try:
value = CubeMetadata(*value)
except TypeError:
missing_attrs = [field for field in CubeMetadata._fields
if not hasattr(value, field)]
if missing_attrs:
raise TypeError('Invalid/incomplete metadata')
for name in CubeMetadata._fields:
setattr(self, name, getattr(value, name))
def is_compatible(self, other, ignore=None):
"""
Return whether the cube is compatible with another.
Compatibility is determined by comparing :meth:`iris.cube.Cube.name()`,
:attr:`iris.cube.Cube.units`, :attr:`iris.cube.Cube.cell_methods` and
:attr:`iris.cube.Cube.attributes` that are present in both objects.
Args:
* other:
An instance of :class:`iris.cube.Cube` or
:class:`iris.cube.CubeMetadata`.
* ignore:
A single attribute key or iterable of attribute keys to ignore when
comparing the cubes. Default is None. To ignore all attributes set
this to other.attributes.
Returns:
Boolean.
.. seealso::
:meth:`iris.util.describe_diff()`
.. note::
This function does not indicate whether the two cubes can be
merged, instead it checks only the four items quoted above for
equality. Determining whether two cubes will merge requires
additional logic that is beyond the scope of this method.
"""
compatible = (self.name() == other.name() and
self.units == other.units and
self.cell_methods == other.cell_methods)
if compatible:
common_keys = set(self.attributes).intersection(other.attributes)
if ignore is not None:
if isinstance(ignore, six.string_types):
ignore = (ignore,)
common_keys = common_keys.difference(ignore)
for key in common_keys:
if np.any(self.attributes[key] != other.attributes[key]):
compatible = False
break
return compatible
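# Minimal usage sketch (cube_a, cube_b and the 'history' attribute are
# hypothetical, not defined in this module):
#
#   if cube_a.is_compatible(cube_b, ignore='history'):
#       combined = cube_a + cube_b
#
# Compatibility only compares the name, units, cell methods and shared
# attributes; it does not guarantee that the two cubes can be merged.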
def convert_units(self, unit):
"""
Change the cube's units, converting the values in the data array.
For example, if a cube's :attr:`~iris.cube.Cube.units` are
kelvin then::
cube.convert_units('celsius')
will change the cube's :attr:`~iris.cube.Cube.units` attribute to
celsius and subtract 273.15 from each value in
:attr:`~iris.cube.Cube.data`.
.. warning::
Calling this method will trigger any deferred loading, causing
the cube's data array to be loaded into memory.
"""
# If the cube has units convert the data.
if self.units.is_unknown():
raise iris.exceptions.UnitConversionError(
'Cannot convert from unknown units. '
'The "cube.units" attribute may be set directly.')
if self.has_lazy_data():
# Make fixed copies of old + new units for a delayed conversion.
old_unit = self.units
new_unit = unit
# Define a delayed conversion operation (i.e. a callback).
def pointwise_convert(values):
return old_unit.convert(values, new_unit)
new_data = _lazy.lazy_elementwise(self.lazy_data(),
pointwise_convert)
else:
new_data = self.units.convert(self.data, unit)
self.data = new_data
self.units = unit
def add_cell_method(self, cell_method):
"""Add a :class:`~iris.coords.CellMethod` to the Cube."""
self.cell_methods += (cell_method, )
def add_aux_coord(self, coord, data_dims=None):
"""
Adds a CF auxiliary coordinate to the cube.
Args:
* coord
The :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(coord): # TODO: just fail on duplicate object
raise ValueError('Duplicate coordinates are not permitted.')
self._add_unique_aux_coord(coord, data_dims)
def _check_multi_dim_metadata(self, metadata, data_dims):
# Convert to a tuple of integers
if data_dims is None:
data_dims = tuple()
elif isinstance(data_dims, collections.Container):
data_dims = tuple(int(d) for d in data_dims)
else:
data_dims = (int(data_dims),)
if data_dims:
if len(data_dims) != metadata.ndim:
msg = 'Invalid data dimensions: {} given, {} expected for ' \
'{!r}.'.format(len(data_dims), metadata.ndim,
metadata.name())
raise ValueError(msg)
# Check compatibility with the shape of the data
for i, dim in enumerate(data_dims):
if metadata.shape[i] != self.shape[dim]:
msg = 'Unequal lengths. Cube dimension {} => {};' \
' metadata {!r} dimension {} => {}.'
raise ValueError(msg.format(dim, self.shape[dim],
metadata.name(), i,
metadata.shape[i]))
elif metadata.shape != (1,):
msg = 'Missing data dimensions for multi-valued {} {!r}'
msg = msg.format(metadata.__class__.__name__, metadata.name())
raise ValueError(msg)
return data_dims
def _add_unique_aux_coord(self, coord, data_dims):
data_dims = self._check_multi_dim_metadata(coord, data_dims)
self._aux_coords_and_dims.append([coord, data_dims])
def add_aux_factory(self, aux_factory):
"""
Adds an auxiliary coordinate factory to the cube.
Args:
* aux_factory
The :class:`iris.aux_factory.AuxCoordFactory` instance to add.
"""
if not isinstance(aux_factory, iris.aux_factory.AuxCoordFactory):
raise TypeError('Factory must be a subclass of '
'iris.aux_factory.AuxCoordFactory.')
cube_coords = self.coords()
for dependency in aux_factory.dependencies:
ref_coord = aux_factory.dependencies[dependency]
if ref_coord is not None and ref_coord not in cube_coords:
msg = "{} coordinate for factory is not present on cube {}"
raise ValueError(msg.format(ref_coord.name(), self.name()))
self._aux_factories.append(aux_factory)
def add_cell_measure(self, cell_measure, data_dims=None):
"""
Adds a CF cell measure to the cube.
Args:
* cell_measure
The :class:`iris.coords.CellMeasure`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a cell_measure with identical metadata already
exists on the cube.
See also
:meth:`Cube.remove_cell_measure()<iris.cube.Cube.remove_cell_measure>`.
"""
if self.cell_measures(cell_measure):
raise ValueError('Duplicate cell_measures are not permitted.')
data_dims = self._check_multi_dim_metadata(cell_measure, data_dims)
self._cell_measures_and_dims.append([cell_measure, data_dims])
self._cell_measures_and_dims.sort(key=lambda cm_dims:
(cm_dims[0]._as_defn(), cm_dims[1]))
def add_dim_coord(self, dim_coord, data_dim):
"""
Add a CF coordinate to the cube.
Args:
* dim_coord
The :class:`iris.coords.DimCoord` instance to add to the cube.
* data_dim
Integer giving the data dimension spanned by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube or if a coord already exists for the
given dimension.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(dim_coord):
raise ValueError('The coordinate already exists on the cube. '
'Duplicate coordinates are not permitted.')
# Check dimension is available
if self.coords(dimensions=data_dim, dim_coords=True):
raise ValueError('A dim_coord is already associated with '
'dimension %d.' % data_dim)
self._add_unique_dim_coord(dim_coord, data_dim)
def _add_unique_dim_coord(self, dim_coord, data_dim):
if isinstance(dim_coord, iris.coords.AuxCoord):
raise ValueError('The dim_coord may not be an AuxCoord instance.')
# Convert data_dim to a single integer
if isinstance(data_dim, collections.Container):
if len(data_dim) != 1:
raise ValueError('The supplied data dimension must be a'
' single number.')
data_dim = int(list(data_dim)[0])
else:
data_dim = int(data_dim)
# Check data_dim value is valid
if data_dim < 0 or data_dim >= self.ndim:
raise ValueError('The cube does not have the specified dimension '
'(%d)' % data_dim)
# Check compatibility with the shape of the data
if dim_coord.shape[0] != self.shape[data_dim]:
msg = 'Unequal lengths. Cube dimension {} => {}; coord {!r} => {}.'
raise ValueError(msg.format(data_dim, self.shape[data_dim],
dim_coord.name(),
len(dim_coord.points)))
self._dim_coords_and_dims.append([dim_coord, int(data_dim)])
def remove_aux_factory(self, aux_factory):
"""Removes the given auxiliary coordinate factory from the cube."""
self._aux_factories.remove(aux_factory)
def _remove_coord(self, coord):
self._dim_coords_and_dims = [(coord_, dim) for coord_, dim in
self._dim_coords_and_dims if coord_
is not coord]
self._aux_coords_and_dims = [(coord_, dims) for coord_, dims in
self._aux_coords_and_dims if coord_
is not coord]
def remove_coord(self, coord):
"""
Removes a coordinate from the cube.
Args:
* coord (string or coord)
The (name of the) coordinate to remove from the cube.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
"""
coord = self.coord(coord)
self._remove_coord(coord)
for factory in self.aux_factories:
factory.update(coord)
def remove_cell_measure(self, cell_measure):
"""
Removes a cell measure from the cube.
Args:
* cell_measure (CellMeasure)
The CellMeasure to remove from the cube.
See also
:meth:`Cube.add_cell_measure()<iris.cube.Cube.add_cell_measure>`
"""
self._cell_measures_and_dims = [[cell_measure_, dim] for cell_measure_,
dim in self._cell_measures_and_dims
if cell_measure_ is not cell_measure]
def replace_coord(self, new_coord):
"""
Replace the coordinate whose metadata matches the given coordinate.
"""
old_coord = self.coord(new_coord)
dims = self.coord_dims(old_coord)
was_dimensioned = old_coord in self.dim_coords
self._remove_coord(old_coord)
if was_dimensioned and isinstance(new_coord, iris.coords.DimCoord):
self.add_dim_coord(new_coord, dims[0])
else:
self.add_aux_coord(new_coord, dims)
for factory in self.aux_factories:
factory.update(old_coord, new_coord)
def coord_dims(self, coord):
"""
Returns a tuple of the data dimensions relevant to the given
coordinate.
When searching for the given coordinate in the cube the comparison is
made using coordinate metadata equality. Hence the given coordinate
instance need not exist on the cube, and may contain different
coordinate values.
Args:
* coord (string or coord)
The (name of the) coord to look for.
"""
coord = self.coord(coord)
# Search for existing coordinate (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [(dim,) for coord_, dim in self._dim_coords_and_dims if
coord_ is coord]
if not matches:
matches = [dims for coord_, dims in self._aux_coords_and_dims if
coord_ is coord]
# Search derived aux coords
target_defn = coord._as_defn()
if not matches:
def match(factory):
return factory._as_defn() == target_defn
factories = filter(match, self._aux_factories)
matches = [factory.derived_dims(self.coord_dims) for factory in
factories]
if not matches:
raise iris.exceptions.CoordinateNotFoundError(coord.name())
return matches[0]
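# Illustrative usage (a hedged sketch; assumes a cube whose 'latitude'
# coordinate spans dimension 0, in which case a 1-tuple is returned):
# >>> cube.coord_dims(cube.coord('latitude'))
# (0,)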
def cell_measure_dims(self, cell_measure):
"""
Returns a tuple of the data dimensions relevant to the given
CellMeasure.
* cell_measure
The CellMeasure to look for.
"""
# Search for existing cell measure (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [dims for cm_, dims in self._cell_measures_and_dims if
cm_ is cell_measure]
if not matches:
raise iris.exceptions.CellMeasureNotFoundError(cell_measure.name())
return matches[0]
def aux_factory(self, name=None, standard_name=None, long_name=None,
var_name=None):
"""
Returns the single coordinate factory that matches the criteria,
or raises an error if not found.
Kwargs:
* name
If not None, matches against factory.name().
* standard_name
The CF standard name of the desired coordinate factory.
If None, does not check for standard name.
* long_name
An unconstrained description of the coordinate factory.
If None, does not check for long_name.
* var_name
The netCDF variable name of the desired coordinate factory.
If None, does not check for var_name.
.. note::
If the arguments given do not result in precisely 1 coordinate
factory being matched, an
:class:`iris.exceptions.CoordinateNotFoundError` is raised.
"""
factories = self.aux_factories
if name is not None:
factories = [factory for factory in factories if
factory.name() == name]
if standard_name is not None:
factories = [factory for factory in factories if
factory.standard_name == standard_name]
if long_name is not None:
factories = [factory for factory in factories if
factory.long_name == long_name]
if var_name is not None:
factories = [factory for factory in factories if
factory.var_name == var_name]
if len(factories) > 1:
factory_names = (factory.name() for factory in factories)
msg = 'Expected to find exactly one coordinate factory, but ' \
'found {}. They were: {}.'.format(len(factories),
', '.join(factory_names))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(factories) == 0:
msg = 'Expected to find exactly one coordinate factory, but ' \
'found none.'
raise iris.exceptions.CoordinateNotFoundError(msg)
return factories[0]
def coords(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord_system=None,
dim_coords=None):
"""
Return a list of coordinates in this cube fitting the given criteria.
Kwargs:
* name_or_coord
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to the value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris._cube_coord_common.CFVariableMixin`.
(b) a coordinate instance with metadata equal to that of
the desired coordinates. Accepts either a
:class:`iris.coords.DimCoord`, :class:`iris.coords.AuxCoord`,
:class:`iris.aux_factory.AuxCoordFactory`
or :class:`iris.coords.CoordDefn`.
* standard_name
The CF standard name of the desired coordinate. If None, does not
check for standard name.
* long_name
An unconstrained description of the coordinate. If None, does not
check for long_name.
* var_name
The netCDF variable name of the desired coordinate. If None, does
not check for var_name.
* attributes
A dictionary of attributes desired on the coordinates. If None,
does not check for attributes.
* axis
The desired coordinate axis, see
:func:`iris.util.guess_coord_axis`. If None, does not check for
axis. Accepts the values 'X', 'Y', 'Z' and 'T' (case-insensitive).
* contains_dimension
The desired coordinate contains the data dimension. If None, does
not check for the dimension.
* dimensions
The exact data dimensions of the desired coordinate. Coordinates
with no data dimension can be found with an empty tuple or list
(i.e. ``()`` or ``[]``). If None, does not check for dimensions.
* coord_system
Whether the desired coordinates have coordinate systems equal to
the given coordinate system. If None, no check is done.
* dim_coords
Set to True to only return coordinates that are the cube's
dimension coordinates. Set to False to only return coordinates
that are the cube's auxiliary and derived coordinates. If None,
returns all coordinates.
See also :meth:`Cube.coord()<iris.cube.Cube.coord>`.
"""
name = None
coord = None
if isinstance(name_or_coord, six.string_types):
name = name_or_coord
else:
coord = name_or_coord
coords_and_factories = []
if dim_coords in [True, None]:
coords_and_factories += list(self.dim_coords)
if dim_coords in [False, None]:
coords_and_factories += list(self.aux_coords)
coords_and_factories += list(self.aux_factories)
if name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.name() == name]
if standard_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.standard_name == standard_name]
if long_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.long_name == long_name]
if var_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.var_name == var_name]
if axis is not None:
axis = axis.upper()
guess_axis = iris.util.guess_coord_axis
coords_and_factories = [coord_ for coord_ in coords_and_factories
if guess_axis(coord_) == axis]
if attributes is not None:
if not isinstance(attributes, collections.Mapping):
msg = 'The attributes keyword was expecting a dictionary ' \
'type, but got a %s instead.' % type(attributes)
raise ValueError(msg)
def attr_filter(coord_):
return all(k in coord_.attributes and coord_.attributes[k] == v
for k, v in six.iteritems(attributes))
coords_and_factories = [coord_ for coord_ in coords_and_factories
if attr_filter(coord_)]
if coord_system is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.coord_system == coord_system]
if coord is not None:
if isinstance(coord, iris.coords.CoordDefn):
defn = coord
else:
defn = coord._as_defn()
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_._as_defn() == defn]
if contains_dimension is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if contains_dimension in
self.coord_dims(coord_)]
if dimensions is not None:
if not isinstance(dimensions, collections.Container):
dimensions = [dimensions]
dimensions = tuple(dimensions)
coords_and_factories = [coord_ for coord_ in coords_and_factories
if self.coord_dims(coord_) == dimensions]
# If any factories remain after the above filters we have to make the
# coords so they can be returned
def extract_coord(coord_or_factory):
if isinstance(coord_or_factory, iris.aux_factory.AuxCoordFactory):
coord = coord_or_factory.make_coord(self.coord_dims)
elif isinstance(coord_or_factory, iris.coords.Coord):
coord = coord_or_factory
else:
msg = 'Expected Coord or AuxCoordFactory, got ' \
'{!r}.'.format(type(coord_or_factory))
raise ValueError(msg)
return coord
coords = [extract_coord(coord_or_factory) for coord_or_factory in
coords_and_factories]
return coords
def coord(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord_system=None,
dim_coords=None):
"""
Return a single coord given the same arguments as :meth:`Cube.coords`.
.. note::
If the arguments given do not result in precisely 1 coordinate
being matched, an :class:`iris.exceptions.CoordinateNotFoundError`
is raised.
.. seealso::
:meth:`Cube.coords()<iris.cube.Cube.coords>` for full keyword
documentation.
"""
coords = self.coords(name_or_coord=name_or_coord,
standard_name=standard_name,
long_name=long_name, var_name=var_name,
attributes=attributes, axis=axis,
contains_dimension=contains_dimension,
dimensions=dimensions,
coord_system=coord_system,
dim_coords=dim_coords)
if len(coords) > 1:
msg = 'Expected to find exactly 1 coordinate, but found %s. ' \
'They were: %s.' % (len(coords), ', '.join(coord.name() for
coord in coords))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(coords) == 0:
_name = name_or_coord
if name_or_coord is not None:
if not isinstance(name_or_coord, six.string_types):
_name = name_or_coord.name()
bad_name = _name or standard_name or long_name or ''
msg = 'Expected to find exactly 1 %s coordinate, but found ' \
'none.' % bad_name
raise iris.exceptions.CoordinateNotFoundError(msg)
return coords[0]
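# Illustrative usage of the filtering keywords (a hedged sketch; the
# coordinate names are hypothetical):
# >>> cube.coords('latitude')                      # list of matches (possibly empty)
# >>> cube.coord('latitude')                       # exactly one match, else an error
# >>> cube.coords(dimensions=0, dim_coords=True)   # dim coords spanning dimension 0
# >>> cube.coords(axis='T')                        # coordinates judged to be time axes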
def coord_system(self, spec=None):
"""
Find the coordinate system of the given type.
If no target coordinate system is provided then find
any available coordinate system.
Kwargs:
* spec:
The name or type of a coordinate system subclass.
E.g. ::
cube.coord_system("GeogCS")
cube.coord_system(iris.coord_systems.GeogCS)
If spec is provided as a type it can be a superclass of
any coordinate system found.
If spec is None, then find any available coordinate
systems within the :class:`iris.cube.Cube`.
Returns:
The :class:`iris.coord_systems.CoordSystem` or None.
"""
if isinstance(spec, six.string_types) or spec is None:
spec_name = spec
else:
msg = "type %s is not a subclass of CoordSystem" % spec
assert issubclass(spec, iris.coord_systems.CoordSystem), msg
spec_name = spec.__name__
# Gather a temporary list of our unique CoordSystems.
coord_systems = ClassDict(iris.coord_systems.CoordSystem)
for coord in self.coords():
if coord.coord_system:
coord_systems.add(coord.coord_system, replace=True)
result = None
if spec_name is None:
for key in sorted(coord_systems.keys(),
key=lambda class_: class_.__name__):
result = coord_systems[key]
break
else:
result = coord_systems.get(spec_name)
return result
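# Illustrative usage (a hedged sketch; assumes the cube's horizontal
# coordinates carry a GeogCS coordinate system):
# >>> cube.coord_system()                            # any available coordinate system
# >>> cube.coord_system("GeogCS")                    # look up by class name
# >>> cube.coord_system(iris.coord_systems.GeogCS)   # look up by class (or superclass)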
def cell_measures(self, name_or_cell_measure=None):
"""
Return a list of cell measures in this cube fitting the given criteria.
Kwargs:
* name_or_cell_measure
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris._cube_coord_common.CFVariableMixin`.
(b) a cell_measure instance with metadata equal to that of
the desired cell_measures.
See also :meth:`Cube.cell_measure()<iris.cube.Cube.cell_measure>`.
"""
name = None
if isinstance(name_or_cell_measure, six.string_types):
name = name_or_cell_measure
else:
cell_measure = name_or_cell_measure
cell_measures = []
for cm, _ in self._cell_measures_and_dims:
if name is not None:
if cm.name() == name:
cell_measures.append(cm)
elif cell_measure is not None:
if cm == cell_measure:
cell_measures.append(cm)
else:
cell_measures.append(cm)
return cell_measures
def cell_measure(self, name_or_cell_measure=None):
"""
Return a single cell_measure given the same arguments as
:meth:`Cube.cell_measures`.
.. note::
If the arguments given do not result in precisely 1 cell_measure
being matched, an :class:`iris.exceptions.CellMeasureNotFoundError`
is raised.
.. seealso::
:meth:`Cube.cell_measures()<iris.cube.Cube.cell_measures>`
for full keyword documentation.
"""
cell_measures = self.cell_measures(name_or_cell_measure)
if len(cell_measures) > 1:
msg = ('Expected to find exactly 1 cell_measure, but found {}. '
'They were: {}.')
msg = msg.format(len(cell_measures),
', '.join(cm.name() for cm in cell_measures))
raise iris.exceptions.CellMeasureNotFoundError(msg)
elif len(cell_measures) == 0:
if isinstance(name_or_cell_measure, six.string_types):
bad_name = name_or_cell_measure
else:
bad_name = (name_or_cell_measure and
name_or_cell_measure.name()) or ''
msg = 'Expected to find exactly 1 %s cell_measure, but found ' \
'none.' % bad_name
raise iris.exceptions.CellMeasureNotFoundError(msg)
return cell_measures[0]
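# Illustrative usage (a hedged sketch; 'cell_area' is a hypothetical
# cell measure name):
# >>> cube.cell_measures()              # every cell measure on the cube
# >>> cube.cell_measure('cell_area')    # exactly one match, else an error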
@property
def cell_methods(self):
"""
Tuple of :class:`iris.coords.CellMethod` representing the processing
done on the phenomenon.
"""
return self._cell_methods
@cell_methods.setter
def cell_methods(self, cell_methods):
self._cell_methods = tuple(cell_methods) if cell_methods else tuple()
def core_data(self):
"""
Retrieve the data array of this :class:`~iris.cube.Cube` in its
current state, which will either be real or lazy.
If this :class:`~iris.cube.Cube` has lazy data, accessing its data
array via this method **will not** realise the data array. This means
you can perform operations using this method that work equivalently
on real or lazy data, and will maintain lazy data if present.
"""
return self._data_manager.core_data()
@property
def shape(self):
"""The shape of the data of this cube."""
return self._data_manager.shape
@property
def dtype(self):
"""
The data type of the values in the data array of this
:class:`~iris.cube.Cube`.
"""
return self._data_manager.dtype
@property
def ndim(self):
"""The number of dimensions in the data of this cube."""
return self._data_manager.ndim
def lazy_data(self):
"""
Return a "lazy array" representing the Cube data. A lazy array
describes an array whose data values have not been loaded into memory
from disk.
Accessing this method will never cause the Cube data to be loaded.
Similarly, calling methods on, or indexing, the returned Array
will not cause the Cube data to be loaded.
If the Cube data have already been loaded (for example by calling
:meth:`~iris.cube.Cube.data`), the returned Array will be a view of the
loaded cube data represented as a lazy array object. Note that this
does _not_ make the Cube data lazy again; the Cube data remains loaded
in memory.
Returns:
A lazy array, representing the Cube data.
"""
return self._data_manager.lazy_data()
@property
def data(self):
"""
The :class:`numpy.ndarray` representing the multi-dimensional data of
the cube.
.. note::
Cubes obtained from netCDF, PP, and FieldsFile files will only
populate this attribute on its first use.
To obtain the shape of the data without causing it to be loaded,
use the Cube.shape attribute.
Example::
>>> fname = iris.sample_data_path('air_temp.pp')
>>> cube = iris.load_cube(fname, 'air_temperature')
>>> # cube.data does not yet have a value.
...
>>> print(cube.shape)
(73, 96)
>>> # cube.data still does not have a value.
...
>>> cube = cube[:10, :20]
>>> # cube.data still does not have a value.
...
>>> data = cube.data
>>> # Only now is the data loaded.
...
>>> print(data.shape)
(10, 20)
"""
return self._data_manager.data
@data.setter
def data(self, data):
self._data_manager.data = data
def has_lazy_data(self):
"""
Details whether this :class:`~iris.cube.Cube` has lazy data.
Returns:
Boolean.
"""
return self._data_manager.has_lazy_data()
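# Illustrative usage contrasting the data accessors (a hedged sketch):
# >>> payload = cube.core_data()    # real or lazy, whichever the cube holds
# >>> if cube.has_lazy_data():
# ...     lazy = cube.lazy_data()   # dask array; no values are computed
# ... else:
# ...     real = cube.data          # realised numpy (or masked) array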
@property
def dim_coords(self):
"""
Return a tuple of all the dimension coordinates, ordered by dimension.
.. note::
The length of the returned tuple is not necessarily the same as
:attr:`Cube.ndim` as there may be dimensions on the cube without
dimension coordinates. It is therefore unreliable to use the
resulting tuple to identify the dimension coordinates for a given
dimension - instead use the :meth:`Cube.coord` method with the
``dimensions`` and ``dim_coords`` keyword arguments.
"""
return tuple((coord for coord, dim in
sorted(self._dim_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def aux_coords(self):
"""
Return a tuple of all the auxiliary coordinates, ordered by
dimension(s).
"""
return tuple((coord for coord, dims in
sorted(self._aux_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def derived_coords(self):
"""
Return a tuple of all the coordinates generated by the coordinate
factories.
"""
return tuple(factory.make_coord(self.coord_dims) for factory in
sorted(self.aux_factories,
key=lambda factory: factory.name()))
@property
def aux_factories(self):
"""Return a tuple of all the coordinate factories."""
return tuple(self._aux_factories)
def _summary_coord_extra(self, coord, indent):
# Returns the text needed to ensure this coordinate can be
# distinguished from all others with the same name.
extra = ''
similar_coords = self.coords(coord.name())
if len(similar_coords) > 1:
# Find all the attribute keys
keys = set()
for similar_coord in similar_coords:
keys.update(six.iterkeys(similar_coord.attributes))
# Look for any attributes that vary
vary = set()
attributes = {}
for key in keys:
for similar_coord in similar_coords:
if key not in similar_coord.attributes:
vary.add(key)
break
value = similar_coord.attributes[key]
if attributes.setdefault(key, value) != value:
vary.add(key)
break
keys = sorted(vary & set(coord.attributes.keys()))
bits = ['{}={!r}'.format(key, coord.attributes[key]) for key in
keys]
if bits:
extra = indent + ', '.join(bits)
return extra
def _summary_extra(self, coords, summary, indent):
# Where necessary, inserts extra lines into the summary to ensure
# coordinates can be distinguished.
new_summary = []
for coord, summary in zip(coords, summary):
new_summary.append(summary)
extra = self._summary_coord_extra(coord, indent)
if extra:
new_summary.append(extra)
return new_summary
def summary(self, shorten=False, name_padding=35):
"""
Unicode string summary of the Cube, giving its name, a list of dim coord
names versus lengths and, optionally, relevant coordinate information.
"""
# Create a set to contain the axis names for each data dimension.
dim_names = [set() for dim in range(len(self.shape))]
# Add the dim_coord names that participate in the associated data
# dimensions.
for dim in range(len(self.shape)):
dim_coords = self.coords(contains_dimension=dim, dim_coords=True)
if dim_coords:
dim_names[dim].add(dim_coords[0].name())
else:
dim_names[dim].add('-- ')
# Convert axes sets to lists and sort.
dim_names = [sorted(names, key=sorted_axes) for names in dim_names]
# Generate textual summary of the cube dimensionality.
if self.shape == ():
dimension_header = 'scalar cube'
else:
dimension_header = '; '.join(
[', '.join(dim_names[dim]) +
': %d' % dim_shape for dim, dim_shape in
enumerate(self.shape)])
nameunit = '{name} / ({units})'.format(name=self.name(),
units=self.units)
cube_header = '{nameunit!s:{length}} ({dimension})'.format(
length=name_padding,
nameunit=nameunit,
dimension=dimension_header)
summary = ''
# Generate full cube textual summary.
if not shorten:
indent = 10
extra_indent = ' ' * 13
# Cache the derived coords so we can rely on consistent
# object IDs.
derived_coords = self.derived_coords
# Determine the cube coordinates that are scalar (single-valued)
# AND non-dimensioned.
dim_coords = self.dim_coords
aux_coords = self.aux_coords
all_coords = dim_coords + aux_coords + derived_coords
scalar_coords = [coord for coord in all_coords if not
self.coord_dims(coord) and coord.shape == (1,)]
# Determine the cube coordinates that are not scalar BUT
# dimensioned.
scalar_coord_ids = set(map(id, scalar_coords))
vector_dim_coords = [coord for coord in dim_coords if id(coord) not
in scalar_coord_ids]
vector_aux_coords = [coord for coord in aux_coords if id(coord) not
in scalar_coord_ids]
vector_derived_coords = [coord for coord in derived_coords if
id(coord) not in scalar_coord_ids]
# cell measures
vector_cell_measures = [cm for cm in self.cell_measures()
if cm.shape != (1,)]
# Determine the cube coordinates that don't describe the cube and
# are most likely erroneous.
vector_coords = vector_dim_coords + vector_aux_coords + \
vector_derived_coords
ok_coord_ids = scalar_coord_ids.union(set(map(id, vector_coords)))
invalid_coords = [coord for coord in all_coords if id(coord) not
in ok_coord_ids]
# Sort scalar coordinates by name.
scalar_coords.sort(key=lambda coord: coord.name())
# Sort vector coordinates by data dimension and name.
vector_dim_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_aux_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_derived_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
# Sort other coordinates by name.
invalid_coords.sort(key=lambda coord: coord.name())
#
# Generate textual summary of cube vector coordinates.
#
def vector_summary(vector_coords, cube_header, max_line_offset,
cell_measures=None):
"""
Generates a list of suitably aligned strings containing coord
names and dimensions indicated by one or more 'x' symbols.
.. note::
The function may need to update the cube header so this is
returned with the list of strings.
"""
if cell_measures is None:
cell_measures = []
vector_summary = []
vectors = []
# Identify offsets for each dimension text marker.
alignment = np.array([index for index, value in
enumerate(cube_header) if
value == ':'])
# Generate basic textual summary for each vector coordinate
# - WITHOUT dimension markers.
for coord in vector_coords + cell_measures:
vector_summary.append('%*s%s' % (
indent, ' ', iris.util.clip_string(coord.name())))
min_alignment = min(alignment)
# Determine whether the cube header requires realignment
# due to one or more longer vector coordinate summaries.
if max_line_offset >= min_alignment:
delta = max_line_offset - min_alignment + 5
cube_header = '%-*s (%s)' % (int(name_padding + delta),
self.name() or 'unknown',
dimension_header)
alignment += delta
if vector_coords:
# Generate full textual summary for each vector coordinate
# - WITH dimension markers.
for index, coord in enumerate(vector_coords):
dims = self.coord_dims(coord)
for dim in range(len(self.shape)):
width = alignment[dim] - len(vector_summary[index])
char = 'x' if dim in dims else '-'
line = '{pad:{width}}{char}'.format(pad=' ',
width=width,
char=char)
vector_summary[index] += line
vectors = vectors + vector_coords
if cell_measures:
# Generate full textual summary for each vector coordinate
# - WITH dimension markers.
for index, coord in enumerate(cell_measures):
dims = self.cell_measure_dims(coord)
for dim in range(len(self.shape)):
width = alignment[dim] - len(vector_summary[index])
char = 'x' if dim in dims else '-'
line = '{pad:{width}}{char}'.format(pad=' ',
width=width,
char=char)
vector_summary[index] += line
vectors = vectors + cell_measures
# Interleave any extra lines that are needed to distinguish
# the coordinates.
vector_summary = self._summary_extra(vectors,
vector_summary,
extra_indent)
return vector_summary, cube_header
# Calculate the maximum line offset.
max_line_offset = 0
for coord in all_coords:
max_line_offset = max(max_line_offset, len('%*s%s' % (
indent, ' ', iris.util.clip_string(str(coord.name())))))
if vector_dim_coords:
dim_coord_summary, cube_header = vector_summary(
vector_dim_coords, cube_header, max_line_offset)
summary += '\n Dimension coordinates:\n' + \
'\n'.join(dim_coord_summary)
if vector_aux_coords:
aux_coord_summary, cube_header = vector_summary(
vector_aux_coords, cube_header, max_line_offset)
summary += '\n Auxiliary coordinates:\n' + \
'\n'.join(aux_coord_summary)
if vector_derived_coords:
derived_coord_summary, cube_header = vector_summary(
vector_derived_coords, cube_header, max_line_offset)
summary += '\n Derived coordinates:\n' + \
'\n'.join(derived_coord_summary)
#
# Generate summary of cube cell measures attribute
#
if vector_cell_measures:
cell_measure_summary, cube_header = vector_summary(
[], cube_header, max_line_offset,
cell_measures=vector_cell_measures)
summary += '\n Cell Measures:\n'
summary += '\n'.join(cell_measure_summary)
#
# Generate textual summary of cube scalar coordinates.
#
scalar_summary = []
if scalar_coords:
for coord in scalar_coords:
if (coord.units in ['1', 'no_unit', 'unknown'] or
coord.units.is_time_reference()):
unit = ''
else:
unit = ' {!s}'.format(coord.units)
# Format cell depending on type of point and whether it
# has a bound.
coord_cell = coord.cell(0)
if isinstance(coord_cell.point, six.string_types):
# Indent string type coordinates
coord_cell_split = [iris.util.clip_string(str(item))
for item in
coord_cell.point.split('\n')]
line_sep = '\n{pad:{width}}'.format(
pad=' ', width=indent + len(coord.name()) + 2)
coord_cell_str = line_sep.join(coord_cell_split) + unit
else:
coord_cell_cpoint = coord_cell.point
coord_cell_cbound = coord_cell.bound
coord_cell_str = '{!s}{}'.format(coord_cell_cpoint,
unit)
if coord_cell_cbound is not None:
bound = '({})'.format(', '.join(str(val) for
val in coord_cell_cbound))
coord_cell_str += ', bound={}{}'.format(bound,
unit)
scalar_summary.append('{pad:{width}}{name}: {cell}'.format(
pad=' ', width=indent, name=coord.name(),
cell=coord_cell_str))
# Interleave any extra lines that are needed to distinguish
# the coordinates.
scalar_summary = self._summary_extra(scalar_coords,
scalar_summary,
extra_indent)
summary += '\n Scalar coordinates:\n' + '\n'.join(
scalar_summary)
#
# Generate summary of cube's invalid coordinates.
#
if invalid_coords:
invalid_summary = []
for coord in invalid_coords:
invalid_summary.append(
'%*s%s' % (indent, ' ', coord.name()))
# Interleave any extra lines that are needed to distinguish the
# coordinates.
invalid_summary = self._summary_extra(
invalid_coords, invalid_summary, extra_indent)
summary += '\n Invalid coordinates:\n' + \
'\n'.join(invalid_summary)
# cell measures
scalar_cell_measures = [cm for cm in self.cell_measures()
if cm.shape == (1,)]
if scalar_cell_measures:
summary += '\n Scalar cell measures:\n'
scalar_cms = [' {}'.format(cm.name())
for cm in scalar_cell_measures]
summary += '\n'.join(scalar_cms)
#
# Generate summary of cube attributes.
#
if self.attributes:
attribute_lines = []
for name, value in sorted(six.iteritems(self.attributes)):
value = iris.util.clip_string(six.text_type(value))
line = u'{pad:{width}}{name}: {value}'.format(pad=' ',
width=indent,
name=name,
value=value)
attribute_lines.append(line)
summary += '\n Attributes:\n' + '\n'.join(attribute_lines)
#
# Generate summary of cube cell methods
#
if self.cell_methods:
summary += '\n Cell methods:\n'
cm_lines = []
for cm in self.cell_methods:
cm_lines.append('%*s%s' % (indent, ' ', str(cm)))
summary += '\n'.join(cm_lines)
# Construct the final cube summary.
summary = cube_header + summary
return summary
def __str__(self):
# six has a decorator for this bit, but it doesn't do errors='replace'.
if six.PY3:
return self.summary()
else:
return self.summary().encode(errors='replace')
def __unicode__(self):
return self.summary()
def __repr__(self):
return "<iris 'Cube' of %s>" % self.summary(shorten=True,
name_padding=1)
def _repr_html_(self):
from iris.experimental.representation import CubeRepresentation
representer = CubeRepresentation(self)
return representer.repr_html()
def __iter__(self):
raise TypeError('Cube is not iterable')
def __getitem__(self, keys):
"""
Cube indexing (through use of square bracket notation) has been
implemented at the data level. That is, the indices provided to this
method should be aligned to the data of the cube, and thus the indices
requested must be applicable directly to the cube.data attribute. All
metadata will be subsequently indexed appropriately.
"""
# turn the keys into a full slice spec (all dims)
full_slice = iris.util._build_full_slice_given_keys(keys, self.ndim)
def new_coord_dims(coord_):
return [dimension_mapping[d]
for d in self.coord_dims(coord_)
if dimension_mapping[d] is not None]
def new_cell_measure_dims(cm_):
return [dimension_mapping[d]
for d in self.cell_measure_dims(cm_)
if dimension_mapping[d] is not None]
# Fetch the data as a generic array-like object.
cube_data = self._data_manager.core_data()
# Index with the keys, using orthogonal slicing.
dimension_mapping, data = iris.util._slice_data_with_keys(
cube_data, keys)
# We don't want a view of the data, so take a copy of it.
data = deepcopy(data)
# XXX: Slicing a single item from a masked array that is masked,
# results in numpy (v1.11.1) *always* returning a MaskedConstant
# with a dtype of float64, regardless of the original masked
# array dtype!
if isinstance(data, ma.core.MaskedConstant) and \
data.dtype != cube_data.dtype:
data = ma.array(data.data, mask=data.mask, dtype=cube_data.dtype)
# Make the new cube slice
cube = Cube(data)
cube.metadata = deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
# Slice the coords
for coord in self.aux_coords:
coord_keys = tuple([full_slice[dim] for dim in
self.coord_dims(coord)])
try:
new_coord = coord[coord_keys]
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_coord_dims(coord))
coord_mapping[id(coord)] = new_coord
for coord in self.dim_coords:
coord_keys = tuple([full_slice[dim] for dim in
self.coord_dims(coord)])
new_dims = new_coord_dims(coord)
# Try/Catch to handle slicing that makes the points/bounds
# non-monotonic
try:
new_coord = coord[coord_keys]
if not new_dims:
# If the associated dimension has been sliced so the coord
# is a scalar move the coord to the aux_coords container
cube.add_aux_coord(new_coord, new_dims)
else:
cube.add_dim_coord(new_coord, new_dims)
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_dims)
coord_mapping[id(coord)] = new_coord
for factory in self.aux_factories:
cube.add_aux_factory(factory.updated(coord_mapping))
# slice the cell measures and add them to the cube
for cellmeasure in self.cell_measures():
dims = self.cell_measure_dims(cellmeasure)
cm_keys = tuple([full_slice[dim] for dim in dims])
new_cm = cellmeasure[cm_keys]
cube.add_cell_measure(new_cm,
new_cell_measure_dims(cellmeasure))
return cube
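# Illustrative usage (a hedged sketch; indexing follows the data
# dimensions, and all metadata is sliced to match):
# >>> first_field = cube[0]          # drop the leading dimension
# >>> window = cube[..., 10:20]      # a sub-range of the final dimension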
def subset(self, coord):
"""
Get a subset of the cube by providing the desired resultant
coordinate. If the coordinate provided applies to the whole cube, the
whole cube is returned. As such, the operation is not strict.
"""
if not isinstance(coord, iris.coords.Coord):
raise ValueError('coord_to_extract must be a valid Coord.')
# Get the coord to extract from the cube
coord_to_extract = self.coord(coord)
# If scalar, return the whole cube. Not possible to subset 1 point.
if coord_to_extract in self.aux_coords and\
len(coord_to_extract.points) == 1:
# Default to returning None
result = None
indices = coord_to_extract.intersect(coord, return_indices=True)
# If there is an intersect between the two scalar coordinates;
# return the whole cube. Else, return None.
if len(indices):
result = self
else:
if len(self.coord_dims(coord_to_extract)) > 1:
msg = "Currently, only 1D coords can be used to subset a cube"
raise iris.exceptions.CoordinateMultiDimError(msg)
# Identify the dimension of the cube which this coordinate
# references
coord_to_extract_dim = self.coord_dims(coord_to_extract)[0]
# Identify the indices which intersect the requested coord and
# coord_to_extract
coord_indices = coord_to_extract.intersect(coord,
return_indices=True)
# Build up a slice which spans the whole of the cube
full_slice = [slice(None, None)] * len(self.shape)
# Update the full slice to only extract specific indices which
# were identified above
full_slice[coord_to_extract_dim] = coord_indices
full_slice = tuple(full_slice)
result = self[full_slice]
return result
def extract(self, constraint):
"""
Filter the cube by the given constraint using
:meth:`iris.Constraint.extract` method.
"""
# Cast the constraint into a proper constraint if it is not so already
constraint = iris._constraints.as_constraint(constraint)
return constraint.extract(self)
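# Illustrative usage (a hedged sketch; the constraint below is
# hypothetical):
# >>> constraint = iris.Constraint(latitude=lambda cell: cell > 0)
# >>> northern = cube.extract(constraint)   # a new Cube, or None if no match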
def intersection(self, *args, **kwargs):
"""
Return the intersection of the cube with specified coordinate
ranges.
Coordinate ranges can be specified as:
(a) instances of :class:`iris.coords.CoordExtent`.
(b) keyword arguments, where the keyword name specifies the name
of the coordinate (as defined in :meth:`iris.cube.Cube.coords()`)
and the value defines the corresponding range of coordinate
values as a tuple. The tuple must contain two, three, or four
items corresponding to: (minimum, maximum, min_inclusive,
max_inclusive). Where the items are defined as:
* minimum
The minimum value of the range to select.
* maximum
The maximum value of the range to select.
* min_inclusive
If True, coordinate values equal to `minimum` will be included
in the selection. Default is True.
* max_inclusive
If True, coordinate values equal to `maximum` will be included
in the selection. Default is True.
To perform an intersection that ignores any bounds on the coordinates,
set the optional keyword argument *ignore_bounds* to True. Defaults to
False.
.. note::
For ranges defined over "circular" coordinates (i.e. those
where the `units` attribute has a modulus defined) the cube
will be "rolled" to fit where neccesary.
.. warning::
Currently this routine only works with "circular"
coordinates (as defined in the previous note.)
For example::
>>> import iris
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube.coord('longitude').points[::10])
[ 0. 37.49999237 74.99998474 112.49996948 \
149.99996948
187.49995422 224.99993896 262.49993896 299.99993896 \
337.49990845]
>>> subset = cube.intersection(longitude=(30, 50))
>>> print(subset.coord('longitude').points)
[ 33.74999237 37.49999237 41.24998856 44.99998856 48.74998856]
>>> subset = cube.intersection(longitude=(-10, 10))
>>> print(subset.coord('longitude').points)
[-7.50012207 -3.75012207 0. 3.75 7.5 ]
Returns:
A new :class:`~iris.cube.Cube` giving the subset of the cube
which intersects with the requested coordinate intervals.
"""
result = self
ignore_bounds = kwargs.pop('ignore_bounds', False)
for arg in args:
result = result._intersect(*arg, ignore_bounds=ignore_bounds)
for name, value in six.iteritems(kwargs):
result = result._intersect(name, *value,
ignore_bounds=ignore_bounds)
return result
def _intersect(self, name_or_coord, minimum, maximum,
min_inclusive=True, max_inclusive=True,
ignore_bounds=False):
coord = self.coord(name_or_coord)
if coord.ndim != 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
if coord.nbounds not in (0, 2):
raise ValueError('expected 0 or 2 bound values per cell')
if minimum > maximum:
raise ValueError('minimum greater than maximum')
modulus = coord.units.modulus
if modulus is None:
raise ValueError('coordinate units with no modulus are not yet'
' supported')
subsets, points, bounds = self._intersect_modulus(coord,
minimum, maximum,
min_inclusive,
max_inclusive,
ignore_bounds)
# By this point we have either one or two subsets along the relevant
# dimension. If it's just one subset (which might be a slice or an
# unordered collection of indices) we can simply index the cube
# and we're done. If it's two subsets we need to stitch the two
# pieces together.
# subsets provides a way of slicing the coordinates to ensure that
# they remain contiguous. In doing so, this can mean
# transforming the data (this stitching together of two separate
# pieces).
def make_chunk(key):
chunk = self[key_tuple_prefix + (key,)]
chunk_coord = chunk.coord(coord)
chunk_coord.points = points[(key,)]
if chunk_coord.has_bounds():
chunk_coord.bounds = bounds[(key,)]
return chunk
dim, = self.coord_dims(coord)
key_tuple_prefix = (slice(None),) * dim
chunks = [make_chunk(key) for key in subsets]
if len(chunks) == 1:
result = chunks[0]
else:
chunk_data = [chunk.core_data() for chunk in chunks]
if self.has_lazy_data():
func = da.concatenate
else:
module = ma if ma.isMaskedArray(self.data) else np
func = module.concatenate
data = func(chunk_data, dim)
result = iris.cube.Cube(data)
result.metadata = deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
def create_coords(src_coords, add_coord):
# Add copies of the source coordinates, selecting
# the appropriate subsets out of coordinates which
# share the intersection dimension.
preserve_circular = (min_inclusive and max_inclusive and
abs(maximum - minimum) == modulus)
for src_coord in src_coords:
dims = self.coord_dims(src_coord)
if dim in dims:
dim_within_coord = dims.index(dim)
points = np.concatenate([chunk.coord(src_coord).points
for chunk in chunks],
dim_within_coord)
if src_coord.has_bounds():
bounds = np.concatenate(
[chunk.coord(src_coord).bounds
for chunk in chunks],
dim_within_coord)
else:
bounds = None
result_coord = src_coord.copy(points=points,
bounds=bounds)
circular = getattr(result_coord, 'circular', False)
if circular and not preserve_circular:
result_coord.circular = False
else:
result_coord = src_coord.copy()
add_coord(result_coord, dims)
coord_mapping[id(src_coord)] = result_coord
create_coords(self.dim_coords, result.add_dim_coord)
create_coords(self.aux_coords, result.add_aux_coord)
for factory in self.aux_factories:
result.add_aux_factory(factory.updated(coord_mapping))
return result
def _intersect_derive_subset(self, coord, points, bounds, inside_indices):
# Return the subsets, i.e. the means to allow the slicing of
# coordinates to ensure that they remain contiguous.
modulus = coord.units.modulus
delta = coord.points[inside_indices] - points[inside_indices]
step = np.rint(np.diff(delta) / modulus)
non_zero_step_indices = np.nonzero(step)[0]
def dim_coord_subset():
"""
Derive the subset for dimension coordinates.
Ensure that we do not wrap if blocks are at the very edge. That
is, if the very edge is wrapped and corresponds to base + period,
stop this unnecessary wraparound.
"""
# A contiguous block at the start and another at the end.
# (NB. We can't have more than two blocks because we've already
# restricted the coordinate's range to its modulus).
end_of_first_chunk = non_zero_step_indices[0]
index_of_second_chunk = inside_indices[end_of_first_chunk + 1]
final_index = points.size - 1
# Condition1: The two blocks don't themselves wrap
# (inside_indices is contiguous).
# Condition2: Are we chunked at either extreme edge.
edge_wrap = ((index_of_second_chunk ==
inside_indices[end_of_first_chunk] + 1) and
index_of_second_chunk in (final_index, 1))
subsets = None
if edge_wrap:
# Increasing coord
if coord.points[-1] > coord.points[0]:
index_end = -1
index_start = 0
# Decreasing coord
else:
index_end = 0
index_start = -1
# Unwrap points and bounds (if present and equal base + period)
if bounds is not None:
edge_equal_base_period = (
np.isclose(coord.bounds[index_end, index_end],
coord.bounds[index_start, index_start] +
modulus))
if edge_equal_base_period:
bounds[index_end, :] = coord.bounds[index_end, :]
else:
edge_equal_base_period = (
np.isclose(coord.points[index_end],
coord.points[index_start] +
modulus))
if edge_equal_base_period:
points[index_end] = coord.points[index_end]
subsets = [slice(inside_indices[0],
inside_indices[-1] + 1)]
# Either no edge wrap or edge wrap != base + period
# i.e. derive subset without alteration
if subsets is None:
subsets = [
slice(index_of_second_chunk, None),
slice(None, inside_indices[end_of_first_chunk] + 1)
]
return subsets
if isinstance(coord, iris.coords.DimCoord):
if non_zero_step_indices.size:
subsets = dim_coord_subset()
else:
# A single, contiguous block.
subsets = [slice(inside_indices[0], inside_indices[-1] + 1)]
else:
# An AuxCoord could have its values in an arbitrary
# order, and hence a range of values can select an
# arbitrary subset. Also, we want to preserve the order
# from the original AuxCoord. So we just use the indices
# directly.
subsets = [inside_indices]
return subsets
def _intersect_modulus(self, coord, minimum, maximum, min_inclusive,
max_inclusive, ignore_bounds):
modulus = coord.units.modulus
if maximum > minimum + modulus:
raise ValueError("requested range greater than coordinate's"
" unit's modulus")
if coord.has_bounds():
values = coord.bounds
else:
values = coord.points
if values.max() > values.min() + modulus:
raise ValueError("coordinate's range greater than coordinate's"
" unit's modulus")
min_comp = np.less_equal if min_inclusive else np.less
max_comp = np.less_equal if max_inclusive else np.less
if coord.has_bounds():
bounds = wrap_lons(coord.bounds, minimum, modulus)
if ignore_bounds:
points = wrap_lons(coord.points, minimum, modulus)
inside_indices, = np.where(
np.logical_and(min_comp(minimum, points),
max_comp(points, maximum)))
else:
inside = np.logical_and(min_comp(minimum, bounds),
max_comp(bounds, maximum))
inside_indices, = np.where(np.any(inside, axis=1))
# To ensure that bounds (and points) of matching cells aren't
# "scrambled" by the wrap operation we detect split cells that
# straddle the wrap point and choose a new wrap point which avoids
# split cells.
# For example: the cell [349.875, 350.4375] wrapped at -10 would
# become [349.875, -9.5625] which is no longer valid. The lower
# cell bound value (and possibly associated point) are
# recalculated so that they are consistent with the extended
# wrapping scheme which moves the wrap point to the correct lower
# bound value (-10.125) thus resulting in the cell no longer
# being split. For bounds which may extend exactly the length of
# the modulus, we simply preserve the point to bound difference,
# and call the new bounds = the new points + the difference.
pre_wrap_delta = np.diff(coord.bounds[inside_indices])
post_wrap_delta = np.diff(bounds[inside_indices])
close_enough = np.allclose(pre_wrap_delta, post_wrap_delta)
if not close_enough:
split_cell_indices, _ = np.where(pre_wrap_delta !=
post_wrap_delta)
# Recalculate the extended minimum.
indices = inside_indices[split_cell_indices]
cells = bounds[indices]
cells_delta = np.diff(coord.bounds[indices])
# Watch out for ascending/descending bounds
if cells_delta[0, 0] > 0:
cells[:, 0] = cells[:, 1] - cells_delta[:, 0]
minimum = np.min(cells[:, 0])
else:
cells[:, 1] = cells[:, 0] + cells_delta[:, 0]
minimum = np.min(cells[:, 1])
points = wrap_lons(coord.points, minimum, modulus)
bound_diffs = coord.points[:, np.newaxis] - coord.bounds
bounds = points[:, np.newaxis] - bound_diffs
else:
points = wrap_lons(coord.points, minimum, modulus)
bounds = None
inside_indices, = np.where(
np.logical_and(min_comp(minimum, points),
max_comp(points, maximum)))
# Determine the subsets
subsets = self._intersect_derive_subset(coord, points, bounds,
inside_indices)
return subsets, points, bounds
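# A hedged, numpy-only sketch of the wrapping idea relied upon above: values
# are shifted into the half-open interval [minimum, minimum + modulus), which
# is essentially what the wrap_lons helper is used for here:
# >>> wrapped = (values - minimum) % modulus + minimum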
def _as_list_of_coords(self, names_or_coords):
"""
Convert a name, coord, or list of names/coords to a list of coords.
"""
# If not iterable, convert to list of a single item
if _is_single_item(names_or_coords):
names_or_coords = [names_or_coords]
coords = []
for name_or_coord in names_or_coords:
if (isinstance(name_or_coord, six.string_types) or
isinstance(name_or_coord, iris.coords.Coord)):
coords.append(self.coord(name_or_coord))
else:
# Don't know how to handle this type
msg = ("Don't know how to handle coordinate of type %s. "
"Ensure all coordinates are of type six.string_types "
"or iris.coords.Coord.") % (type(name_or_coord), )
raise TypeError(msg)
return coords
def slices_over(self, ref_to_slice):
"""
Return an iterator of all subcubes along a given coordinate or
dimension index, or multiple of these.
Args:
* ref_to_slice (string, coord, dimension index or a list of these):
Determines which dimensions will be iterated along (i.e. the
dimensions that are not returned in the subcubes).
A mix of input types can also be provided.
Returns:
An iterator of subcubes.
For example, to get all subcubes along the time dimension::
for sub_cube in cube.slices_over('time'):
print(sub_cube)
.. seealso:: :meth:`iris.cube.Cube.slices`.
.. note::
The order of dimension references to slice along does not affect
the order of returned items in the iterator; instead the ordering
is based on the fastest-changing dimension.
"""
# Required to handle a mix between types.
if _is_single_item(ref_to_slice):
ref_to_slice = [ref_to_slice]
slice_dims = set()
for ref in ref_to_slice:
try:
coord, = self._as_list_of_coords(ref)
except TypeError:
dim = int(ref)
if dim < 0 or dim > self.ndim:
msg = ('Requested an iterator over a dimension ({}) '
'which does not exist.'.format(dim))
raise ValueError(msg)
# Convert coord index to a single-element list to prevent a
# TypeError when `slice_dims.update` is called with it.
dims = [dim]
else:
dims = self.coord_dims(coord)
slice_dims.update(dims)
all_dims = set(range(self.ndim))
opposite_dims = list(all_dims - slice_dims)
return self.slices(opposite_dims, ordered=False)
def slices(self, ref_to_slice, ordered=True):
"""
Return an iterator of all subcubes given the coordinates or dimension
indices desired to be present in each subcube.
Args:
* ref_to_slice (string, coord, dimension index or a list of these):
Determines which dimensions will be returned in the subcubes (i.e.
the dimensions that are not iterated over).
A mix of input types can also be provided. They must all be
orthogonal (i.e. point to different dimensions).
Kwargs:
* ordered: if True, the order in which the coords or data_dims to slice
are given will be the order in which they represent the data in
the resulting cube slices. If False, the order will follow that of
the source cube. Default is True.
Returns:
An iterator of subcubes.
For example, to get all 2d longitude/latitude subcubes from a
multi-dimensional cube::
for sub_cube in cube.slices(['longitude', 'latitude']):
print(sub_cube)
.. seealso:: :meth:`iris.cube.Cube.slices_over`.
"""
if not isinstance(ordered, bool):
raise TypeError("'ordered' argument to slices must be boolean.")
# Required to handle a mix between types
if _is_single_item(ref_to_slice):
ref_to_slice = [ref_to_slice]
dim_to_slice = []
for ref in ref_to_slice:
try:
# attempt to handle as coordinate
coord = self._as_list_of_coords(ref)[0]
dims = self.coord_dims(coord)
if not dims:
msg = ('Requested an iterator over a coordinate ({}) '
'which does not describe a dimension.')
msg = msg.format(coord.name())
raise ValueError(msg)
dim_to_slice.extend(dims)
except TypeError:
try:
# attempt to handle as dimension index
dim = int(ref)
except ValueError:
raise ValueError('{} Incompatible type {} for '
'slicing'.format(ref, type(ref)))
if dim < 0 or dim > self.ndim:
msg = ('Requested an iterator over a dimension ({}) '
'which does not exist.'.format(dim))
raise ValueError(msg)
dim_to_slice.append(dim)
if len(set(dim_to_slice)) != len(dim_to_slice):
msg = 'The requested coordinates are not orthogonal.'
raise ValueError(msg)
# Create a list with the shape of our data
dims_index = list(self.shape)
# Set the dimensions which have been requested to length 1
for d in dim_to_slice:
dims_index[d] = 1
return _SliceIterator(self, dims_index, dim_to_slice, ordered)
def transpose(self, new_order=None):
"""
Re-order the data dimensions of the cube in-place.
new_order - list of ints, optional
By default, reverse the dimensions, otherwise permute the
axes according to the values given.
.. note:: If defined, new_order must span all of the data dimensions.
Example usage::
# put the second dimension first, followed by the third dimension,
# and finally put the first dimension third:
>>> cube.transpose([1, 2, 0])
"""
if new_order is None:
new_order = np.arange(self.ndim)[::-1]
# `new_order` must be an iterable for checking with `self.ndim`.
# Dask transpose only supports lists, so ensure `new_order` is
# always a list.
new_order = list(new_order)
if len(new_order) != self.ndim:
raise ValueError('Incorrect number of dimensions.')
# Transpose the data payload.
dm = self._data_manager
data = dm.core_data().transpose(new_order)
self._data_manager = DataManager(data)
dim_mapping = {src: dest for dest, src in enumerate(new_order)}
def remap_dim_coord(coord_and_dim):
coord, dim = coord_and_dim
return coord, dim_mapping[dim]
self._dim_coords_and_dims = list(map(remap_dim_coord,
self._dim_coords_and_dims))
def remap_aux_coord(coord_and_dims):
coord, dims = coord_and_dims
return coord, tuple(dim_mapping[dim] for dim in dims)
self._aux_coords_and_dims = list(map(remap_aux_coord,
self._aux_coords_and_dims))
def xml(self, checksum=False, order=True, byteorder=True):
"""
Returns a fully valid CubeML string representation of the Cube.
"""
doc = Document()
cube_xml_element = self._xml_element(doc, checksum=checksum,
order=order,
byteorder=byteorder)
cube_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
doc.appendChild(cube_xml_element)
# Print our newly created XML
return doc.toprettyxml(indent=" ")
def _xml_element(self, doc, checksum=False, order=True, byteorder=True):
cube_xml_element = doc.createElement("cube")
if self.standard_name:
cube_xml_element.setAttribute('standard_name', self.standard_name)
if self.long_name:
cube_xml_element.setAttribute('long_name', self.long_name)
if self.var_name:
cube_xml_element.setAttribute('var_name', self.var_name)
cube_xml_element.setAttribute('units', str(self.units))
cube_xml_element.setAttribute('dtype', self.dtype.name)
if self.attributes:
attributes_element = doc.createElement('attributes')
for name in sorted(six.iterkeys(self.attributes)):
attribute_element = doc.createElement('attribute')
attribute_element.setAttribute('name', name)
value = self.attributes[name]
# Strict check because we don't want namedtuples.
if type(value) in (list, tuple):
delimiter = '[]' if isinstance(value, list) else '()'
value = ', '.join(("'%s'"
if isinstance(item, six.string_types)
else '%s') % (item, ) for item in value)
value = delimiter[0] + value + delimiter[1]
else:
value = str(value)
attribute_element.setAttribute('value', value)
attributes_element.appendChild(attribute_element)
cube_xml_element.appendChild(attributes_element)
coords_xml_element = doc.createElement("coords")
for coord in sorted(self.coords(), key=lambda coord: coord.name()):
# make a "cube coordinate" element which holds the dimensions (if
# appropriate) which itself will have a sub-element of the
# coordinate instance itself.
cube_coord_xml_element = doc.createElement("coord")
coords_xml_element.appendChild(cube_coord_xml_element)
dims = list(self.coord_dims(coord))
if dims:
cube_coord_xml_element.setAttribute("datadims", repr(dims))
coord_xml_element = coord.xml_element(doc)
cube_coord_xml_element.appendChild(coord_xml_element)
cube_xml_element.appendChild(coords_xml_element)
# cell methods (no sorting!)
cell_methods_xml_element = doc.createElement("cellMethods")
for cm in self.cell_methods:
cell_method_xml_element = cm.xml_element(doc)
cell_methods_xml_element.appendChild(cell_method_xml_element)
cube_xml_element.appendChild(cell_methods_xml_element)
data_xml_element = doc.createElement("data")
data_xml_element.setAttribute("shape", str(self.shape))
# NB. Getting a checksum triggers any deferred loading,
# in which case it also has the side-effect of forcing the
# byte order to be native.
if checksum:
data = self.data
# Ensure consistent memory layout for checksums.
def normalise(data):
data = np.ascontiguousarray(data)
if data.dtype.newbyteorder('<') != data.dtype:
data = data.byteswap(False)
data.dtype = data.dtype.newbyteorder('<')
return data
if ma.isMaskedArray(data):
# Fill in masked values to avoid the checksum being
# sensitive to unused numbers. Use a fixed value so
# a change in fill_value doesn't affect the
# checksum.
crc = '0x%08x' % (
zlib.crc32(normalise(data.filled(0))) & 0xffffffff, )
data_xml_element.setAttribute("checksum", crc)
if ma.is_masked(data):
crc = '0x%08x' % (
zlib.crc32(normalise(data.mask)) & 0xffffffff, )
else:
crc = 'no-masked-elements'
data_xml_element.setAttribute("mask_checksum", crc)
else:
crc = '0x%08x' % (zlib.crc32(normalise(data)) & 0xffffffff, )
data_xml_element.setAttribute("checksum", crc)
elif self.has_lazy_data():
data_xml_element.setAttribute("state", "deferred")
else:
data_xml_element.setAttribute("state", "loaded")
# Add the dtype, and also the array and mask orders if the
# data is loaded.
if not self.has_lazy_data():
data = self.data
dtype = data.dtype
def _order(array):
order = ''
if array.flags['C_CONTIGUOUS']:
order = 'C'
elif array.flags['F_CONTIGUOUS']:
order = 'F'
return order
if order:
data_xml_element.setAttribute('order', _order(data))
# NB. dtype.byteorder can return '=', which is bad for
# cross-platform consistency - so we use dtype.str
# instead.
if byteorder:
array_byteorder = {'>': 'big', '<': 'little'}.get(dtype.str[0])
if array_byteorder is not None:
data_xml_element.setAttribute('byteorder', array_byteorder)
if order and ma.isMaskedArray(data):
data_xml_element.setAttribute('mask_order',
_order(data.mask))
else:
dtype = self.lazy_data().dtype
data_xml_element.setAttribute('dtype', dtype.name)
cube_xml_element.appendChild(data_xml_element)
return cube_xml_element
def copy(self, data=None):
"""
Returns a deep copy of this cube.
Kwargs:
* data:
Replace the data of the cube copy with provided data payload.
Returns:
A copy instance of the :class:`Cube`.
"""
memo = {}
cube = self._deepcopy(memo, data=data)
return cube
def __copy__(self):
"""Shallow copying is disallowed for Cubes."""
raise copy.Error("Cube shallow-copy not allowed. Use deepcopy() or "
"Cube.copy()")
def __deepcopy__(self, memo):
return self._deepcopy(memo)
def _deepcopy(self, memo, data=None):
dm = self._data_manager.copy(data=data)
new_dim_coords_and_dims = deepcopy(self._dim_coords_and_dims, memo)
new_aux_coords_and_dims = deepcopy(self._aux_coords_and_dims, memo)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
for old_pair, new_pair in zip(self._dim_coords_and_dims,
new_dim_coords_and_dims):
coord_mapping[id(old_pair[0])] = new_pair[0]
for old_pair, new_pair in zip(self._aux_coords_and_dims,
new_aux_coords_and_dims):
coord_mapping[id(old_pair[0])] = new_pair[0]
new_cube = Cube(dm.core_data(),
dim_coords_and_dims=new_dim_coords_and_dims,
aux_coords_and_dims=new_aux_coords_and_dims)
new_cube.metadata = deepcopy(self.metadata, memo)
for factory in self.aux_factories:
new_cube.add_aux_factory(factory.updated(coord_mapping))
return new_cube
# START OPERATOR OVERLOADS
def __eq__(self, other):
result = NotImplemented
if isinstance(other, Cube):
result = self.metadata == other.metadata
# having checked the metadata, now check the coordinates
if result:
coord_comparison = iris.analysis.coord_comparison(self, other)
# if there are any coordinates which are not equal
result = not (coord_comparison['not_equal'] or
coord_comparison['non_equal_data_dimension'])
# having checked everything else, check approximate data
# equality - loading the data if has not already been loaded.
if result:
result = np.all(np.abs(self.data - other.data) < 1e-8)
return result
# Must supply __ne__, Python does not defer to __eq__ for negative equality
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
# Must supply __hash__ as Python 3 does not enable it if __eq__ is defined.
# NOTE: Violates "objects which compare equal must have the same hash".
# We ought to remove this, as equality of two cubes can *change*, so they
# really should not be hashable.
# However, current code needs it, e.g. so we can put them in sets.
# Fixing it will require changing those uses. See #962 and #1772.
def __hash__(self):
return hash(id(self))
def __add__(self, other):
return iris.analysis.maths.add(self, other)
def __iadd__(self, other):
return iris.analysis.maths.add(self, other, in_place=True)
__radd__ = __add__
def __sub__(self, other):
return iris.analysis.maths.subtract(self, other)
def __isub__(self, other):
return iris.analysis.maths.subtract(self, other, in_place=True)
__mul__ = iris.analysis.maths.multiply
__rmul__ = iris.analysis.maths.multiply
def __imul__(self, other):
return iris.analysis.maths.multiply(self, other, in_place=True)
__div__ = iris.analysis.maths.divide
def __idiv__(self, other):
return iris.analysis.maths.divide(self, other, in_place=True)
__truediv__ = iris.analysis.maths.divide
def __itruediv__(self, other):
return iris.analysis.maths.divide(self, other, in_place=True)
__pow__ = iris.analysis.maths.exponentiate
# END OPERATOR OVERLOADS
def collapsed(self, coords, aggregator, **kwargs):
"""
Collapse one or more dimensions over the cube given the coordinate/s
and an aggregation.
Examples of aggregations that may be used include
:data:`~iris.analysis.COUNT` and :data:`~iris.analysis.MAX`.
Weighted aggregations (:class:`iris.analysis.WeightedAggregator`) may
also be supplied. These include :data:`~iris.analysis.MEAN` and
:data:`~iris.analysis.SUM`.
Weighted aggregations support an optional *weights* keyword argument.
If set, this should be supplied as an array of weights whose shape
matches the cube. Values for latitude-longitude area weights may be
calculated using :func:`iris.analysis.cartography.area_weights`.
Some Iris aggregators support "lazy" evaluation, meaning that
cubes resulting from this method may represent data arrays which are
not computed until the data is requested (e.g. via ``cube.data`` or
``iris.save``). If lazy evaluation exists for the given aggregator
it will be used wherever possible when this cube's data is itself
a deferred array.
Args:
* coords (string, coord or a list of strings/coords):
Coordinate names/coordinates over which the cube should be
collapsed.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied for collapse operation.
Kwargs:
* kwargs:
Aggregation function keyword arguments.
Returns:
Collapsed cube.
For example:
>>> import iris
>>> import iris.analysis
>>> path = iris.sample_data_path('ostia_monthly.nc')
>>> cube = iris.load_cube(path)
>>> new_cube = cube.collapsed('longitude', iris.analysis.MEAN)
>>> print(new_cube)
surface_temperature / (K) (time: 54; latitude: 18)
Dimension coordinates:
time x -
latitude - x
Auxiliary coordinates:
forecast_reference_time x -
Scalar coordinates:
forecast_period: 0 hours
longitude: 180.0 degrees, bound=(0.0, 360.0) degrees
Attributes:
Conventions: CF-1.5
STASH: m01s00i024
Cell methods:
mean: month, year
mean: longitude
.. note::
Some aggregations are not commutative and hence the order of
processing is important i.e.::
tmp = cube.collapsed('realization', iris.analysis.VARIANCE)
result = tmp.collapsed('height', iris.analysis.VARIANCE)
is not necessarily the same result as::
tmp = cube.collapsed('height', iris.analysis.VARIANCE)
result2 = tmp.collapsed('realization', iris.analysis.VARIANCE)
Conversely operations which operate on more than one coordinate
at the same time are commutative as they are combined internally
into a single operation. Hence the order of the coordinates
supplied in the list does not matter::
cube.collapsed(['longitude', 'latitude'],
iris.analysis.VARIANCE)
is the same (apart from the logically equivalent cell methods that
may be created etc.) as::
cube.collapsed(['latitude', 'longitude'],
iris.analysis.VARIANCE)
"""
# Convert any coordinate names to coordinates
coords = self._as_list_of_coords(coords)
if (isinstance(aggregator, iris.analysis.WeightedAggregator) and
not aggregator.uses_weighting(**kwargs)):
msg = "Collapsing spatial coordinate {!r} without weighting"
lat_match = [coord for coord in coords
if 'latitude' in coord.name()]
if lat_match:
for coord in lat_match:
warnings.warn(msg.format(coord.name()))
# Determine the dimensions we need to collapse (and those we don't)
if aggregator.cell_method == 'peak':
dims_to_collapse = [list(self.coord_dims(coord))
for coord in coords]
# Remove duplicate dimensions.
new_dims = collections.OrderedDict.fromkeys(
d for dim in dims_to_collapse for d in dim)
# Reverse the dimensions so the order can be maintained when
# reshaping the data.
dims_to_collapse = list(new_dims)[::-1]
else:
dims_to_collapse = set()
for coord in coords:
dims_to_collapse.update(self.coord_dims(coord))
if not dims_to_collapse:
msg = 'Cannot collapse a dimension which does not describe any ' \
'data.'
raise iris.exceptions.CoordinateCollapseError(msg)
untouched_dims = set(range(self.ndim)) - set(dims_to_collapse)
# Remove the collapsed dimension(s) from the metadata
indices = [slice(None, None)] * self.ndim
for dim in dims_to_collapse:
indices[dim] = 0
collapsed_cube = self[tuple(indices)]
# Collapse any coords that span the dimension(s) being collapsed
for coord in self.dim_coords + self.aux_coords:
coord_dims = self.coord_dims(coord)
if set(dims_to_collapse).intersection(coord_dims):
local_dims = [coord_dims.index(dim) for dim in
dims_to_collapse if dim in coord_dims]
collapsed_cube.replace_coord(coord.collapsed(local_dims))
untouched_dims = sorted(untouched_dims)
# Record the axis(s) argument passed to 'aggregation', so the same is
# passed to the 'update_metadata' function.
collapse_axis = -1
data_result = None
# Perform the actual aggregation.
if aggregator.cell_method == 'peak':
# The PEAK aggregator must collapse each coordinate separately.
untouched_shape = [self.shape[d] for d in untouched_dims]
collapsed_shape = [self.shape[d] for d in dims_to_collapse]
new_shape = untouched_shape + collapsed_shape
array_dims = untouched_dims + dims_to_collapse
unrolled_data = np.transpose(
self.data, array_dims).reshape(new_shape)
for dim in dims_to_collapse:
unrolled_data = aggregator.aggregate(unrolled_data,
axis=-1,
**kwargs)
data_result = unrolled_data
# Perform the aggregation in lazy form if possible.
elif (aggregator.lazy_func is not None and self.has_lazy_data()):
# Use a lazy operation separately defined by the aggregator, based
# on the cube lazy array.
# NOTE: do not reform the data in this case, as 'lazy_aggregate'
# accepts multiple axes (unlike 'aggregate').
collapse_axis = list(dims_to_collapse)
try:
data_result = aggregator.lazy_aggregate(self.lazy_data(),
axis=collapse_axis,
**kwargs)
except TypeError:
# TypeError - when unexpected keywords passed through (such as
# weights to mean)
pass
# If we weren't able to complete a lazy aggregation, compute it
# directly now.
if data_result is None:
# Perform the (non-lazy) aggregation over the cube data
# First reshape the data so that the dimensions being aggregated
# over are grouped 'at the end' (i.e. axis=-1).
dims_to_collapse = sorted(dims_to_collapse)
end_size = reduce(operator.mul, (self.shape[dim] for dim in
dims_to_collapse))
untouched_shape = [self.shape[dim] for dim in untouched_dims]
new_shape = untouched_shape + [end_size]
dims = untouched_dims + dims_to_collapse
unrolled_data = np.transpose(self.data, dims).reshape(new_shape)
# Perform the same operation on the weights if applicable
if kwargs.get("weights") is not None:
weights = kwargs["weights"].view()
kwargs["weights"] = np.transpose(weights,
dims).reshape(new_shape)
data_result = aggregator.aggregate(unrolled_data,
axis=-1,
**kwargs)
aggregator.update_metadata(collapsed_cube, coords, axis=collapse_axis,
**kwargs)
result = aggregator.post_process(collapsed_cube, data_result, coords,
**kwargs)
return result
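# Illustrative sketch (not in the original source): the docstring above notes
# that weighted aggregators such as MEAN accept a 'weights' array matching the
# cube shape; a typical area-weighted collapse, assuming a cube with bounded
# latitude/longitude coordinates, looks like:
# >>> import iris.analysis.cartography
# >>> weights = iris.analysis.cartography.area_weights(cube)
# >>> area_mean = cube.collapsed(['latitude', 'longitude'],
# ...                            iris.analysis.MEAN, weights=weights)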
def aggregated_by(self, coords, aggregator, **kwargs):
"""
Perform aggregation over the cube given one or more "group
coordinates".
A "group coordinate" is a coordinate where repeating values represent a
single group, such as a month coordinate on a daily time slice.
Repeated values will form a group even if they are not consecutive.
The group coordinates must all be over the same cube dimension. Each
common value group identified over all the group-by coordinates is
collapsed using the provided aggregator.
Args:
* coords (list of coord names or :class:`iris.coords.Coord` instances):
One or more coordinates over which group aggregation is to be
performed.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied to each group.
Kwargs:
* kwargs:
Aggregator and aggregation function keyword arguments.
Returns:
:class:`iris.cube.Cube`.
.. note::
This operation does not yet have support for lazy evaluation.
For example:
>>> import iris
>>> import iris.analysis
>>> import iris.coord_categorisation as cat
>>> fname = iris.sample_data_path('ostia_monthly.nc')
>>> cube = iris.load_cube(fname, 'surface_temperature')
>>> cat.add_year(cube, 'time', name='year')
>>> new_cube = cube.aggregated_by('year', iris.analysis.MEAN)
>>> print(new_cube)
surface_temperature / (K) \
(time: 5; latitude: 18; longitude: 432)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_reference_time \
x - -
year \
x - -
Scalar coordinates:
forecast_period: 0 hours
Attributes:
Conventions: CF-1.5
STASH: m01s00i024
Cell methods:
mean: month, year
mean: year
"""
groupby_coords = []
dimension_to_groupby = None
# We can't handle weights
if isinstance(aggregator, iris.analysis.WeightedAggregator) and \
aggregator.uses_weighting(**kwargs):
raise ValueError('Invalid Aggregation, aggregated_by() cannot use'
' weights.')
coords = self._as_list_of_coords(coords)
for coord in sorted(coords, key=lambda coord: coord._as_defn()):
if coord.ndim > 1:
msg = 'Cannot aggregate_by coord %s as it is ' \
'multidimensional.' % coord.name()
raise iris.exceptions.CoordinateMultiDimError(msg)
dimension = self.coord_dims(coord)
if not dimension:
msg = 'Cannot group-by the coordinate "%s", as its ' \
'dimension does not describe any data.' % coord.name()
raise iris.exceptions.CoordinateCollapseError(msg)
if dimension_to_groupby is None:
dimension_to_groupby = dimension[0]
if dimension_to_groupby != dimension[0]:
msg = 'Cannot group-by coordinates over different dimensions.'
raise iris.exceptions.CoordinateCollapseError(msg)
groupby_coords.append(coord)
# Determine the other coordinates that share the same group-by
# coordinate dimension.
shared_coords = list(filter(
lambda coord_: coord_ not in groupby_coords,
self.coords(dimensions=dimension_to_groupby)))
# Create the aggregation group-by instance.
groupby = iris.analysis._Groupby(groupby_coords, shared_coords)
# Create the resulting aggregate-by cube and remove the original
# coordinates that are going to be grouped by.
key = [slice(None, None)] * self.ndim
# Generate unique index tuple key to maintain monotonicity.
key[dimension_to_groupby] = tuple(range(len(groupby)))
key = tuple(key)
aggregateby_cube = self[key]
for coord in groupby_coords + shared_coords:
aggregateby_cube.remove_coord(coord)
# Determine the group-by cube data shape.
data_shape = list(self.shape + aggregator.aggregate_shape(**kwargs))
data_shape[dimension_to_groupby] = len(groupby)
# Aggregate the group-by data.
cube_slice = [slice(None, None)] * len(data_shape)
for i, groupby_slice in enumerate(groupby.group()):
# Slice the cube with the group-by slice to create a group-by
# sub-cube.
cube_slice[dimension_to_groupby] = groupby_slice
groupby_sub_cube = self[tuple(cube_slice)]
# Perform the aggregation over the group-by sub-cube and
# repatriate the aggregated data into the aggregate-by cube data.
cube_slice[dimension_to_groupby] = i
result = aggregator.aggregate(groupby_sub_cube.data,
axis=dimension_to_groupby,
**kwargs)
# Determine aggregation result data type for the aggregate-by cube
# data on first pass.
if i == 0:
if ma.isMaskedArray(self.data):
aggregateby_data = ma.zeros(data_shape, dtype=result.dtype)
else:
aggregateby_data = np.zeros(data_shape, dtype=result.dtype)
aggregateby_data[tuple(cube_slice)] = result
# Add the aggregation meta data to the aggregate-by cube.
aggregator.update_metadata(aggregateby_cube,
groupby_coords,
aggregate=True, **kwargs)
# Replace the appropriate coordinates within the aggregate-by cube.
dim_coord, = self.coords(dimensions=dimension_to_groupby,
dim_coords=True) or [None]
for coord in groupby.coords:
if dim_coord is not None and \
dim_coord._as_defn() == coord._as_defn() and \
isinstance(coord, iris.coords.DimCoord):
aggregateby_cube.add_dim_coord(coord.copy(),
dimension_to_groupby)
else:
aggregateby_cube.add_aux_coord(coord.copy(),
dimension_to_groupby)
# Attach the aggregate-by data into the aggregate-by cube.
aggregateby_cube = aggregator.post_process(aggregateby_cube,
aggregateby_data,
coords, **kwargs)
return aggregateby_cube
def rolling_window(self, coord, aggregator, window, **kwargs):
"""
Perform rolling window aggregation on a cube given a coordinate, an
aggregation method and a window size.
Args:
* coord (string/:class:`iris.coords.Coord`):
The coordinate over which to perform the rolling window
aggregation.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied to the data.
* window (int):
Size of window to use.
Kwargs:
* kwargs:
Aggregator and aggregation function keyword arguments. The weights
argument to the aggregator, if any, should be a 1d array with the
same length as the chosen window.
Returns:
:class:`iris.cube.Cube`.
.. note::
This operation does not yet have support for lazy evaluation.
For example:
>>> import iris, iris.analysis
>>> fname = iris.sample_data_path('GloSea4', 'ensemble_010.pp')
>>> air_press = iris.load_cube(fname, 'surface_temperature')
>>> print(air_press)
surface_temperature / (K) \
(time: 6; latitude: 145; longitude: 192)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_period \
x - -
Scalar coordinates:
forecast_reference_time: 2011-07-23 00:00:00
realization: 10
Attributes:
STASH: m01s00i024
source: Data from Met Office Unified Model
um_version: 7.6
Cell methods:
mean: time (1 hour)
>>> print(air_press.rolling_window('time', iris.analysis.MEAN, 3))
surface_temperature / (K) \
(time: 4; latitude: 145; longitude: 192)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_period \
x - -
Scalar coordinates:
forecast_reference_time: 2011-07-23 00:00:00
realization: 10
Attributes:
STASH: m01s00i024
source: Data from Met Office Unified Model
um_version: 7.6
Cell methods:
mean: time (1 hour)
mean: time
Notice that the time dimension now represents the 4
possible windows of size 3 from the original cube.
"""
coord = self._as_list_of_coords(coord)[0]
if getattr(coord, 'circular', False):
raise iris.exceptions.NotYetImplementedError(
'Rolling window over a circular coordinate.')
if window < 2:
raise ValueError('Cannot perform rolling window '
'with a window size less than 2.')
if coord.ndim > 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
dimension = self.coord_dims(coord)
if len(dimension) != 1:
raise iris.exceptions.CoordinateCollapseError(
'Cannot perform rolling window with coordinate "%s", '
'must map to one data dimension.' % coord.name())
dimension = dimension[0]
# Use indexing to get a result-cube of the correct shape.
# NB. This indexes the data array which is wasted work.
# As index-to-get-shape-then-fiddle is a common pattern, perhaps
# some sort of `cube.prepare()` method would be handy to allow
# re-shaping with given data, and returning a mapping of
# old-to-new-coords (to avoid having to use metadata identity)?
key = [slice(None, None)] * self.ndim
key[dimension] = slice(None, self.shape[dimension] - window + 1)
new_cube = self[tuple(key)]
# take a view of the original data using the rolling_window function
# this will add an extra dimension to the data at dimension + 1 which
# represents the rolled window (i.e. will have a length of window)
rolling_window_data = iris.util.rolling_window(self.data,
window=window,
axis=dimension)
# now update all of the coordinates to reflect the aggregation
for coord_ in self.coords(dimensions=dimension):
if coord_.has_bounds():
warnings.warn('The bounds of coordinate %r were ignored in '
'the rolling window operation.' % coord_.name())
if coord_.ndim != 1:
raise ValueError('Cannot calculate the rolling '
'window of %s as it is a multidimensional '
'coordinate.' % coord_.name())
new_bounds = iris.util.rolling_window(coord_.points, window)
if np.issubdtype(new_bounds.dtype, np.str_):
# Handle case where the AuxCoord contains string. The points
# are the serialized form of the points contributing to each
# window and the bounds are the first and last points in the
# window as with numeric coordinates.
new_points = np.apply_along_axis(lambda x: '|'.join(x), -1,
new_bounds)
new_bounds = new_bounds[:, (0, -1)]
else:
# Take the first and last element of the rolled window (i.e.
# the bounds) and the new points are the midpoints of these
# bounds.
new_bounds = new_bounds[:, (0, -1)]
new_points = np.mean(new_bounds, axis=-1)
# wipe the coord's points and set the bounds
new_coord = new_cube.coord(coord_)
new_coord.points = new_points
new_coord.bounds = new_bounds
# update the metadata of the cube itself
aggregator.update_metadata(
new_cube, [coord],
action='with a rolling window of length %s over' % window,
**kwargs)
# and perform the data transformation, generating weights first if
# needed
if isinstance(aggregator, iris.analysis.WeightedAggregator) and \
aggregator.uses_weighting(**kwargs):
if 'weights' in kwargs:
weights = kwargs['weights']
if weights.ndim > 1 or weights.shape[0] != window:
raise ValueError('Weights for rolling window aggregation '
'must be a 1d array with the same length '
'as the window.')
kwargs = dict(kwargs)
kwargs['weights'] = iris.util.broadcast_to_shape(
weights, rolling_window_data.shape, (dimension + 1,))
data_result = aggregator.aggregate(rolling_window_data,
axis=dimension + 1,
**kwargs)
result = aggregator.post_process(new_cube, data_result, [coord],
**kwargs)
return result
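# Illustrative sketch (not in the original source): a weighted rolling window,
# assuming a cube with a 'time' dimension coordinate. As the docstring notes,
# the weights must be a 1d array with the same length as the window.
# >>> weights = np.array([0.25, 0.5, 0.25])
# >>> smoothed = cube.rolling_window('time', iris.analysis.MEAN, 3,
# ...                                weights=weights)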
def interpolate(self, sample_points, scheme, collapse_scalar=True):
"""
Interpolate from this :class:`~iris.cube.Cube` to the given
sample points using the given interpolation scheme.
Args:
* sample_points:
A sequence of (coordinate, points) pairs over which to
interpolate. The values for coordinates that correspond to
dates or times may optionally be supplied as datetime.datetime or
cftime.datetime instances.
* scheme:
The type of interpolation to use to interpolate from this
:class:`~iris.cube.Cube` to the given sample points. The
interpolation schemes currently available in Iris are:
* :class:`iris.analysis.Linear`, and
* :class:`iris.analysis.Nearest`.
Kwargs:
* collapse_scalar:
Whether to collapse the dimension of scalar sample points
in the resulting cube. Default is True.
Returns:
A cube interpolated at the given sample points.
If `collapse_scalar` is True then the dimensionality of the cube
will be the number of original cube dimensions minus
the number of scalar coordinates.
For example:
>>> import datetime
>>> import iris
>>> path = iris.sample_data_path('uk_hires.pp')
>>> cube = iris.load_cube(path, 'air_potential_temperature')
>>> print(cube.summary(shorten=True))
air_potential_temperature / (K) \
(time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(cube.coord('time'))
DimCoord([2009-11-19 10:00:00, 2009-11-19 11:00:00, \
2009-11-19 12:00:00], standard_name='time', calendar='gregorian')
>>> print(cube.coord('time').points)
[349618. 349619. 349620.]
>>> samples = [('time', 349618.5)]
>>> result = cube.interpolate(samples, iris.analysis.Linear())
>>> print(result.summary(shorten=True))
air_potential_temperature / (K) \
(model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(result.coord('time'))
DimCoord([2009-11-19 10:30:00], standard_name='time', \
calendar='gregorian')
>>> print(result.coord('time').points)
[349618.5]
>>> # For datetime-like coordinates, we can also use
>>> # datetime-like objects.
>>> samples = [('time', datetime.datetime(2009, 11, 19, 10, 30))]
>>> result2 = cube.interpolate(samples, iris.analysis.Linear())
>>> print(result2.summary(shorten=True))
air_potential_temperature / (K) \
(model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
>>> print(result2.coord('time'))
DimCoord([2009-11-19 10:30:00], standard_name='time', \
calendar='gregorian')
>>> print(result2.coord('time').points)
[349618.5]
>>> print(result == result2)
True
"""
coords, points = zip(*sample_points)
interp = scheme.interpolator(self, coords)
return interp(points, collapse_scalar=collapse_scalar)
def regrid(self, grid, scheme):
"""
Regrid this :class:`~iris.cube.Cube` on to the given target `grid`
using the given regridding `scheme`.
Args:
* grid:
A :class:`~iris.cube.Cube` that defines the target grid.
* scheme:
The type of regridding to use to regrid this cube onto the
target grid. The regridding schemes currently available
in Iris are:
* :class:`iris.analysis.Linear`,
* :class:`iris.analysis.Nearest`, and
* :class:`iris.analysis.AreaWeighted`.
Returns:
A cube defined with the horizontal dimensions of the target grid
and the other dimensions from this cube. The data values of
this cube will be converted to values on the new grid
according to the given regridding scheme.
.. note::
Both the source and target cubes must have a CoordSystem, otherwise
this function is not applicable.
"""
regridder = scheme.regridder(self, grid)
return regridder(self)
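# Illustrative sketch (not in the original source): a typical call regrids this
# cube onto the horizontal grid of another cube, assuming both define a
# coordinate system as required by the note above.
# >>> regridded = cube.regrid(target_cube, iris.analysis.Linear())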
class ClassDict(collections.MutableMapping, object):
"""
A mapping that stores objects keyed on their superclasses and their names.
The mapping has a root class, all stored objects must be a subclass of the
root class. The superclasses used for an object include the class of the
object, but do not include the root class. Only one object is allowed for
any key.
"""
def __init__(self, superclass):
if not isinstance(superclass, type):
raise TypeError("The superclass must be a Python type or new "
"style class.")
self._superclass = superclass
self._basic_map = {}
self._retrieval_map = {}
def add(self, object_, replace=False):
'''Add an object to the dictionary.'''
if not isinstance(object_, self._superclass):
msg = "Only subclasses of {!r} are allowed as values.".format(
self._superclass.__name__)
raise TypeError(msg)
# Find all the superclasses of the given object, starting with the
# object's class.
superclasses = type.mro(type(object_))
if not replace:
# Ensure nothing else is already registered against those
# superclasses.
# NB. This implies the _basic_map will also be empty for this
# object.
for key_class in superclasses:
if key_class in self._retrieval_map:
msg = "Cannot add instance of '%s' because instance of " \
"'%s' already added." % (type(object_).__name__,
key_class.__name__)
raise ValueError(msg)
# Register the given object against those superclasses.
for key_class in superclasses:
self._retrieval_map[key_class] = object_
self._retrieval_map[key_class.__name__] = object_
self._basic_map[type(object_)] = object_
def __getitem__(self, class_):
try:
return self._retrieval_map[class_]
except KeyError:
raise KeyError('Coordinate system %r does not exist.' % class_)
def __setitem__(self, key, value):
raise NotImplementedError('You must call the add method instead.')
def __delitem__(self, class_):
cs = self[class_]
keys = [k for k, v in six.iteritems(self._retrieval_map) if v == cs]
for key in keys:
del self._retrieval_map[key]
del self._basic_map[type(cs)]
return cs
def __len__(self):
return len(self._basic_map)
def __iter__(self):
for item in self._basic_map:
yield item
def keys(self):
'''Return the keys of the dictionary mapping.'''
return self._basic_map.keys()
def sorted_axes(axes):
"""
Returns the axis names sorted alphabetically, with the exception that
't', 'z', 'y', and 'x' are sorted to the end.
"""
return sorted(axes, key=lambda name: ({'x': 4,
'y': 3,
'z': 2,
't': 1}.get(name, 0), name))
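# Illustrative example (not in the original source): unrecognised axis names
# sort first, alphabetically, followed by 't', 'z', 'y' and 'x'.
# >>> sorted_axes(['x', 'y', 't', 'forecast_period'])
# ['forecast_period', 't', 'y', 'x']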
# See Cube.slice() for the definition/context.
class _SliceIterator(collections.Iterator):
def __init__(self, cube, dims_index, requested_dims, ordered):
self._cube = cube
# Let Numpy do some work in providing all of the permutations of our
# data shape. This functionality is something like:
# ndindex(2, 1, 3) -> [(0, 0, 0), (0, 0, 1), (0, 0, 2),
# (1, 0, 0), (1, 0, 1), (1, 0, 2)]
self._ndindex = np.ndindex(*dims_index)
self._requested_dims = requested_dims
# indexing relating to sliced cube
self._mod_requested_dims = np.argsort(requested_dims)
self._ordered = ordered
def __next__(self):
# NB. When self._ndindex runs out it will raise StopIteration for us.
index_tuple = next(self._ndindex)
# Turn the given tuple into a list so that we can do something with it
index_list = list(index_tuple)
# For each of the spanning dimensions requested, replace the 0 with a
# spanning slice
for d in self._requested_dims:
index_list[d] = slice(None, None)
# Request the slice
cube = self._cube[tuple(index_list)]
if self._ordered:
if any(self._mod_requested_dims != list(range(len(cube.shape)))):
n = len(self._mod_requested_dims)
sliced_dims = np.empty(n, dtype=int)
sliced_dims[self._mod_requested_dims] = np.arange(n)
cube.transpose(sliced_dims)
return cube
next = __next__
|
dkillick/iris
|
lib/iris/cube.py
|
Python
|
lgpl-3.0
| 154,273
|
[
"NetCDF"
] |
ba10d3fcff747ae696bee7b0fafb661182426f636e97d97e8f663537cc66cb38
|
from setuptools import setup
from pip.req import parse_requirements
from pip.download import PipSession
links = []
requires = []
requirements = parse_requirements('requirements.txt', session=PipSession())
for item in requirements:
# we want to handle package names and also repo urls
if getattr(item, 'url', None): # older pip has url
links.append(str(item.url))
if getattr(item, 'link', None): # newer pip has link
links.append(str(item.link))
if item.req:
requires.append(str(item.req))
setup(name='seleniumai',
description='An openAI environment that uses Selenium to create web automation agents',
version='0.0.6',
url='https://github.com/bewestphal/SeleniumAI',
author='Brian Westphal',
author_email='coding@brianwestphal.com',
license='MIT',
download_url='https://codeload.github.com/bewestphal/SeleniumAI/0.0.6.tar.gz',
keywords=[
'selenium',
'artificial intelligence',
'openai',
'environment'
],
packages=['package'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
dependency_links=links,
install_requires=requires)
|
bewestphal/SeleniumAI
|
setup.py
|
Python
|
mit
| 1,357
|
[
"Brian"
] |
414416bf6807e81623134565bfc00c614a5754e0bf438284ec3e73f3ce80e21b
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the engine.activate() call within the
`iris.fileformats.netcdf._load_cube` function.
Test rules activation relating to hybrid vertical coordinates.
"""
import iris.tests as tests # isort: skip
import iris.fileformats._nc_load_rules.helpers as hh
from iris.tests.unit.fileformats.nc_load_rules.actions import (
Mixin__nc_load_actions,
)
class Test__formulae_tests(Mixin__nc_load_actions, tests.IrisTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def _make_testcase_cdl(
self, formula_root_name=None, term_names=None, extra_formula_type=None
):
"""Construct a testcase CDL for data with hybrid vertical coords."""
if formula_root_name is None:
formula_root_name = "atmosphere_hybrid_height_coordinate"
if term_names is None:
term_names = hh.CF_COORD_VERTICAL.get(formula_root_name)
if term_names is None:
# unsupported type : just make something up
term_names = ["term1"]
# Arrange to create additional term variables for an 'extra' hybrid
# formula, if requested.
if extra_formula_type is None:
term_names_extra = []
phenom_coord_names = ["vert"] # always include the root variable
else:
phenom_coord_names = ["vert", "vert_2"] # two formula coords
term_names_extra = hh.CF_COORD_VERTICAL.get(extra_formula_type)
# Build strings to define term variables.
formula_term_strings = []
extra_formula_term_strings = []
terms_string = ""
for term_name in term_names + term_names_extra:
term_varname = "v_" + term_name
# Include in the phenom coordinates list.
phenom_coord_names.append(term_varname)
term_string = f"{term_name}: {term_varname}"
if term_name in term_names:
# Include in the 'main' terms list.
formula_term_strings.append(term_string)
else:
# Include in the 'extra' terms list.
extra_formula_term_strings.append(term_string)
terms_string += f"""
double {term_varname}(h) ;
{term_varname}:long_name = "{term_name}_long_name" ;
{term_varname}:units = "m" ;
"""
# Construct the reference strings.
phenom_coords_string = " ".join(phenom_coord_names)
formula_terms_string = " ".join(formula_term_strings)
extra_formula_terms_string = " ".join(extra_formula_term_strings)
# Construct the 'extra' hybrid coord if requested.
if extra_formula_type is None:
extra_formula_string = ""
else:
# Create the lines to add an 'extra' formula.
# For now, put this on the same dim : makes no difference.
extra_formula_string = f"""
double vert_2(h) ;
vert_2:standard_name = "{extra_formula_type}" ;
vert_2:units = "m" ;
vert_2:formula_terms = "{extra_formula_terms_string}" ;
"""
# Create the main result string.
cdl_str = f"""
netcdf test {{
dimensions:
h = 2 ;
variables:
double phenom(h) ;
phenom:standard_name = "air_temperature" ;
phenom:units = "K" ;
phenom:coordinates = "{phenom_coords_string}" ;
double vert(h) ;
vert:standard_name = "{formula_root_name}" ;
vert:long_name = "hybrid_vertical" ;
vert:units = "m" ;
vert:formula_terms = "{formula_terms_string}" ;
{terms_string}
{extra_formula_string}
}}
"""
return cdl_str
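# Illustrative note (not in the original source): for the default
# hybrid-height case this produces a 'vert' variable whose formula_terms
# attribute reads "a: v_a b: v_b orog: v_orog", with "vert v_a v_b v_orog"
# listed in the phenomenon's coordinates attribute; the rule-trigger tests
# below rely on exactly this structure.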
def check_result(self, cube, factory_type="_auto", formula_terms="_auto"):
"""Check the result of a cube load with a hybrid vertical coord."""
if factory_type == "_auto":
# replace with our 'default', which is hybrid-height.
# N.B. 'None' is different: it means expect *no* factory.
factory_type = "atmosphere_hybrid_height_coordinate"
self.assertEqual(cube._formula_type_name, factory_type)
if formula_terms == "_auto":
# Set default terms-expected, according to the expected factory
# type.
if factory_type is None:
# If no factory, expect no identified terms.
formula_terms = []
else:
# Expect the correct ones defined for the factory type.
formula_terms = hh.CF_COORD_VERTICAL[factory_type]
# Compare the formula_terms list with the 'expected' ones.
# N.B. first make the 'expected' list lower case, as the lists in
# hh.CF_COORD_VERTICAL include uppercase, but rules outputs don't.
formula_terms = [term.lower() for term in formula_terms]
# N.B. the terms dictionary can be missing, if there were none
actual_terms = cube._formula_terms_byname or {}
self.assertEqual(sorted(formula_terms), sorted(actual_terms.keys()))
# Check that there is an aux-coord of the expected name for each term
for var_name in actual_terms.values():
coords = cube.coords(var_name=var_name, dim_coords=False)
self.assertEqual(len(coords), 1)
#
# Actual testcase routines
#
def test_basic_hybridheight(self):
# Rules Triggered:
# 001 : fc_default
# 002 : fc_build_auxiliary_coordinate
# 003 : fc_build_auxiliary_coordinate
# 004 : fc_build_auxiliary_coordinate
# 005 : fc_build_auxiliary_coordinate
# 008 : fc_formula_type_atmosphere_hybrid_height_coordinate
# 009 : fc_formula_term(a)
# 010 : fc_formula_term(b)
# 011 : fc_formula_term(orog)
result = self.run_testcase()
self.check_result(result)
def test_missing_term(self):
# Check behaviour when a term is missing.
# For the test, omit "orography", which is common in practice.
#
# Rules Triggered:
# 001 : fc_default
# 002 : fc_build_auxiliary_coordinate
# 003 : fc_build_auxiliary_coordinate
# 004 : fc_build_auxiliary_coordinate
# 007 : fc_formula_type_atmosphere_hybrid_height_coordinate
# 008 : fc_formula_term(a)
# 009 : fc_formula_term(b)
result = self.run_testcase(
term_names=["a", "b"] # missing the 'orog' term
)
self.check_result(result, formula_terms=["a", "b"])
def test_no_terms(self):
# Check behaviour when *all* terms are missing.
# N.B. for any _actual_ type, this is probably invalid and would fail?
#
# Rules Triggered:
# 001 : fc_default
# 002 : fc_build_auxiliary_coordinate
result = self.run_testcase(
formula_root_name="atmosphere_hybrid_height_coordinate",
term_names=[],
)
# This does *not* trigger
# 'fc_formula_type_atmosphere_hybrid_height_coordinate'
# This is because, within the 'assert_case_specific_facts' routine,
# formula_roots are only recognised by scanning the identified
# formula_terms.
self.check_result(result, factory_type=None)
def test_unrecognised_verticaltype(self):
# Set the root variable name to something NOT a recognised hybrid type.
#
# Rules Triggered:
# 001 : fc_default
# 002 : fc_build_auxiliary_coordinate
# 003 : fc_build_auxiliary_coordinate
# 004 : fc_build_auxiliary_coordinate
# 007 : fc_formula_type(FAILED - unrecognised formula type = 'unknown')
# 008 : fc_formula_term(a)
# 009 : fc_formula_term(b)
result = self.run_testcase(
formula_root_name="unknown",
term_names=["a", "b"],
warning="Ignored formula of unrecognised type: 'unknown'.",
)
# Check that it picks up the terms, but *not* the factory root coord,
# which is simply discarded.
self.check_result(result, factory_type=None, formula_terms=["a", "b"])
def test_two_formulae(self):
# Construct an example with TWO hybrid coords.
# This does not raise an error, but we don't correctly support it.
#
# NOTE: the original Pyke implementation does not detect this problem.
# By design, the new mechanism does, and will raise a warning.
warning = (
"Omitting factories for some hybrid coordinates.*"
"multiple hybrid coordinates.* not supported"
)
extra_type = "ocean_sigma_coordinate"
result = self.run_testcase(
extra_formula_type=extra_type, warning=warning
)
# NOTE: FOR NOW, check expected behaviour : only one factory will be
# built, but there are coordinates (terms) for both types.
# TODO: this is a bug and needs fixing : translation should handle
# multiple hybrid coordinates in a sensible way.
self.check_result(
result,
factory_type=extra_type,
formula_terms=["a", "b", "depth", "eta", "orog", "sigma"],
)
# Add in tests methods to exercise each (supported) vertical coordinate type
# individually.
# NOTE: hh.CF_COORD_VERTICAL lists all the valid types, but we don't yet
# support all of them.
_SUPPORTED_FORMULA_TYPES = (
# NOTE: omit "atmosphere_hybrid_height_coordinate" : our basic testcase
"atmosphere_sigma_coordinate",
"atmosphere_hybrid_sigma_pressure_coordinate",
"ocean_sigma_z_coordinate",
"ocean_sigma_coordinate",
"ocean_s_coordinate",
"ocean_s_coordinate_g1",
"ocean_s_coordinate_g2",
)
for hybrid_type in _SUPPORTED_FORMULA_TYPES:
def construct_inner_func(hybrid_type):
term_names = hh.CF_COORD_VERTICAL[hybrid_type]
def inner(self):
result = self.run_testcase(
formula_root_name=hybrid_type, term_names=term_names
)
self.check_result(
result, factory_type=hybrid_type, formula_terms=term_names
)
return inner
# Note: use an intermediate function to generate each test method, simply to
# generate a new local variable for 'hybrid_type' on each iteration.
# Otherwise all the test methods will refer to the *same* 'hybrid_type'
# variable, i.e. the loop variable, which does not work!
method_name = f"test_{hybrid_type}_coord"
setattr(
Test__formulae_tests, method_name, construct_inner_func(hybrid_type)
)
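# Illustrative note (not in the original source): the loop above attaches one
# generated method per supported formula type, e.g.
# 'test_ocean_sigma_coordinate_coord', each of which runs the generic testcase
# for that vertical coordinate type and checks the expected factory and terms.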
if __name__ == "__main__":
tests.main()
|
rcomer/iris
|
lib/iris/tests/unit/fileformats/nc_load_rules/actions/test__hybrid_formulae.py
|
Python
|
lgpl-3.0
| 10,925
|
[
"NetCDF"
] |
7c18ce74fb2abcdaa21b086b95a3c04c7b119fd279de3a965aa4d5d73cd56832
|
import abc
import numpy as np
import six
from ..fit.io.file import FileIOMixin
__all__ = ['ParameterConstraintException', 'ParameterConstraint', 'GaussianSimpleParameterConstraint',
'GaussianMatrixParameterConstraint']
class ParameterConstraintException(Exception):
pass
@six.add_metaclass(abc.ABCMeta)
class ParameterConstraint(FileIOMixin, object):
"""
Abstract base class for parameter constraints.
Subclasses must implement the ``cost`` method.
"""
def __init__(self):
pass
@property
@abc.abstractmethod
def extra_ndf(self):
"""
:return: the additional number of degrees of freedom introduced by this constraint.
"""
def _get_base_class(self):
return ParameterConstraint
def _get_object_type_name(self):
return 'parameter_constraint'
def cost(self, parameter_values):
"""
Calculates additional cost depending on the fit parameter values.
:param parameter_values: The current parameter values of the fit
:type parameter_values: iterable of float
:return: The additional cost imposed by the given parameter values
:rtype: float
"""
pass
class GaussianSimpleParameterConstraint(ParameterConstraint):
def __init__(self, index, value, uncertainty, relative=False):
"""
Simple class for applying a gaussian constraint to a single parameter of a fit.
:param index: The index of the parameter to be constrained
:type index: int
:param value: The value to which the parameter should be constrained
:type value: float
:param uncertainty: The uncertainty with which the parameter should be constrained to the given value
:type uncertainty: float
:param relative: Whether the given uncertainty is relative to the given value
:type relative: bool
"""
self._index = index
self._value = value
if relative:
self._uncertainty_abs = None
self._uncertainty_rel = uncertainty
else:
self._uncertainty_abs = uncertainty
self._uncertainty_rel = None
self._relative = relative
super(GaussianSimpleParameterConstraint, self).__init__()
@property
def index(self):
"""the index of the constrained parameter"""
return self._index
@property
def value(self):
"""the value to which the parameter is being constrained"""
return self._value
@property
def uncertainty(self):
"""the absolute uncertainty with which the parameter is being constrained"""
if self._uncertainty_abs is None:
self._uncertainty_abs = self._uncertainty_rel * self.value
return self._uncertainty_abs
@property
def uncertainty_rel(self):
"""the uncertainty relative to ``value`` with which the parameter is being constrained"""
if self._uncertainty_rel is None:
self._uncertainty_rel = self._uncertainty_abs / self.value
return self._uncertainty_rel
@property
def relative(self):
"""whether the constraint was initialized with a relative uncertainty"""
return self._relative
@property
def extra_ndf(self):
return 1
def cost(self, parameter_values):
"""
Calculates additional cost depending on the fit parameter values.
More specifically, the constraint first picks the value from ``parameter_values`` at ``self.index``.
The constraint then calculates the residual by subtracting ``self.value``.
The final cost is calculated by dividing the residual by ``self.uncertainty`` and squaring the result.
:param parameter_values: The current parameter values of the fit
:type parameter_values: iterable of float
:return: The additional cost imposed by the given parameter values
:rtype: float
"""
return ((parameter_values[self.index] - self.value) / self.uncertainty) ** 2
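# Illustrative sketch (not in the original kafe2 source): constraining
# parameter 0 to 5.0 with an absolute uncertainty of 0.5, a fitted value of
# 5.5 is one sigma away, so the added cost is ((5.5 - 5.0) / 0.5) ** 2 == 1.0:
#
#     constraint = GaussianSimpleParameterConstraint(index=0, value=5.0,
#                                                    uncertainty=0.5)
#     constraint.cost([5.5, 2.0])  # -> 1.0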
class GaussianMatrixParameterConstraint(ParameterConstraint):
def __init__(self, indices, values, matrix, matrix_type='cov', uncertainties=None, relative=False):
"""
Advanced class for applying correlated constraints to several parameters of a fit.
The order of ``indices``, ``values``, ``matrix``, and ``uncertainties`` must be aligned.
In other words the first index must belong to the first value, the first row/column in the matrix, etc.
Let N be the number of parameters to be constrained.
:param indices: The indices of the parameters to be constrained
:type indices: iterable of int, shape (N,)
:param values: The values to which the parameters should be constrained
:type values: iterable of float, shape (N,)
:param matrix: The matrix that defines the correlation between the parameters. By default interpreted as a
covariance matrix. Can also be interpreted as a correlation matrix by setting ``matrix_type``
:type matrix: iterable of float, shape (N, N)
:param matrix_type: Whether the matrix should be interpreted as a covariance matrix or as a correlation matrix
:type matrix_type: str, either 'cov' or 'cor'
:param uncertainties: The uncertainties to be used in conjunction with a correlation matrix
:type uncertainties: ``None`` or iterable of float, shape (N,)
:param relative: Whether the covariance matrix/the uncertainties should be interpreted as relative to ``values``
:type relative: bool
"""
self._indices = np.array(indices)
self._values = np.array(values)
_matrix_array = np.array(matrix)
if not np.array_equal(_matrix_array, _matrix_array.T):
raise ValueError('The matrix for parameter constraints must be symmetric!')
if len(self._values.shape) != 1 or self._values.shape * 2 != _matrix_array.shape:
raise ValueError(
'Expected values and cov_mat to be of shapes (N, ), (N, N) but received shapes %s, %s instead!'
% (self._values.shape, _matrix_array.shape))
if matrix_type == 'cov':
pass
elif matrix_type == 'cor':
if np.any(np.diag(_matrix_array) != 1.0):
raise ValueError('The correlation matrix has diagonal elements that aren\'t equal to 1!')
if np.any(_matrix_array > 1.0):
raise ValueError('The correlation matrix has elements greater than 1!')
if np.any(_matrix_array < -1.0):
raise ValueError('The correlation matrix has elements smaller than -1!')
else:
raise ValueError('Unknown matrix_type: %s, must be either cov or cor!' % matrix_type)
if matrix_type == 'cov':
if relative:
self._cov_mat_abs = None
self._cov_mat_rel = _matrix_array
self._cor_mat = None
else:
self._cov_mat_abs = _matrix_array
self._cov_mat_rel = None
self._cor_mat = None
if uncertainties is not None:
raise ValueError('Uncertainties can only be specified if matrix_type is cov!')
self._uncertainties_abs = None
self._uncertainties_rel = None
else:
self._cov_mat_abs = None
self._cov_mat_rel = None
self._cor_mat = _matrix_array
if uncertainties is None:
raise ValueError('If matrix_type is cor uncertainties must be specified!')
if relative:
self._uncertainties_abs = None
self._uncertainties_rel = uncertainties
else:
self._uncertainties_abs = uncertainties
self._uncertainties_rel = None
self._matrix_type = matrix_type
self._relative = relative
self._cov_mat_inverse = None
super(GaussianMatrixParameterConstraint, self).__init__()
@property
def indices(self):
"""the indices of the parameters to be constrained"""
return self._indices
@property
def values(self):
"""the values to which the parameters are being constrained"""
return self._values
@property
def cov_mat(self):
"""the absolute covariance matrix between the parameter uncertainties"""
if self._cov_mat_abs is None:
if self.matrix_type == 'cov':
self._cov_mat_abs = self._cov_mat_rel * np.outer(self.values, self.values)
else:
self._cov_mat_abs = self._cor_mat * np.outer(self.uncertainties, self.uncertainties)
return self._cov_mat_abs
@property
def cov_mat_rel(self):
"""the covariance matrix between the parameter uncertainties relative to ``self.values``"""
if self._cov_mat_rel is None:
if self.matrix_type == 'cov':
self._cov_mat_rel = self._cov_mat_abs / np.outer(self.values, self.values)
else:
self._cov_mat_rel = self._cor_mat * np.outer(self.uncertainties_rel, self.uncertainties_rel)
return self._cov_mat_rel
@property
def cor_mat(self):
"""the correlation matrix between the parameter uncertainties"""
if self._cor_mat is None:
# if the originally specified cov mat was relative, calculate the cor mat based on that
if self._relative:
self._cor_mat = self.cov_mat_rel / np.outer(self.uncertainties_rel, self.uncertainties_rel)
else:
self._cor_mat = self.cov_mat / np.outer(self.uncertainties, self.uncertainties)
return self._cor_mat
@property
def uncertainties(self):
"""the uncorrelated, absolute uncertainties for the parameters to be constrained to"""
if self._uncertainties_abs is None:
if self.matrix_type == 'cov':
self._uncertainties_abs = np.sqrt(np.diag(self.cov_mat))
else:
self._uncertainties_abs = self.uncertainties_rel * self.values
return self._uncertainties_abs
@property
def uncertainties_rel(self):
"""the uncorrelated uncertainties for the parameters to be constrained to relative to ``self.values``"""
if self._uncertainties_rel is None:
if self.matrix_type == 'cov':
self._uncertainties_rel = np.sqrt(np.diag(self.cov_mat_rel))
else:
self._uncertainties_rel = self.uncertainties / self.values
return self._uncertainties_rel
@property
def matrix_type(self):
"""the type of matrix with which the constraint was initialized"""
return self._matrix_type
@property
def relative(self):
"""whether the constraint was initialized with a relative covariance matrix/with relative uncertainties"""
return self._relative
@property
def cov_mat_inverse(self):
"""the inverse of the covariance matrix between the parameter uncertainties"""
if self._cov_mat_inverse is None:
self._cov_mat_inverse = np.linalg.inv(self.cov_mat)
return self._cov_mat_inverse
@property
def extra_ndf(self):
return len(self.indices)
def cost(self, parameter_values):
"""
Calculates additional cost depending on the fit parameter values.
More specifically, the constraint first picks values from ``parameter_values`` according to ``self.indices``.
The constraint then calculates the residuals by subtracting ``self.values``.
The final cost is calculated by applying the residuals to both sides of ``self.cov_mat_inverse``
via dot product.
:param parameter_values: The current parameter values of the fit
:type parameter_values: iterable of float
:return: The additional cost imposed by the given parameter values
:rtype: float
"""
_selected_par_values = np.asarray(parameter_values)[self.indices]
_res = _selected_par_values - self.values
return _res.dot(self.cov_mat_inverse).dot(_res)
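# Illustrative sketch (not in the original kafe2 source): with two constrained
# parameters, values (1.0, 2.0) and the diagonal covariance matrix
# [[0.04, 0.0], [0.0, 0.09]] (uncertainties 0.2 and 0.3), the fit values
# (1.2, 2.3) sit one sigma away in each direction, so the quadratic form
# evaluates to 1.0 + 1.0 == 2.0:
#
#     constraint = GaussianMatrixParameterConstraint(
#         indices=[0, 1], values=[1.0, 2.0],
#         matrix=[[0.04, 0.0], [0.0, 0.09]])
#     constraint.cost([1.2, 2.3])  # -> 2.0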
|
dsavoiu/kafe2
|
kafe2/core/constraint.py
|
Python
|
gpl-3.0
| 12,225
|
[
"Gaussian"
] |
ad98d746ad47acb5f9359ecc4cb4aa834feba129524de5e162d2d16b1c5465fb
|
# -*- coding: utf-8 -*-
"""Testing functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
from contextlib import contextmanager
from distutils.version import LooseVersion
from functools import partial, wraps
import os
import inspect
from io import StringIO
from shutil import rmtree
import sys
import tempfile
import traceback
from unittest import SkipTest
import warnings
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from ._logging import warn, ClosingStringIO
from .numerics import object_diff
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
class _TempDir(str):
"""Create and auto-destroy temp dir.
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self): # noqa: D105
new = str.__new__(self, tempfile.mkdtemp(prefix='tmp_mne_tempdir_'))
return new
def __init__(self): # noqa: D102
self._path = self.__str__()
def __del__(self): # noqa: D105
rmtree(self._path, ignore_errors=True)
def requires_nibabel():
"""Wrap to requires_module with a function call (fewer lines to change)."""
return partial(requires_module, name='nibabel')
def requires_dipy():
"""Check for dipy."""
import pytest
# for some strange reason on CIs we can get a weird
# "ImportError: dlopen: cannot load any more object with static TLS",
#
# so let's import everything in the decorator.
try:
from dipy.align import imaffine, imwarp, metrics, transforms # noqa, analysis:ignore
from dipy.align.reslice import reslice # noqa, analysis:ignore
from dipy.align.imaffine import AffineMap # noqa, analysis:ignore
from dipy.align.imwarp import DiffeomorphicMap # noqa, analysis:ignore
except Exception:
have = False
else:
have = True
return pytest.mark.skipif(not have, reason='Requires dipy >= 0.10.1')
def requires_version(library, min_version='0.0'):
"""Check for a library version."""
import pytest
return pytest.mark.skipif(not check_version(library, min_version),
reason=('Requires %s version >= %s'
% (library, min_version)))
def requires_module(function, name, call=None):
"""Skip a test if package is not available (decorator)."""
import pytest
call = ('import %s' % name) if call is None else call
reason = 'Test %s skipped, requires %s.' % (function.__name__, name)
try:
exec(call, globals(), locals())
except Exception as exc:
if len(str(exc)) > 0 and str(exc) != 'No module named %s' % name:
reason += ' Got exception (%s)' % (exc,)
skip = True
else:
skip = False
return pytest.mark.skipif(skip, reason=reason)(function)
_pandas_call = """
import pandas
version = LooseVersion(pandas.__version__)
if version < '0.8.0':
raise ImportError
"""
_mayavi_call = """
with warnings.catch_warnings(record=True): # traits
from mayavi import mlab
"""
_mne_call = """
if not has_mne_c():
raise ImportError
"""
_fs_call = """
if not has_freesurfer():
raise ImportError
"""
_n2ft_call = """
if 'NEUROMAG2FT_ROOT' not in os.environ:
raise ImportError
"""
requires_pandas = partial(requires_module, name='pandas', call=_pandas_call)
requires_pylsl = partial(requires_module, name='pylsl')
requires_sklearn = partial(requires_module, name='sklearn')
requires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)
requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
def requires_freesurfer(arg):
"""Require Freesurfer."""
if isinstance(arg, str):
# Calling as @requires_freesurfer('progname'): return decorator
# after checking for progname existence
call = """
from . import run_subprocess
run_subprocess([%r, '--version'])
""" % (arg,)
return partial(
requires_module, name='Freesurfer (%s)' % (arg,), call=call)
else:
# Calling directly as @requires_freesurfer: return decorated function
# and just check env var existence
return requires_module(arg, name='Freesurfer', call=_fs_call)
requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
call=_n2ft_call)
requires_vtk = partial(requires_module, name='vtk')
requires_pysurfer = partial(requires_module, name='PySurfer',
call="""import warnings
with warnings.catch_warnings(record=True):
from surfer import Brain""")
requires_good_network = partial(
requires_module, name='good network connection',
call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
' raise ImportError')
requires_nitime = partial(requires_module, name='nitime')
requires_h5py = partial(requires_module, name='h5py')
def requires_numpydoc(func):
"""Decorate tests that need numpydoc."""
return requires_version('numpydoc', '1.0')(func) # validate needs 1.0
def check_version(library, min_version):
r"""Check minimum library version required.
Parameters
----------
library : str
The library name to import. Must have a ``__version__`` property.
min_version : str
The minimum version string. Anything that matches
``'(\d+ | [a-z]+ | \.)'``. Can also be empty to skip version
check (just check for library presence).
Returns
-------
ok : bool
True if the library exists with at least the specified version.
"""
ok = True
try:
library = __import__(library)
except ImportError:
ok = False
else:
if min_version:
this_version = LooseVersion(
getattr(library, '__version__', '0.0').lstrip('v'))
if this_version < min_version:
ok = False
return ok
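# Illustrative sketch (not in the original source): check_version is also
# handy for feature gating outside of decorators, e.g.
#
#     if check_version('numpy', '1.16'):
#         pass  # safe to rely on a numpy >= 1.16 feature here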
def _check_mayavi_version(min_version='4.3.0'):
"""Check mayavi version."""
if not check_version('mayavi', min_version):
raise RuntimeError("Need mayavi >= %s" % min_version)
def _import_mlab():
"""Quietly import mlab."""
with warnings.catch_warnings(record=True):
from mayavi import mlab
return mlab
@contextmanager
def traits_test_context():
"""Context to raise errors in trait handlers."""
try:
from traits.api import push_exception_handler
except Exception:
yield
else:
push_exception_handler(reraise_exceptions=True)
try:
yield
finally:
push_exception_handler(reraise_exceptions=False)
def traits_test(test_func):
"""Raise errors in trait handlers (decorator)."""
@wraps(test_func)
def dec(*args, **kwargs):
with traits_test_context():
return test_func(*args, **kwargs)
return dec
def run_command_if_main():
"""Run a given command if it's __main__."""
local_vars = inspect.currentframe().f_back.f_locals
if local_vars.get('__name__', '') == '__main__':
local_vars['run']()
class ArgvSetter(object):
"""Temporarily set sys.argv."""
def __init__(self, args=(), disable_stdout=True,
disable_stderr=True): # noqa: D102
self.argv = list(('python',) + args)
self.stdout = ClosingStringIO() if disable_stdout else sys.stdout
self.stderr = ClosingStringIO() if disable_stderr else sys.stderr
def __enter__(self): # noqa: D105
self.orig_argv = sys.argv
sys.argv = self.argv
self.orig_stdout = sys.stdout
sys.stdout = self.stdout
self.orig_stderr = sys.stderr
sys.stderr = self.stderr
return self
def __exit__(self, *args): # noqa: D105
sys.argv = self.orig_argv
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
class SilenceStdout(object):
"""Silence stdout."""
def __init__(self, close=True):
self.close = close
def __enter__(self): # noqa: D105
self.stdout = sys.stdout
sys.stdout = StringIO()
return sys.stdout
def __exit__(self, *args): # noqa: D105
if self.close:
sys.stdout.close()
sys.stdout = self.stdout
def has_nibabel():
"""Determine if nibabel is installed.
Returns
-------
has : bool
True if the user has nibabel.
"""
try:
import nibabel # noqa
except ImportError:
return False
else:
return True
def has_mne_c():
"""Check for MNE-C."""
return 'MNE_ROOT' in os.environ
def has_freesurfer():
"""Check for Freesurfer."""
return 'FREESURFER_HOME' in os.environ
def buggy_mkl_svd(function):
"""Decorate tests that make calls to SVD and intermittently fail."""
@wraps(function)
def dec(*args, **kwargs):
try:
return function(*args, **kwargs)
except np.linalg.LinAlgError as exp:
if 'SVD did not converge' in str(exp):
msg = 'Intel MKL SVD convergence error detected, skipping test'
warn(msg)
raise SkipTest(msg)
raise
return dec
def assert_and_remove_boundary_annot(annotations, n=1):
"""Assert that there are boundary annotations and remove them."""
from ..io.base import BaseRaw
if isinstance(annotations, BaseRaw): # allow either input
annotations = annotations.annotations
for key in ('EDGE', 'BAD'):
idx = np.where(annotations.description == '%s boundary' % key)[0]
assert len(idx) == n
annotations.delete(idx)
def assert_object_equal(a, b):
"""Assert two objects are equal."""
d = object_diff(a, b)
assert d == '', d
def _raw_annot(meas_date, orig_time):
from .. import Annotations, create_info
from ..annotations import _handle_meas_date
from ..io import RawArray
info = create_info(ch_names=10, sfreq=10.)
raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
if meas_date is not None:
meas_date = _handle_meas_date(meas_date)
with raw.info._unlock(check_after=True):
raw.info['meas_date'] = meas_date
annot = Annotations([.5], [.2], ['dummy'], orig_time)
raw.set_annotations(annotations=annot)
return raw
def _get_data(x, ch_idx):
"""Get the (n_ch, n_times) data array."""
from ..evoked import Evoked
from ..io import BaseRaw
if isinstance(x, BaseRaw):
return x[ch_idx][0]
elif isinstance(x, Evoked):
return x.data[ch_idx]
def _check_snr(actual, desired, picks, min_tol, med_tol, msg, kind='MEG'):
"""Check the SNR of a set of channels."""
actual_data = _get_data(actual, picks)
desired_data = _get_data(desired, picks)
bench_rms = np.sqrt(np.mean(desired_data * desired_data, axis=1))
error = actual_data - desired_data
error_rms = np.sqrt(np.mean(error * error, axis=1))
np.clip(error_rms, 1e-60, np.inf, out=error_rms) # avoid division by zero
snrs = bench_rms / error_rms
# min tol
snr = snrs.min()
bad_count = (snrs < min_tol).sum()
msg = ' (%s)' % msg if msg != '' else msg
assert bad_count == 0, ('SNR (worst %0.2f) < %0.2f for %s/%s '
'channels%s' % (snr, min_tol, bad_count,
len(picks), msg))
# median tol
snr = np.median(snrs)
assert snr >= med_tol, ('%s SNR median %0.2f < %0.2f%s'
% (kind, snr, med_tol, msg))
def assert_meg_snr(actual, desired, min_tol, med_tol=500., chpi_med_tol=500.,
msg=None):
"""Assert channel SNR of a certain level.
Mostly useful for operations like Maxwell filtering that modify
MEG channels while leaving EEG and others intact.
"""
from ..io.pick import pick_types
picks = pick_types(desired.info, meg=True, exclude=[])
picks_desired = pick_types(desired.info, meg=True, exclude=[])
assert_array_equal(picks, picks_desired, err_msg='MEG pick mismatch')
chpis = pick_types(actual.info, meg=False, chpi=True, exclude=[])
chpis_desired = pick_types(desired.info, meg=False, chpi=True, exclude=[])
if chpi_med_tol is not None:
assert_array_equal(chpis, chpis_desired, err_msg='cHPI pick mismatch')
others = np.setdiff1d(np.arange(len(actual.ch_names)),
np.concatenate([picks, chpis]))
others_desired = np.setdiff1d(np.arange(len(desired.ch_names)),
np.concatenate([picks_desired,
chpis_desired]))
assert_array_equal(others, others_desired, err_msg='Other pick mismatch')
if len(others) > 0: # if non-MEG channels present
assert_allclose(_get_data(actual, others),
_get_data(desired, others), atol=1e-11, rtol=1e-5,
err_msg='non-MEG channel mismatch')
_check_snr(actual, desired, picks, min_tol, med_tol, msg, kind='MEG')
if chpi_med_tol is not None and len(chpis) > 0:
_check_snr(actual, desired, chpis, 0., chpi_med_tol, msg, kind='cHPI')
def assert_snr(actual, desired, tol):
"""Assert actual and desired arrays are within some SNR tolerance."""
from scipy import linalg
with np.errstate(divide='ignore'): # allow infinite
snr = (linalg.norm(desired, ord='fro') /
linalg.norm(desired - actual, ord='fro'))
assert snr >= tol, '%f < %f' % (snr, tol)
def assert_stcs_equal(stc1, stc2):
"""Check that two STC are equal."""
assert_allclose(stc1.times, stc2.times)
assert_allclose(stc1.data, stc2.data)
assert_array_equal(stc1.vertices[0], stc2.vertices[0])
assert_array_equal(stc1.vertices[1], stc2.vertices[1])
assert_allclose(stc1.tmin, stc2.tmin)
assert_allclose(stc1.tstep, stc2.tstep)
def _dig_sort_key(dig):
"""Sort dig keys."""
return (dig['kind'], dig['ident'])
def assert_dig_allclose(info_py, info_bin, limit=None):
"""Assert dig allclose."""
from ..bem import fit_sphere_to_headshape
from ..io.constants import FIFF
from ..io.meas_info import Info
from ..channels.montage import DigMontage
# test dig positions
dig_py, dig_bin = info_py, info_bin
if isinstance(dig_py, Info):
assert isinstance(dig_bin, Info)
dig_py, dig_bin = dig_py['dig'], dig_bin['dig']
else:
assert isinstance(dig_bin, DigMontage)
assert isinstance(dig_py, DigMontage)
dig_py, dig_bin = dig_py.dig, dig_bin.dig
info_py = info_bin = None
assert isinstance(dig_py, list)
assert isinstance(dig_bin, list)
dig_py = sorted(dig_py, key=_dig_sort_key)
dig_bin = sorted(dig_bin, key=_dig_sort_key)
assert len(dig_py) == len(dig_bin)
for ii, (d_py, d_bin) in enumerate(zip(dig_py[:limit], dig_bin[:limit])):
for key in ('ident', 'kind', 'coord_frame'):
assert d_py[key] == d_bin[key], key
assert_allclose(d_py['r'], d_bin['r'], rtol=1e-5, atol=1e-5,
err_msg='Failure on %s:\n%s\n%s'
% (ii, d_py['r'], d_bin['r']))
if any(d['kind'] == FIFF.FIFFV_POINT_EXTRA for d in dig_py) and \
info_py is not None:
r_bin, o_head_bin, o_dev_bin = fit_sphere_to_headshape(
info_bin, units='m', verbose='error')
r_py, o_head_py, o_dev_py = fit_sphere_to_headshape(
info_py, units='m', verbose='error')
assert_allclose(r_py, r_bin, atol=1e-6)
assert_allclose(o_dev_py, o_dev_bin, rtol=1e-5, atol=1e-6)
assert_allclose(o_head_py, o_head_bin, rtol=1e-5, atol=1e-6)
@contextmanager
def modified_env(**d):
"""Use a modified os.environ with temporarily replaced key/value pairs.
Parameters
----------
**d : dict
The key/value pairs of environment variables to replace.
"""
orig_env = dict()
for key, val in d.items():
orig_env[key] = os.getenv(key)
if val is not None:
assert isinstance(val, str)
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
try:
yield
finally:
for key, val in orig_env.items():
if val is not None:
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
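# Usage sketch (hypothetical values): string values are set for the duration of
# the block, None removes the variable, and the previous environment is restored
# on exit even if the block raises.
#
#     with modified_env(MNE_ROOT='/tmp/mne_c', FREESURFER_HOME=None):
#         assert has_mne_c() and not has_freesurfer()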
def _click_ch_name(fig, ch_index=0, button=1):
"""Click on a channel name in a raw/epochs/ICA browse-style plot."""
from ..viz.utils import _fake_click
fig.canvas.draw()
text = fig.mne.ax_main.get_yticklabels()[ch_index]
bbox = text.get_window_extent()
x = bbox.intervalx.mean()
y = bbox.intervaly.mean()
_fake_click(fig, fig.mne.ax_main, (x, y), xform='pix',
button=button)
| bloyl/mne-python | mne/utils/_testing.py | Python | bsd-3-clause | 17,583 | ["Mayavi", "VTK"] | 28e9a3674c164dfe6a76068f03da28a5a7e195d4d84dd3374a3a420ddb90568b |
import operator as op
from functools import partial
from itertools import permutations, combinations
import logging
import lib.const as C
import lib.visit as v
from ... import add_artifacts
from ... import util
from ... import sample
from ...encoder import add_ty_map
from ...meta import class_lookup
from ...meta.template import Template
from ...meta.clazz import Clazz, merge_flat
from ...meta.method import Method, sig_match, call_stt
from ...meta.field import Field
from ...meta.statement import Statement, to_statements
from ...meta.expression import Expression, to_expression, gen_E_gen
class Observer(object):
@classmethod
def find_obs(cls):
return lambda anno: anno.by_name(C.A.OBS)
# to avoid name conflicts, use a fresh counter as suffix
__cnt = 0
@classmethod
def fresh_cnt(cls):
cls.__cnt = cls.__cnt + 1
return cls.__cnt
@classmethod
def new_aux(cls, suffix=None):
if not suffix:
suffix = str(Observer.fresh_cnt())
return u"{}{}".format(C.OBS.AUX, suffix)
def __init__(self, smpls, obs_conf):
self._smpls = smpls
evt_kinds = sample.evt_kinds(smpls)
self._smpl_events = util.ffilter(map(class_lookup, evt_kinds))
self._obs_conf = obs_conf
self._tmpl = None
self._mq = None
self._cur_mtd = None
# classes that are involved in this pattern
self._clss = {} # { E1: [C1, D1], E2: [C2, D1], ... }
# event name to aux class name
self._evts = {} # { E1: Aux1, E2: Aux2, ... }
# class name to aux class names
self._auxs = {} # { C1: [Aux1], D1: [Aux1, Aux2], C2: [Aux2], ... }
# (subjectCall) methods to aux class name
self._subj_mtds = {} # { M1: [Aux1], M2: [Aux1, Aux2], ... }
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
# find possible classes for @Subject and @Observer
# so as to build self._clss and self._auxs
# at this point, assume those are annotated with @ObserverPattern(E+)
def find_clss_involved_w_anno_evt(self, tmpl):
for cls in util.flatten_classes(tmpl.classes, "inners"):
if not util.exists(Observer.find_obs(), cls.annos): continue
# ignore interface without implementers
if cls.is_itf and not cls.subs:
logging.debug("ignore {} due to no implementers".format(cls.name))
continue
events = util.find(Observer.find_obs(), cls.annos).events
for event in events:
cls_e = class_lookup(event)
if not cls_e: continue
for cls_smpl_e in self._smpl_events:
if cls_smpl_e <= cls_e: # subtype appears in the samples
util.mk_or_append(self._clss, event, cls)
for event in self._clss.keys():
# if # of candidates is less than 2, ignore that event
if len(self._clss[event]) < 2:
logging.debug("ignore {} {}".format(event, self._clss[event]))
del self._clss[event]
del tmpl.events[event]
continue
aux_name = Observer.new_aux(event)
tmpl.obs_auxs[aux_name] = self._clss[event]
self._evts[event] = aux_name
logging.debug("{}: {} {}".format(event, aux_name, self._clss[event]))
for cls in self._clss[event]:
util.mk_or_append(self._auxs, cls.name, aux_name)
if cls.outer:
util.mk_or_append(self._auxs, unicode(repr(cls)), aux_name)
# find possible classes for @Subject and @Observer
# so as to build self._clss and self._auxs
# at this point, assume those are annotated with @ObserverPattern
def find_clss_involved_w_anno(self, tmpl):
logging.debug("target events: {}".format(self._smpl_events))
for cls in util.flatten_classes(tmpl.classes, "inners"):
if not util.exists(Observer.find_obs(), cls.annos): continue
# ignore interface without implementers
if cls.is_itf and not cls.subs:
logging.debug("ignore {} due to no implementers".format(cls.name))
continue
involved_clss = map(class_lookup, cls.param_typs)
for cls_e in self._smpl_events:
for cls_i in involved_clss:
if cls_i and cls_e <= cls_i:
util.mk_or_append(self._clss, cls_e.name, cls)
for event in self._clss.keys():
# if # of candidates is less than 2, ignore that event
if len(self._clss[event]) < 2:
logging.debug("ignore {} {}".format(event, self._clss[event]))
del self._clss[event]
del tmpl.events[event]
continue
aux_name = Observer.new_aux(event)
tmpl.obs_auxs[aux_name] = self._clss[event]
self._evts[event] = aux_name
logging.debug("{}: {} {}".format(event, aux_name, self._clss[event]))
for cls in self._clss[event]:
util.mk_or_append(self._auxs, cls.name, aux_name)
if cls.outer:
util.mk_or_append(self._auxs, unicode(repr(cls)), aux_name)
# find possible classes for @Subject and @Observer
# so as to build self._clss and self._auxs
# at this point, annotations are no longer used
def find_clss_involved_wo_anno(self, tmpl):
event = C.ADR.MSG
self._clss[event] = []
for cls in util.flatten_classes(tmpl.classes, "inners"):
if not util.exists(Observer.find_obs(), cls.annos): continue
# ignore interface without implementers
if cls.is_itf and not cls.subs:
logging.debug("ignore {} due to no implementers".format(cls.name))
continue
util.mk_or_append(self._clss, event, cls)
for e in tmpl.events.keys():
if e != event:
del tmpl.events[e]
aux_name = Observer.new_aux(event)
tmpl.obs_auxs[aux_name] = self._clss[event]
self._evts[event] = aux_name
logging.debug("{}: {} {}".format(event, aux_name, self._clss[event]))
for cls in self._clss[event]:
util.mk_or_append(self._auxs, cls.name, aux_name)
if cls.outer:
util.mk_or_append(self._auxs, unicode(repr(cls)), aux_name)
# subtype based lookup
@staticmethod
def subtype_lookup(dic, ty):
_ty = util.sanitize_ty(ty)
if _ty in dic: return dic[_ty]
cls = class_lookup(ty)
if not cls: return None
if cls.itfs:
for itf in cls.itfs:
res = Observer.subtype_lookup(dic, itf)
if res: return res
if cls.sup: return Observer.subtype_lookup(dic, cls.sup)
return None
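# Illustrative example (class names hypothetical): with dic = {"Subject": aux1},
# subtype_lookup(dic, "ConcreteSubject") first tries the sanitized name itself,
# then walks the interfaces and superclass chain of ConcreteSubject until a
# mapped ancestor such as "Subject" is hit, returning aux1 (or None if no
# ancestor is mapped).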
# find the corresponding aux type based on subtypes
def find_aux(self, ty):
return Observer.subtype_lookup(self._auxs, ty)
## @ObserverPattern(E)
## class C { ... }
## class D { ... void update(E obj2); ... }
## class E { ... T gettype(); ...}
## =>
## class C { @Subject(D, E, update) ... }
## class D { @Observer ... }
## class E { @Event ... }
@staticmethod
def check_rule1(aux, conf):
rule = Method(clazz=aux, mods=[C.mod.ST, C.mod.HN], name=u"checkRule1")
body = u"""
assert {aux.subject} != {aux.observer};
""".format(**locals())
if conf[0] < 2:
body += u"""
assert subcls(belongsTo({aux.update}), {aux.observer});
assert 1 <= (argNum({aux.update}));
//assert subcls({aux.event}, argType({aux.update}, 0));
""".format(**locals())
else:
body += u"""
assert subcls(belongsTo({aux.eventtype}), {aux.event});
assert 0 == (argNum({aux.eventtype}));
""".format(**locals())
for i in xrange(conf[0]):
aux_up = getattr(aux, "update_"+str(i))
body += u"""
assert subcls(belongsTo({aux_up}), {aux.observer});
assert 1 == (argNum({aux_up}));
assert subcls({aux.event}, argType({aux_up}, 0));
""".format(**locals())
for i, j in combinations(range(conf[0]), 2):
aux_up_i = getattr(aux, "update_"+str(i))
aux_up_j = getattr(aux, "update_"+str(j))
body += u"assert {aux_up_i} != {aux_up_j};".format(**locals())
rule.body = to_statements(rule, body)
aux.add_mtds([rule])
## @Subject(D, E, update)
## void M1(D obj1){}
## void M2(D obj2){}
## void M3(E obj3){}
## =>
## List<D> _obs;
## void M1(D obj1) { @Attach(obj1, _obs) }
## void M2(D obj2) { @Detach(obj2, _obs) }
## void M3(E obj3) { @Handle(D, update, obj3, _obs) }
@staticmethod
def check_rule2(aux, conf):
rule = Method(clazz=aux, mods=[C.mod.ST, C.mod.HN], name=u"checkRule2")
body = u""
if conf[1] > 0:
body += u"""
assert subcls(belongsTo({aux.attach}), {aux.subject});
assert 1 == (argNum({aux.attach}));
assert subcls({aux.observer}, argType({aux.attach}, 0));
""".format(**locals())
if conf[2] > 0:
body += u"""
assert subcls(belongsTo({aux.detach}), {aux.subject});
assert 1 == (argNum({aux.detach}));
assert subcls({aux.observer}, argType({aux.detach}, 0));
""".format(**locals())
if conf[1] > 0 and conf[2] > 0:
body += u"""
assert {aux.attach} != {aux.detach};
""".format(**locals())
def handle_related(aux, hdl):
constraints = u"""
assert subcls(belongsTo({hdl}), {aux.subject});
assert 1 == (argNum({hdl}));
assert subcls({aux.event}, argType({hdl}, 0));
""".format(**locals())
if conf[1] > 0:
constraints += u"""
assert {hdl} != {aux.attach};
""".format(**locals())
if conf[2] > 0:
constraints += u"""
assert {hdl} != {aux.detach};
""".format(**locals())
return constraints
if conf[0] < 2:
body += handle_related(aux, aux.handle)
else:
for i in xrange(conf[0]):
aux_hdl = getattr(aux, "handle_"+str(i))
body += handle_related(aux, aux_hdl)
for i, j in combinations(range(conf[0]), 2):
aux_hdl_i = getattr(aux, "handle_"+str(i))
aux_hdl_j = getattr(aux, "handle_"+str(j))
body += u"assert {aux_hdl_i} != {aux_hdl_j};".format(**locals())
rule.body = to_statements(rule, body)
aux.add_mtds([rule])
# assume candidate methods will be neither <init> nor static
# and have at least one parameter whose type is of interest (if any)
def is_candidate_mtd(self, aux, mtd):
if mtd.is_init or mtd.is_static: return False
for (ty, _) in mtd.params:
cls_ty = class_lookup(ty)
if not cls_ty: continue
for cls in aux.subs + [aux.evt]:
if cls_ty <= cls: return True
# events are allowed to be downcasted
if aux.evt <= cls_ty: return True
return False
# retrieve candidate methods
def get_candidate_mtds(self, aux, cls):
mtds = cls.mtds
# if it's an interface with implementers
if cls.is_itf and cls.subs:
# collect all sub-classes
subss = util.flatten_classes(cls.subs, "subs")
# filter out sub-interfaces (e.g., Action < ActionListener)
subss, _ = util.partition(lambda c: c.is_class, subss)
# then collect actual methods from those sub-classes
mtds = util.flatten(map(op.attrgetter("mtds"), subss))
return filter(partial(self.is_candidate_mtd, aux), mtds)
# common params for methods in Aux...
@staticmethod
def mtd_params(aux):
aname, ename = aux.name, aux.evt.name
rcv = u'_'.join(["rcv", aname])
return [(aname, rcv), (aname, u"arg"), (ename, u"evt")]
# restrict call stack for the given method via a global counter
@staticmethod
def limit_depth(aux, mtd, depth):
fname = mtd.name + "_depth"
z = to_expression(u"0")
d = Field(clazz=aux, mods=C.PRST, typ=C.J.i, name=fname, init=z)
aux.add_flds([d])
ret = u"return" if mtd.typ == C.J.v else u"return null"
prologue = to_statements(mtd, u"""
if ({fname} > {depth}) {ret};
{fname} = {fname} + 1;
""".format(**locals()))
epilogue = to_statements(mtd, u"""
{fname} = {fname} - 1;
""".format(**locals()))
mtd.body = prologue + mtd.body + epilogue
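# Generated-code sketch (assuming a void method named "reflect" and depth 2):
# the prologue/epilogue above effectively wrap the original body as
#
#     if (reflect_depth > 2) return;
#     reflect_depth = reflect_depth + 1;
#     ... original body ...
#     reflect_depth = reflect_depth - 1;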
# event type getter
def egetter(self, aux, clss):
aname, ename = aux.name, aux.evt.name
rcv = u'_'.join(["rcv", ename])
params = [(C.J.i, u"mtd_id"), (ename, rcv)]
egetter = Method(clazz=aux, mods=C.PBST, typ=u"Object", params=params, name=u"egetter")
def switch( (cls, other) ):
mtds = cls.mtds
for mtd in mtds: util.mk_or_append(self._subj_mtds, repr(mtd), aux)
logging.debug("{}.{}, {}, {}, {}".format(aux.name, egetter.name, repr(cls), repr(other), mtds))
def invoke(mtd):
if mtd.typ == u"void": return u''
cls = mtd.clazz
# if there is no implementer for this method in interface, ignore it
if cls.is_itf and not cls.subs: return u''
#actual_params = [(other.name, u"arg")] + [params[-1]]
#args = u", ".join(sig_match(mtd.params, actual_params))
call = u"return rcv_{}.{}();".format(ename, mtd.name)
return u"if (mtd_id == {mtd.id}) {{ {call} }}".format(**locals())
invocations = util.ffilter(map(invoke, mtds))
return u"\nelse ".join(invocations)
tests = util.ffilter([switch((aux.evt, aux.evt))])
egetter.body = to_statements(egetter, u"\nelse ".join(tests))
Observer.limit_depth(aux, egetter, 2)
aux.add_mtds([egetter])
setattr(aux, "egetter", egetter)
# a method that simulates reflection
def reflect(self, aux, clss, conf):
params = [(C.J.i, u"mtd_id")] + Observer.mtd_params(aux)
reflect = Method(clazz=aux, mods=C.PBST, params=params, name=u"reflect")
def switch( (cls, other) ):
mtds = self.get_candidate_mtds(aux, cls)
for mtd in mtds: util.mk_or_append(self._subj_mtds, repr(mtd), aux)
logging.debug("{}.{}, {}, {}, {}".format(aux.name, reflect.name, repr(cls), repr(other), mtds))
def invoke(mtd):
cls = mtd.clazz
# if there is no implementer for this method in interface, ignore it
if cls.is_itf and not cls.subs: return u''
actual_params = [(other.name, u"arg")] + [params[-1]]
args = u", ".join(sig_match(mtd.params, actual_params))
casted_rcv = u"({})rcv_{}".format(mtd.clazz.name, aux.name)
call = u"({}).{}({});".format(casted_rcv, mtd.name, args)
return u"if (mtd_id == {mtd.id}) {{ {call} }}".format(**locals())
invocations = util.ffilter(map(invoke, mtds))
body = u"\nelse ".join(invocations)
if conf[0] >= 2:
hdl, mtd = getattr(aux, "handle"), getattr(aux, "mtd_handle")
args = u", ".join(sig_match(mtd.params, params))
call = u"{}.{}({});".format(aux.name, mtd.name, args)
body += u"\nelse if (mtd_id == {hdl}) {{ {call} }}".format(**locals())
return body
tests = util.ffilter(map(switch, permutations(clss, 2)))
reflect.body = to_statements(reflect, u"\nelse ".join(tests))
depth = 3 if conf[0] >= 2 else 2
Observer.limit_depth(aux, reflect, depth)
aux.add_mtds([reflect])
setattr(aux, "reflect", reflect)
# add a list of @Observer, along with an initializing statement
@staticmethod
def add_obs(aux, clss):
typ = u"{}<{}>".format(C.J.LST, C.J.OBJ)
obs = Field(clazz=aux, typ=typ, name=C.OBS.obs)
aux.add_flds([obs])
setattr(aux, "obs", obs)
tmp = '_'.join([C.OBS.tmp, aux.name])
for cls in clss:
if cls.is_itf: continue
for mtd in cls.inits:
body = u"""
{0} {1} = ({0})this;
{1}.{2} = new {3}();
""".format(aux.name, tmp, C.OBS.obs, typ)
mtd.body.extend(to_statements(mtd, body))
# attach code
@staticmethod
def attach(aux):
params = Observer.mtd_params(aux)
attach = Method(clazz=aux, mods=C.PBST, params=params, name=u"attachCode")
add = u"rcv_{}.{}.add(arg);".format(aux.name, C.OBS.obs)
attach.body = to_statements(attach, add)
aux.add_mtds([attach])
setattr(aux, "mtd_attach", attach)
# detach code
@staticmethod
def detach(aux):
params = Observer.mtd_params(aux)
detach = Method(clazz=aux, mods=C.PBST, params=params, name=u"detachCode")
rm = u"rcv_{}.{}.remove(arg);".format(aux.name, C.OBS.obs)
detach.body = to_statements(detach, rm)
aux.add_mtds([detach])
setattr(aux, "mtd_detach", detach)
# upper-level handle code
@staticmethod
def sub_handle(aux, idx):
params = Observer.mtd_params(aux)
handle = Method(clazz=aux, mods=C.PBST, params=params, name=u"subHandleCode")
cnt = Observer.__cnt
aname = aux.name
reflect = u"reflect" #getattr(aux, "reflect").name
loop = u"""
if (evt instanceof {aux.evt.name}) {{
List<Object> obs{cnt} = rcv_{aname}._obs;
for ({aname} o : obs{cnt}) {{
{aname}.{reflect}({aux.update}_{idx}, o, rcv_{aname}, ({aux.evt.name})evt);
}}
}}
""".format(**locals())
handle.body = to_statements(handle, loop)
aux.add_mtds([handle])
setattr(aux, "mtd_sub_handle", handle)
# handle code
@staticmethod
def handle(aux, conf):
ename = aux.evt.name
params = Observer.mtd_params(aux)
handle = Method(clazz=aux, mods=C.PBST, params=params, name=u"handleCode")
reflect = u"reflect" #getattr(aux, "reflect").name
if conf[0] >= 2: egetter = getattr(aux, "egetter").name
aname = aux.name
args = u", ".join(map(lambda (ty, nm): nm, params))
def handle_body(aux, role):
aname, evtname = aux.name, aux.evt.name
cnt = Observer.fresh_cnt()
loop = u"""
if (evt instanceof {evtname}) {{
List<Object> obs{cnt} = rcv_{aname}._obs;
for ({aname} o : obs{cnt}) {{
{aname}.reflect({role}, o, rcv_{aname}, ({evtname})evt);
}}
}}
""".format(**locals())
return loop
def handle_mtd(i):
handle_i = Method(clazz=aux, mods=C.PBST, params=params, name=u"handleCode_{i}".format(**locals()))
body_i = handle_body(aux, getattr(aux, "update_"+str(i)))
handle_i.body = to_statements(handle_i, body_i)
setattr(aux, "mtd_handle_"+str(i), handle_i)
return handle_i
if conf[0] < 2:
cnt = Observer.fresh_cnt()
body = handle_body(aux, aux.update)
handle.body = to_statements(handle, body)
else:
aux.add_mtds(map(handle_mtd, range(conf[0])))
evt_cls = class_lookup(aux.evt.name)
const_flds = []
if evt_cls.inners:
for inner in evt_cls.inners:
const_flds.extend(filter(lambda f: f.is_final and f.is_static, inner.flds))
evtyp = evt_cls.inners[0].name
evt_id = getattr(aux, u"eventtype")
get_type = u"""
{evtyp} et = {aname}.{egetter}({evt_id}, evt);
""".format(**locals())
def handle_switch(i):
evt_cls = class_lookup(aux.evt.name)
evtyp = evt_cls.inners[0].name
aname = aux.name
reflect = u"reflect" #getattr(aux, "reflect").name
cns_typ = '.'.join([evtyp, const_flds[i].name])
hdl_id = getattr(aux, u"handle_"+unicode(i))
params = Observer.mtd_params(aux)
args = u", ".join(map(lambda (ty, nm): nm, params))
return u"""
if (et == {cns_typ}) {aname}.{reflect}({hdl_id}, {args});
""".format(**locals())
choose = u"\nelse ".join(map(handle_switch, range(len(const_flds))))
handle.body = to_statements(handle, get_type + choose)
aux.add_mtds([handle])
setattr(aux, "mtd_handle", handle)
# add a role variable for the handle method
if conf[0] >= 2:
c_to_e = lambda c: to_expression(unicode(c))
new_fld = Field(clazz=aux, mods=[C.mod.ST], typ=C.J.i, name=getattr(aux, C.OBS.H), init=c_to_e(handle.id))
aux.add_flds([new_fld])
# attach/detach/handle will be dispatched here
@staticmethod
def subjectCall(aux, conf):
params = [(C.J.i, u"mtd_id")] + Observer.mtd_params(aux)
one = Method(clazz=aux, mods=C.PBST, params=params, name=u"subjectCall")
def switch(role):
aname = aux.name
args = ", ".join(map(lambda (ty, nm): nm, params[1:]))
v = getattr(aux, role)
f = getattr(aux, "mtd_"+role).name
return u"if (mtd_id == {v}) {aname}.{f}({args});".format(**locals())
roles = [C.OBS.H]
if conf[0] >= 2: map(lambda i: roles.append('_'.join([C.OBS.H, str(i)])), range(conf[0]))
if conf[1] > 0: roles.append(C.OBS.A)
if conf[2] > 0: roles.append(C.OBS.D)
one.body = to_statements(one, u'\n'.join(map(switch, roles)))
Observer.limit_depth(aux, one, 2)
aux.add_mtds([one])
setattr(aux, "one", one)
##
## generate an aux type for @Subject and @Observer
##
def gen_aux_cls(self, event, conf, clss):
aux_name = self._evts[event]
aux = merge_flat(aux_name, clss)
aux.mods = [C.mod.PB]
aux.subs = clss # virtual relations; to find proper methods
setattr(aux, "evt", class_lookup(event))
def extend_itf(cls):
_clss = [cls]
if cls.is_itf and cls.subs: _clss.extend(cls.subs)
return _clss
ext_clss = util.rm_dup(util.flatten(map(extend_itf, clss)))
# add a list of @Observer into candidate classes
self.add_obs(aux, ext_clss)
# set role variables
def set_role(role):
setattr(aux, role, '_'.join([role, aux.name]))
for r in C.obs_roles:
if r == C.OBS.H or r == C.OBS.U:
if conf[0] < 2: set_role(r)
else:
set_role(r)
map(lambda i: set_role('_'.join([r, str(i)])), range(conf[0]))
elif r == C.OBS.A:
if conf[1] > 0: set_role(r)
elif r == C.OBS.D:
if conf[2] > 0: set_role(r)
else:
set_role(r)
# add fields that stand for non-deterministic rule choices
def aux_fld(init, ty, nm):
if hasattr(aux, nm): nm = getattr(aux, nm)
return Field(clazz=aux, mods=[C.mod.ST], typ=ty, name=nm, init=init)
hole = to_expression(C.T.HOLE)
aux_int = partial(aux_fld, hole, C.J.i)
c_to_e = lambda c: to_expression(unicode(c))
# if explicitly annotated, use those concrete event names
if self._tmpl.is_event_annotated:
ev_init = c_to_e(aux.evt.id)
role_var_evt = aux_fld(ev_init, C.J.i, C.OBS.EVT)
else: # o.w., introduce a role variable for event
role_var_evt = aux_int(C.OBS.EVT)
aux.add_flds([role_var_evt])
## range check
gen_range = lambda ids: gen_E_gen(map(c_to_e, util.rm_dup(ids)))
get_id = op.attrgetter("id")
# range check for classes
cls_vars = [C.OBS.OBSR, C.OBS.SUBJ]
cls_ids = map(get_id, clss)
cls_init = gen_range(cls_ids)
aux_int_cls = partial(aux_fld, cls_init, C.J.i)
aux.add_flds(map(aux_int_cls, cls_vars))
# range check for methods
mtd_vars = []
if conf[1] > 0: mtd_vars.append(C.OBS.A)
if conf[2] > 0: mtd_vars.append(C.OBS.D)
for r in [C.OBS.H, C.OBS.U]:
if conf[0] < 2: mtd_vars.append(r)
else: map(lambda i: mtd_vars.append('_'.join([r, str(i)])), range(conf[0]))
mtds = util.flatten(map(partial(self.get_candidate_mtds, aux), clss))
mtd_ids = map(get_id, mtds)
mtd_init = gen_range(mtd_ids)
aux_int_mtd = partial(aux_fld, mtd_init, C.J.i)
aux.add_flds(map(aux_int_mtd, mtd_vars))
# range check for event type getter
if conf[0] >= 2:
evt_mtds = aux.evt.mtds
evt_mtd_init = gen_range(map(get_id, evt_mtds))
aux.add_flds([aux_fld(evt_mtd_init, C.J.i, C.OBS.EVTTYP)])
## rules regarding non-deterministic rewritings
Observer.check_rule1(aux, conf)
Observer.check_rule2(aux, conf)
if conf[0] >= 2: self.egetter(aux, clss)
Observer.handle(aux, conf)
Observer.attach(aux)
Observer.detach(aux)
Observer.subjectCall(aux, conf)
self.reflect(aux, clss, conf)
add_artifacts([aux.name])
return aux
# add a message queue
@staticmethod
def add_message_queue(cls):
mq_typ = u"Queue<{}>".format(C.ADR.MSG)
mq_name = u"_msq_queue"
mq = Field(clazz=cls, typ=mq_typ, name=mq_name)
cls.add_flds([mq])
setattr(cls, "mq", mq)
cls.init_fld(mq)
@v.when(Template)
def visit(self, node):
self._tmpl = node
if not node.events: return
self._mq = class_lookup(C.ADR.QUE)
# build mappings from event kinds to involved classes
if self._tmpl.is_event_annotated:
self.find_clss_involved_w_anno_evt(node)
else:
self.find_clss_involved_w_anno(node)
# introduce AuxObserver$n$ for @Subject and @Observer
for event in self._clss:
clss = self._clss[event]
node.add_classes([self.gen_aux_cls(event, self._obs_conf[event], clss)])
# add a message queue
Observer.add_message_queue(self._mq)
# add type conversion mappings
trimmed_auxs = {}
for k in self._auxs: trimmed_auxs[k] = self._auxs[k][0]
add_ty_map(trimmed_auxs)
@v.when(Clazz)
def visit(self, node): pass
@v.when(Field)
def visit(self, node): pass
@v.when(Method)
def visit(self, node):
self._cur_mtd = node
# special methods
if node.clazz.name == C.ADR.QUE:
mq = self._mq
# MessageQueue.next
if node.name == "next":
body = u"if (this != null) return ({}){}.remove(); else return null;".format(node.typ, mq.mq.name)
node.body = to_statements(node, body)
# MessageQueue.enqueueMessage
elif "enqueue" in node.name:
_, msg = node.params[0]
body = u"if (this != null) {{ {}.add({}); return true; }} return false;".format(mq.mq.name, msg)
node.body = to_statements(node, body)
elif node.clazz.name == C.ADR.HDL:
# Handler.dispatchMessage
if "dispatch" in node.name:
_, msg = node.params[0]
# TODO: should be placed at dispatch... in Window(Manager)'s Handler
cls_ievt = class_lookup(u"InputEvent")
switches = u''
for event, i in self._tmpl.events.iteritems():
if event not in self._clss: continue
cls_h = class_lookup(self._evts[event])
cls_h_name = cls_h.name
reflect = cls_h.reflect.name
hdl = '.'.join([cls_h_name, cls_h.handle])
rcv_retrieval = u''
cls_evt = class_lookup(event)
if cls_evt <= cls_ievt: # InputEvent, KeyEvent, MotionEvent
rcv_retrieval = u"""
// XXX: View-specific source retrieval
{event} evt_{i} = ({event})({msg}.obj);
int id_{i} = evt_{i}.getSource();
ActivityThread t_{i} = ActivityThread.currentActivityThread();
Activity act_{i} = t_{i}.getActivity();
View rcv_{i} = act_{i}.findViewById(id_{i});
""".format(**locals())
else: # TODO: how to retrieve the source of the event in general?
rcv_retrieval = u"""
{cls_h_name} rcv_{i} = ({cls_h_name}){msg}.getSource();
""".format(**locals())
cond_call = u"""
else if ({msg}_k == {i}) {{
{rcv_retrieval}
{cls_h_name}.{reflect}({hdl}, rcv_{i}, null, evt_{i});
}}
""".format(**locals())
switches += cond_call
# TODO: should be placed at dispatch... in ActivityManager's Handler
act_conds = []
acts = self._tmpl.find_cls_kind(C.ADR.ACT)
for act in acts:
cond_new = u"""
if (act_name.equals(\"{act.name}\")) {{
act = new {act.name}();
(({act.name})act).onCreate(null);
}}
""".format(**locals())
act_conds.append(cond_new)
act_switches = u"\nelse ".join(act_conds)
body = u"""
if ({msg} == null) return;
int {msg}_k = {msg}.what;
if ({msg}_k == -1) {{ // Intent
Intent i = (Intent)({msg}.obj);
ComponentName c = i.getComponent();
String act_name = c.getClassName();
Activity act;
// TODO: reflective Activity instance generation
//Class cls = Class.forName(act_name);
//act = cls.newInstance();
{act_switches}
// TODO: should be pushed and maintained by ActivityStack
ActivityThread t = ActivityThread.currentActivityThread();
t._activity = act;
}} {switches}
""".format(**locals())
node.body = to_statements(node, body)
# for methods that are candidates of @Attach/@Detach/@Handle
if node.clazz.is_itf: return
if repr(node) in self._subj_mtds:
cname = node.clazz.name
for aux in self._subj_mtds[repr(node)]:
logging.debug("{}.{} => {}.subjectCall".format(cname, node.name, aux.name))
if node.is_static: params = node.params
else: params = [(cname, C.J.THIS)] + node.params
one_params = [(C.J.i, unicode(node.id))]
for (ty, nm) in params:
cls_ty = class_lookup(ty)
# downcast an abstracted (superclass) event to actual event
if aux.evt <= cls_ty:
one_params.append( (aux.evt.name, nm) )
elif self.find_aux(ty):
one_params.append( (aux.name, nm) )
else:
one_params.append( (ty, nm) )
body = u"{};".format(call_stt(aux.one, one_params))
node.body = to_statements(node, body) + node.body
@v.when(Statement)
def visit(self, node): return [node]
## @React
## =>
## Message m = q.next();
## Handler h = m.getTarget();
## h.dispatchMessage(m);
# NOTE: assume @React is in @Harness only; and then use variable q there
@v.when(Expression)
def visit(self, node):
if node.kind == C.E.ANNO:
_anno = node.anno
if _anno.name == C.A.REACT:
logging.debug("reducing: {}".format(str(_anno)))
suffix = Observer.fresh_cnt()
body = u"""
{1} msg{0} = q.next();
{2} hdl{0} = msg{0}.getTarget();
hdl{0}.dispatch{1}(msg{0});
""".format(suffix, C.ADR.MSG, C.ADR.HDL)
return to_statements(self._cur_mtd, body)
return node
| plum-umd/pasket | pasket/rewrite/android/observer.py | Python | mit | 29,595 | ["VisIt"] | 89dd6204fb174488d7804bd7c4d06503e173fba1370206b6ad53e2dffae7ba30 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from subprocess import call
import numpy as np
import sys
def mms_cases(h_list, string_list):
for h in h_list:
n = int(1/h)
dt = h / 1.4
for string in string_list:
# args = ["mpirun", "-np", str(1), "navier_stokes-opt", "-i", "2d_advection_error_testing.i",
args = ["lldb", "--", "navier_stokes-dbg", "-i", "2d_advection_error_testing.i",
"Mesh/nx=%s" % n,
"Mesh/ny=%s" % n,
# "Outputs/file_base=%s_%sx%s" % (string, n, n),
"Outputs/file_base=debug",
"Executioner/TimeStepper/dt=%s" % dt,
"Executioner/num_steps=1000000",
"Executioner/trans_ss_check=true",
"Executioner/ss_check_tol=1e-10"]
call(args)
arg_string = sys.argv[1] if 1 < len(sys.argv) else None
strings = ['_'.join(filter(None, ('stabilized', arg_string)))] # , '_'.join(filter(None, ('unstabilized', sys.argv[1])))]
h_list = np.array([.5,
.25,
.125,
.0625,
.03125])
# .015625,
# .0078125,
# .00390625])
mms_cases(h_list, strings)
| nuclear-wizard/moose | modules/navier_stokes/test/tests/scalar_adr/supg/run_cases.py | Python | lgpl-2.1 | 1,565 | ["MOOSE"] | 9c648df295cd06ef79ecbbe453d69e4bd46f4fe8341b7d23017f8fd1349c529d |
class InputSection(object):
"""Base class for all the automatically created classes that represents
sections in the CP2K input structure.
"""
def __init__(self):
self._name = None
self._keywords = {}
self._repeated_keywords = {}
self._default_keywords = {}
self._repeated_default_keywords = {}
self._subsections = {}
self._repeated_subsections = {}
self._aliases = {}
self._repeated_aliases = {}
self._attributes = []
def __getattr__(self, attr):
"""Called when self.attr doesn't exist
"""
message = (
"The attribute {0} does not exist. This is either a typo (remember"
" that section names should be in uppercase, and keywords should be"
" capitalized) or you are trying to access a repeatable item that"
" should be first added with {0}_add() which returns the newly"
" added object for that section."
).format(attr)
raise AttributeError(message)
def _format_variable(self, item):
# The boolean values are reformatted
if isinstance(item, bool):
if item:
item = "TRUE"
else:
item = "FALSE"
return str(item)
def _parse_default_keyword(self, item, level):
"""Parses default keywords into sensible input sections."""
if type(item) is list:
output = (level + 1) * " "
for i, value in enumerate(item):
output += str(value)
if i != len(item)-1:
output += " "
output += "\n"
return output
else:
return (level + 1) * " " + self._format_variable(item) + "\n"
def _parse_repeatable_default_keyword(self, item, level):
"""Parses repeatable default keywords into sensible input sections."""
if type(item) is list:
output = ""
for i, value in enumerate(item):
output += (level + 1) * " "
if type(value) is list:
for j, sub_value in enumerate(value):
output += str(sub_value)
if j != len(value)-1:
output += " "
else:
output += self._format_variable(value)
output += "\n"
return output
else:
return (level + 1) * " " + self._format_variable(item) + "\n"
def _parse_keyword(self, item, name, level):
"""Parses non-repeatable keywords into sensible input sections."""
if type(item) is list:
output = (level + 1) * " " + name
for value in item:
if type(value) is list:
for sub_value in value:
output += " " + str(sub_value)
else:
output += " " + str(value)
output += "\n"
return output
else:
return (level + 1) * " " + name + " " + self._format_variable(item) + "\n"
def _parse_repeatable_keyword(self, item, name, level):
"""Parses repeatable keywords into sensible input sections."""
if type(item) is list:
output = ""
for i, value in enumerate(item):
output += (level + 1) * " " + name
if type(value) is list:
for sub_value in value:
output += " " + str(sub_value)
else:
output += " " + str(value)
output += "\n"
return output
else:
return (level + 1) * " " + name + " " + self._format_variable(item) + "\n"
def _check_typos(self):
for attribute in self.__dict__.keys():
typos_found = True
if attribute in iter(self._keywords.keys()):
typos_found = False
elif attribute in iter(self._repeated_keywords.keys()):
typos_found = False
elif attribute in iter(self._subsections.keys()):
typos_found = False
elif attribute in iter(self._repeated_subsections.keys()):
typos_found = False
elif attribute in iter(self._aliases.keys()):
typos_found = False
elif attribute in iter(self._repeated_aliases.keys()):
typos_found = False
elif attribute in self._attributes:
typos_found = False
elif attribute[0] == "_":
typos_found = False
if typos_found:
raise Exception((
"Nonexisting keyword '{}' defined in CP2K input tree"
" section '{}'. This might be a typo (remember that section"
" names should be in uppercase, and keywords should be"
" capitalized)."
).format(attribute, self._name))
def _print_input(self, level):
# Check if any undefined items have been created. These are usually typos.
self._check_typos()
inp = ""
# Non-repeatable default keywords
for attname, realname in self._default_keywords.items():
value = self.__dict__[attname]
if value is not None:
if not (type(value) is list and not value):
parsed = self._parse_default_keyword(value, level)
inp += parsed
# Repeatable default keywords
for attname, realname in self._repeated_default_keywords.items():
keyword = self.__dict__[attname]
if keyword is not None:
if not (type(keyword) is list and not keyword):
parsed = self._parse_repeatable_default_keyword(keyword, level)
inp += parsed
# Non-repeatable keywords
for attname, realname in self._keywords.items():
value = self.__dict__[attname]
if value is not None:
if not (type(value) is list and not value):
parsed = self._parse_keyword(value, realname, level)
inp += parsed
# Repeatable keywords
for attname, realname in self._repeated_keywords.items():
keyword = self.__dict__[attname]
if keyword is not None:
if not (type(keyword) is list and not keyword):
parsed = self._parse_repeatable_keyword(keyword, realname, level)
inp += parsed
# Non-repeatable subsections
for attname, realname in self._subsections.items():
value = self.__dict__[attname]
substring = value._print_input(level + 1)
if substring != "":
inp += substring + "\n"
# Repeatable subsections
for attname, realname in self._repeated_subsections.items():
for subsection in self.__dict__[attname + "_list"]:
if subsection is not None:
substring = subsection._print_input(level + 1)
if substring != "":
inp += substring + "\n"
# Don't print the CP2K_INPUT root
if level != -1:
# Header and footer
has_section_parameter = False
inp_header = level * " " + "&" + self._name
if hasattr(self, "Section_parameters"):
if self.Section_parameters is not None:
parsed = self._parse_default_keyword(self.Section_parameters, -1)
inp_header += " " + parsed
has_section_parameter = True
if not has_section_parameter:
inp_header += "\n"
inp_footer = level * " " + "&END " + self._name
if not has_section_parameter and inp == "":
return ""
else:
return inp_header + inp + inp_footer
else:
return inp
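# Minimal illustrative sketch (hand-written, not part of pycp2k's generated
# tree): a subclass only has to fill in _name and the bookkeeping dicts for
# _print_input() to emit a "&SECTION ... &END SECTION" block.
#
#     class _Demo(InputSection):
#         def __init__(self):
#             InputSection.__init__(self)
#             self._name = "DEMO"
#             self._keywords = {"Cutoff": "CUTOFF"}
#             self.Cutoff = 400
#
#     print(_Demo()._print_input(0))
#     # &DEMO
#     #   CUTOFF 400
#     # &END DEMO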
| SINGROUP/pycp2k | pycp2k/inputsection.py | Python | lgpl-3.0 | 8,058 | ["CP2K"] | 5fb4b8005ce0833be49839afe663c398d1cfca2497829c2f388c5a6f41f67e10 |
import os, sys, inspect
import h5py
import numpy as np
import random
import math
import multiprocessing
from Crypto.Random.random import randint
import gc
import resource
# Visualization
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
# from mayavi import mlab
# from mayavi.core.ui.mayavi_scene import MayaviScene
# import volume_slicer
# Load the configuration file
import config
from numpy import float32, int32, uint8, dtype
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.append(cmd_folder)
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], config.caffe_path + "/python")))
if cmd_subfolder not in sys.path:
sys.path.append(cmd_subfolder)
sys.path.append(config.caffe_path + "/python")
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "../../malis")))
if cmd_subfolder not in sys.path:
sys.path.append(cmd_subfolder)
# Ensure correct compilation of Caffe and Pycaffe
if config.library_compile:
cpus = multiprocessing.cpu_count()
cwd = os.getcwd()
os.chdir(config.caffe_path)
result = os.system("make all -j %s" % cpus)
if result != 0:
sys.exit(result)
result = os.system("make pycaffe -j %s" % cpus)
if result != 0:
sys.exit(result)
os.chdir(cwd)
# Import pycaffe
import caffe
import malis as malis
# Import visualization and display
# import visualizer
# Fix up OpenCL variables. Can interfere with the
# frame buffer if the GPU is also a display driver
os.environ["GPU_MAX_ALLOC_PERCENT"] = "100"
os.environ["GPU_SINGLE_ALLOC_PERCENT"] = "100"
os.environ["GPU_MAX_HEAP_SIZE"] = "100"
os.environ["GPU_FORCE_64BIT_PTR"] = "1"
dims = len(config.output_dims)
def normalize(dataset, newmin=-1, newmax=1):
maxval = dataset
while len(maxval.shape) > 0:
maxval = maxval.max(0)
minval = dataset
while len(minval.shape) > 0:
minval = minval.min(0)
return ((dataset - minval) / (maxval - minval)) * (newmax - newmin) + newmin
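# Quick illustrative check (hypothetical input): values are rescaled linearly so
# the global minimum maps to newmin and the global maximum to newmax.
#
#     normalize(np.array([0., 5., 10.]), -1, 1)   # -> array([-1., 0., 1.])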
def error_scale(data, factor_low, factor_high):
scale = np.add((data >= 0.5) * factor_high, (data < 0.5) * factor_low)
return scale
def count_affinity(dataset):
aff_high = np.sum(dataset >= 0.5)
aff_low = np.sum(dataset < 0.5)
return aff_high, aff_low
def border_reflect(dataset, border):
return np.pad(dataset,((border, border)),'reflect')
def inspect_2D_hdf5(hdf5_file):
print 'HDF5 keys: %s' % hdf5_file.keys()
dset = hdf5_file[hdf5_file.keys()[0]]
print 'HDF5 shape: X: %s Y: %s' % dset.shape
print 'HDF5 data type: %s' % dset.dtype
print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0), np.asarray(dset).min(0).min(0)]
def inspect_3D_hdf5(hdf5_file):
print 'HDF5 keys: %s' % hdf5_file.keys()
dset = hdf5_file[hdf5_file.keys()[0]]
print 'HDF5 shape: X: %s Y: %s Z: %s' % dset.shape
print 'HDF5 data type: %s' % dset.dtype
print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0).max(0), np.asarray(dset).min(0).min(0).min(0)]
def inspect_4D_hdf5(hdf5_file):
print 'HDF5 keys: %s' % hdf5_file.keys()
dset = hdf5_file[hdf5_file.keys()[0]]
print 'HDF5 shape: T: %s X: %s Y: %s Z: %s' % dset.shape
print 'HDF5 data type: %s' % dset.dtype
print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0).max(0).max(0), np.asarray(dset).min(0).min(0).min(0).min(0)]
def display_raw(raw_ds, index):
slice = raw_ds[0:raw_ds.shape[0], 0:raw_ds.shape[1], index]
minval = np.min(np.min(slice, axis=1), axis=0)
maxval = np.max(np.max(slice, axis=1), axis=0)
img = Image.fromarray((slice - minval) / (maxval - minval) * 255)
img.show()
def display_con(con_ds, index):
slice = con_ds[0:con_ds.shape[0], 0:con_ds.shape[1], index]
rgbArray = np.zeros((con_ds.shape[0], con_ds.shape[1], 3), 'uint8')
rgbArray[..., 0] = colorsr[slice] * 256
rgbArray[..., 1] = colorsg[slice] * 256
rgbArray[..., 2] = colorsb[slice] * 256
img = Image.fromarray(rgbArray, 'RGB')
img.show()
def display_aff(aff_ds, index):
sliceX = aff_ds[0, 0:520, 0:520, index]
sliceY = aff_ds[1, 0:520, 0:520, index]
sliceZ = aff_ds[2, 0:520, 0:520, index]
img = Image.fromarray((sliceX & sliceY & sliceZ) * 255)
img.show()
def display_binary(bin_ds, index):
slice = bin_ds[0:bin_ds.shape[0], 0:bin_ds.shape[1], index]
img = Image.fromarray(np.uint8(slice * 255))
img.show()
def slice_data(data, offsets, sizes):
if (len(offsets) == 1):
return data[offsets[0]:offsets[0] + sizes[0]]
if (len(offsets) == 2):
return data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1]]
if (len(offsets) == 3):
return data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2]]
if (len(offsets) == 4):
return data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2], offsets[3]:offsets[3] + sizes[3]]
def set_slice_data(data, insert_data, offsets, sizes):
if (len(offsets) == 1):
data[offsets[0]:offsets[0] + sizes[0]] = insert_data
if (len(offsets) == 2):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1]] = insert_data
if (len(offsets) == 3):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2]] = insert_data
if (len(offsets) == 4):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2], offsets[3]:offsets[3] + sizes[3]] = insert_data
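# Usage sketch (hypothetical arrays and shapes): slice_data pulls a block
# starting at `offsets` with extent `sizes`, and set_slice_data writes such a
# block back into a destination array at the given position.
#
#     block = slice_data(data_array, [10, 20, 30], [44, 44, 44])    # 44**3 sub-volume
#     set_slice_data(dest_array, block, [10, 20, 30], [44, 44, 44])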
def sanity_check_net_blobs(net):
for key in net.blobs.keys():
dst = net.blobs[key]
data = np.ndarray.flatten(dst.data[0].copy())
print 'Blob: %s; %s' % (key, data.shape)
failure = False
first = -1
for i in range(0,data.shape[0]):
if abs(data[i]) > 100000:
failure = True
if first == -1:
first = i
print 'Failure, location %d; objective %d' % (i, data[i])
print 'Failure: %s, first at %d' % (failure,first)
if failure:
break;
def process(net, data_arrays, output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
dst = net.blobs['prob']
dummy_slice = [0]
for i in range(0, len(data_arrays)):
data_array = data_arrays[i]
dims = len(data_array.shape)
offsets = []
in_dims = []
out_dims = []
for d in range(0, dims):
offsets += [0]
in_dims += [data_array.shape[d]]
out_dims += [data_array.shape[d] - config.input_padding[d]]
pred_array = np.zeros(tuple([3] + out_dims))
while(True):
data_slice = slice_data(data_array, offsets, [config.output_dims[di] + config.input_padding[di] for di in range(0, dims)])
net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
net.forward()
output = dst.data[0].copy()
print offsets
# while(True):
# blob = raw_input('Blob:')
# fmap = int(raw_input('Enter the feature map:'))
# m = volume_slicer.VolumeSlicer(data=np.squeeze(net.blobs[blob].data[0])[fmap,:,:])
# m.configure_traits()
set_slice_data(pred_array, output, [0] + offsets, [3] + config.output_dims)
incremented = False
for d in range(0, dims):
if (offsets[dims - 1 - d] == out_dims[dims - 1 - d] - config.output_dims[dims - 1 - d]):
# Reset direction
offsets[dims - 1 - d] = 0
else:
# Increment direction
offsets[dims - 1 - d] = min(offsets[dims - 1 - d] + config.output_dims[dims - 1 - d], out_dims[dims - 1 - d] - config.output_dims[dims - 1 - d])
incremented = True
break
# Processed the whole input block
if not incremented:
break
# Save the output
outhdf5 = h5py.File(output_folder+'/'+str(i)+'.h5', 'w')
outdset = outhdf5.create_dataset('main', tuple([3]+out_dims), np.float32, data=pred_array)
outdset.attrs['edges'] = np.string_('-1,0,0;0,-1,0;0,0,-1')
outhdf5.close()
def train(solver, data_arrays, label_arrays, mode='malis'):
losses = []
net = solver.net
if mode == 'malis':
nhood = malis.mknhood3d()
if mode == 'euclid':
nhood = malis.mknhood3d()
if mode == 'malis_aniso':
nhood = malis.mknhood3d_aniso()
if mode == 'euclid_aniso':
nhood = malis.mknhood3d_aniso()
data_slice_cont = np.zeros((1,1,132,132,132), dtype=float32)
label_slice_cont = np.zeros((1,1,44,44,44), dtype=float32)
aff_slice_cont = np.zeros((1,3,44,44,44), dtype=float32)
nhood_cont = np.zeros((1,1,3,3), dtype=float32)
error_scale_cont = np.zeros((1,1,44,44,44), dtype=float32)
dummy_slice = np.ascontiguousarray([0]).astype(float32)
# Loop from current iteration to last iteration
for i in range(solver.iter, solver.max_iter):
# First pick the dataset to train with
dataset = randint(0, len(data_arrays) - 1)
data_array = data_arrays[dataset]
label_array = label_arrays[dataset]
# affinity_array = affinity_arrays[dataset]
offsets = []
for j in range(0, dims):
offsets.append(randint(0, data_array.shape[j] - (config.output_dims[j] + config.input_padding[j])))
# These are the raw data elements
data_slice = slice_data(data_array, offsets, [config.output_dims[di] + config.input_padding[di] for di in range(0, dims)])
# These are the labels (connected components)
label_slice = slice_data(label_array, [offsets[di] + int(math.ceil(config.input_padding[di] / float(2))) for di in range(0, dims)], config.output_dims)
# These are the affinity edge values
# Also recomputing the corresponding labels (connected components)
aff_slice = malis.seg_to_affgraph(label_slice,nhood)
label_slice,ccSizes = malis.connected_components_affgraph(aff_slice,nhood)
print (data_slice[None, None, :]).shape
print (label_slice[None, None, :]).shape
print (aff_slice[None, :]).shape
print (nhood).shape
if mode == 'malis':
np.copyto(data_slice_cont, np.ascontiguousarray(data_slice[None, None, :]).astype(float32))
np.copyto(label_slice_cont, np.ascontiguousarray(label_slice[None, None, :]).astype(float32))
np.copyto(aff_slice_cont, np.ascontiguousarray(aff_slice[None, :]).astype(float32))
np.copyto(nhood_cont, np.ascontiguousarray(nhood[None, None, :]).astype(float32))
net.set_input_arrays(0, data_slice_cont, dummy_slice)
net.set_input_arrays(1, label_slice_cont, dummy_slice)
net.set_input_arrays(2, aff_slice_cont, dummy_slice)
net.set_input_arrays(3, nhood_cont, dummy_slice)
# We pass the raw and affinity array only
if mode == 'euclid':
net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
net.set_input_arrays(1, np.ascontiguousarray(aff_slice[None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
net.set_input_arrays(2, np.ascontiguousarray(error_scale(aff_slice[None, :],1.0,0.045)).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
if mode == 'softmax':
net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
net.set_input_arrays(1, np.ascontiguousarray(label_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
# Single step
loss = solver.step(1)
# Memory clean up and report
print("Memory usage (before GC): %d MiB" % ((resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / (1024)))
while gc.collect():
pass
print("Memory usage (after GC): %d MiB" % ((resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / (1024)))
# m = volume_slicer.VolumeSlicer(data=np.squeeze((net.blobs['Convolution18'].data[0])[0,:,:]))
# m.configure_traits()
print("Loss: %s" % loss)
losses += [loss]
hdf5_raw_file = 'fibsem_medulla_7col/tstvol-520-1-h5/img_normalized.h5'
hdf5_gt_file = 'fibsem_medulla_7col/tstvol-520-1-h5/groundtruth_seg.h5'
# hdf5_aff_file = 'fibsem_medulla_7col/tstvol-520-1-h5/groundtruth_aff.h5'
#hdf5_raw_file = 'zebrafish_friedrich/raw.hdf5'
#hdf5_gt_file = 'zebrafish_friedrich/labels_2.hdf5'
hdf5_raw = h5py.File(hdf5_raw_file, 'r')
hdf5_gt = h5py.File(hdf5_gt_file, 'r')
# hdf5_aff = h5py.File(hdf5_aff_file, 'r')
#inspect_3D_hdf5(hdf5_raw)
#inspect_3D_hdf5(hdf5_gt)
#inspect_4D_hdf5(hdf5_aff)
# Make the dataset ready for the network
hdf5_raw_ds = normalize(np.asarray(hdf5_raw[hdf5_raw.keys()[0]]).astype(float32), -1, 1)
hdf5_gt_ds = np.asarray(hdf5_gt[hdf5_gt.keys()[0]]).astype(float32)
# hdf5_aff_ds = np.asarray(hdf5_aff[hdf5_aff.keys()[0]])
#display_aff(hdf5_aff_ds, 1)
#display_con(hdf5_gt_ds, 0)
#display_raw(hdf5_raw_ds, 0)
#display_binary(hdf5_gt_ds, 0)
#Initialize caffe
caffe.set_mode_gpu()
caffe.set_device(config.device_id)
if(config.mode == "train"):
solver = caffe.get_solver_from_file(config.solver_proto)
#solver.restore("net__iter_8000.solverstate")
net = solver.net
train(solver, [normalize(hdf5_raw_ds)], [hdf5_gt_ds])
if(config.mode == "process"):
net = caffe.Net(config.test_net, config.trained_model, caffe.TEST)
process(net, [normalize(hdf5_raw_ds)], config.output_folder)
| srinituraga/caffe_neural_models | dataset_06/greentea_brew_tool.py | Python | bsd-2-clause | 14,490 | ["Mayavi"] | e71aa22a36e7f315cfe53dcfee57f1010ce3abf29ddb8cf4138f405e4fb3f7fb |
# -*- coding: utf-8 -*-
#
# ABOUT
# Artisan PID Controllers (Fuji, DTA, Arduino TC4)
# LICENSE
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
# AUTHOR
# Marko Luther, 2020
###################################################################################
########################## FUJI PID CLASS DEFINITION ############################
###################################################################################
# This class can work with either one Fuji PXR or one Fuji PXG. It is used for the controlling PID only.
# NOTE: There is only one controlling PID. The second PID is only used for reading BT; therefore
# there is no need to create a second PID object, since all the second PID does is read temperature (always with the same command).
# All that is needed for the second PID is its unit id number, stored in aw.qmc.device[].
# The command to read T is always the same for PXR and PXG, only the unit ID changes.
import time as libtime
import numpy
import logging
try:
from typing import Final
except ImportError:
# for Python 3.7:
from typing_extensions import Final
from artisanlib.util import decs2string, fromCtoF, fromFtoC, hex2int, str2cmd, stringfromseconds, cmd2str
try:
#pylint: disable = E, W, R, C
from PyQt6.QtCore import pyqtSlot # @UnusedImport @Reimport @UnresolvedImport
from PyQt6.QtWidgets import QApplication # @UnusedImport @Reimport @UnresolvedImport
except Exception:
#pylint: disable = E, W, R, C
from PyQt5.QtCore import pyqtSlot # @UnusedImport @Reimport @UnresolvedImport
from PyQt5.QtWidgets import QApplication # @UnusedImport @Reimport @UnresolvedImport
_log: Final = logging.getLogger(__name__)
class FujiPID():
def __init__(self,aw):
self.aw = aw
# follow background: if True, Artisan sends SV values taken from the current background profile if any
self.followBackground = False
self.lookahead = 0 # the lookahead in seconds
self.rampsoak = False # True if RS is active
self.sv = None # the last sv send to the Fuji PID
## FUJI PXG input types
##0 (JPT 100'3f)
##1 (PT 100'3f)
##2 (J)
##3 (K)
##4 (R)
##5 (B)
##6 (S)
##7 (T)
##8 (E)
##9 (no function)
##10 (no function)
##11 (no function)
##12 (N)
##13 (PL- 2)
##14 (no function)
##15 (0V to 5V / 0mA to 20mA)
##16 (1V to 5V/4mA to 20mA)
##17 (0mV to 10V)
##18 (2V to 10V)
##19 (0mV to 100mV)
self.PXGthermotypes = ["JPT 100",#0
"PT 100", #1
"J", #2
"K", #3
"R", #4
"B", #5
"S", #6
"T", #7
"E", #8
"N", #12
"PL-2", #13
"0V-5V/0mA-20mA", #15
"1V-5V/4mA-20mA", #16
"0mV-10V", #17
"2V to 10V", #18
"0mV-100mV" #19
]
self.PXGconversiontoindex = [0,1,2,3,4,5,6,7,8,12,13,15,16,17,18,19] #converts fuji PID PXG types to indexes
self.PXFthermotypes = [
"PT 100-2 (0-500C)", #8
"PT 100-3 (0-600C)", #9
"PT 100-7 (-199-600C)", #13
"PT 100-8 (-200-850C)", #14
"J-1 (0-400C)", #15
"J-2 (-20-400C)", #16
"J-3 (0-800C)", #17
"J-4 (-2000-1300C)", #18
"K-1 (0-400C)", #19
"K-2 (-20-500C)", #20
"K-3 (0-800C)", #21
"K-4 (-200-1300C)", #22
"R", #23
"B", #24
"S", #25
"T-2 (-199-400C)", #27
"E-1 (0-800C)", #28
"E-2 (-150-800C)", #29
"E-3 (-200-800C)", #30
"N", #34
"PL-2", #36
"0V to 5V", #37
"1V to 5V", #38
"0V to 10V", #39
"0mA to 20mA", #42
"4mA to 20mA", #43
]
self.PXFconversiontoindex = [8,9,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,30,34,36,37,38,39,42,43] #converts fuji PID PXF types to indexes
## FUJI PXR input types
##0 (JPT 100'3f)
##1 (PT 100'3f)
##2 (J)
##3 (K)
##4 (R)
##5 (B)
##6 (S)
##7 (T)
##8 (E)
##12 (N)
##13 (PL- 2)
##15 (0V to 5V/0mA to 20mA)
##16 (1V to 5V/4mA to 20mA)
##17 (0mV to 10V)
##18 (2V to 10V)
##19 (0mV to 100mV)
self.PXRthermotypes = [
"PT 100", #1
"J", #2
"K", #3
"R", #4
"B", #5
"S", #6
"T", #7
"E", #8
"N", #12
"PL-2", #13
"1V to 5V/4mA to 20mA" #16
]
self.PXRconversiontoindex = [1,2,3,4,5,6,7,8,12,13,16] #converts fuji PID PXR types to indexes
#refer to Fuji PID instruction manual for more information about the parameters and channels
#dictionary "KEY": [VALUE,MEMORY_ADDRESS]
self.PXG4={
############ CH1 Selects controller modes
# manual mode 0 = OFF(auto), 1 = ON(manual)
"manual": [0,41121],
#run or standby 0=OFF(during run), 1 = ON(during standby)
"runstandby": [0,41004],
#autotuning run command modes available 0=off, 1=on, 2=low
"autotuning": [0,41005],
#rampsoak command modes available 0=off, 1=run; 2=hold
"rampsoak": [0,41082],
#select SV sv1,...,sv7
"selectsv": [1,41221],
#selects PID number behaviour mode: pid1,...,pid7
"selectpid": [0,41222],
############ CH2 Main operating pid parameters.
#proportional band P0 (0% to 999.9%)
"p": [5,41006],
#integration time i0 (0 to 3200.0 sec)
"i": [240,41007],
#differential time d0 (0.0 to 999.9 sec)
"d": [60,41008],
############ CH3 These are 7 pid storage locations
"sv1": [300.0,41241], "p1": [5,41242], "i1": [240,41243], "d1": [60,41244],
"sv2": [350.0,41251], "p2": [5,41252], "i2": [240,41253], "d2": [60,41254],
"sv3": [400.0,41261], "p3": [5,41262], "i3": [240,41263], "d3": [60,41264],
"sv4": [450.0,41271], "p4": [5,41272], "i4": [240,41273], "d4": [60,41274],
"sv5": [500.0,41281], "p5": [5,41282], "i5": [240,41283], "d5": [60,41284],
"sv6": [550.0,41291], "p6": [5,41292], "i6": [240,41293], "d6": [60,41294],
"sv7": [575.0,41301], "p7": [5,41302], "i7": [240,41303], "d7": [60,41304],
"selectedpid":[7,41225],
############# CH4 Creates a pattern of temperatures (profiles) using ramp soak combination
#sv stands for Set Value (desired temperature value)
#the time to reach sv is called ramp
#the time to hold the temperature at sv is called soak
"timeunits": [1,41562], #0=hh.MM (hour:min) 1=MM.SS (min:sec) # PXG has two time formats HH:MM (factory default) and MM:SS
# Example. Dry roast phase. selects 3 or 4 minutes # PXG needs to have parameter TIMU set to 1 (MM:SS)
"segment1sv": [270.0,41581],"segment1ramp": [180,41582],"segment1soak": [0,41583], # See PXG Manual chapter 6: Ramp/Soak Time Units to set the parameter TIMU
"segment2sv": [300.0,41584],"segment2ramp": [180,41585],"segment2soak": [0,41586],
"segment3sv": [350.0,41587],"segment3ramp": [180,41588],"segment3soak": [0,41589],
"segment4sv": [400.0,41590],"segment4ramp": [180,41591],"segment4soak": [0,41592],
# Example. Phase to 1C. selects 6 or 8 mins
"segment5sv": [530.0,41593],"segment5ramp": [180,41594],"segment5soak": [0,41595],
"segment6sv": [530.0,41596],"segment6ramp": [180,41597],"segment6soak": [0,41598],
"segment7sv": [540.0,41599],"segment7ramp": [180,41600],"segment7soak": [0,41601],
"segment8sv": [540.0,41602],"segment8ramp": [180,41603],"segment8soak": [0,41604],
"segment9sv": [550.0,41605],"segment9ramp": [180,41606],"segment9soak": [0,41607],
"segment10sv": [550.0,41608],"segment10ramp": [180,41609],"segment10soak": [0,41610],
"segment11sv": [560.0,41611],"segment11ramp": [180,41612],"segment11soak": [0,41613],
"segment12sv": [560.0,41614],"segment12ramp": [180,41615],"segment12soak": [0,41616],
            # Example. Finish phase. selects 3 mins for regular coffee or 5 mins for espresso
"segment13sv": [570.0,41617],"segment13ramp": [180,41618],"segment13soak": [0,41619],
"segment14sv": [570.0,41620],"segment14ramp": [180,41621],"segment14soak": [0,41622],
"segment15sv": [580.0,41623],"segment15ramp": [180,41624],"segment15soak": [0,41625],
"segment16sv": [580.0,41626],"segment16ramp": [180,41627],"segment16soak": [0,41628],
# "rampsoakmode" 0-15 = 1-16 IMPORTANT: Factory setting is 3 (BAD). Set it up to number 0 or it will
            # sit on standby (SV blinks) at the end until rampsoakmode changes. It will appear as if the PID broke (unresponsive)
"rampsoakmode":[0,41081],
"rampsoakpattern": [6,41561], #ramp soak activation pattern 0=(1-4) 1=(5-8) 2=(1-8) 3=(9-12) 4=(13-16) 5=(9-16) 6=(1-16)
################ CH5 Checks the ramp soak progress, control output, remaining time and other status functions
"stat":[41561], #reads only. 0=off,1=1ramp,2=1soak,3=2ramp,4=2soak,...31=16ramp,32=16soak,33=end
################ CH6 Sets up the thermocouple type, input range, output range and other items for the controller
#input type: 0=NA,1=PT100ohms,2=J,3=K,4=R,5=B,6=S,7=T,8=E,12=N,13=PL2,15=(0-5volts),16=(1-5V),17=(0-10V),18=(2-10V),19=(0-100mV)
"pvinputtype": [3,41016],
"pvinputlowerlimit":[0,41018],
"pvinputupperlimit":[9999,41019],
"decimalposition": [1,41020],
"unitdisplay":[1,41345], #0=Celsius; 1=Fahrenheit
################# CH7 Assigns functions for DI (digital input), DO (digital output), LED lamp and other controls
"rampslopeunit":[1,41432], #0=hour,1=min
"controlmethod":[0,41002], #0=pid,2=fuzzy,2=self,3=pid2
################# CH8 Sets the defect conditions for each type of alarm
            ################# CH9 Sets the station number (id) and communication parameters of the PID controller
################# CH10 Changes settings for valve control
################# CH11 Sets passwords
################# CH12 Sets the parameters mask functions to hide parameters from the user
################# READ ONLY MEMORY (address starts with digit 3)
"pv?":[0,31001],"sv?":[0,31002],"alarm?":[31007],"fault?":[31008],"stat?":[31041],"mv1":[0,31042]
}
# "KEY": [VALUE,MEMORY_ADDRESS]
self.PXR = {"autotuning":[0,41005],
"segment1sv":[100.0,41057],"segment1ramp":[3,41065],"segment1soak":[0,41066], #PXR uses only HH:MM time format but stored as minutes in artisan
"segment2sv":[100.0,41058],"segment2ramp":[3,41067],"segment2soak":[0,41068],
"segment3sv":[100.0,41059],"segment3ramp":[3,41069],"segment3soak":[0,41070],
"segment4sv":[100.0,41060],"segment4ramp":[3,41071],"segment4soak":[0,41072],
"segment5sv":[100.0,41061],"segment5ramp":[3,41073],"segment5soak":[0,41074],
"segment6sv":[100.0,41062],"segment6ramp":[3,41075],"segment6soak":[0,41076],
"segment7sv":[100.0,41063],"segment7ramp":[3,41077],"segment7soak":[0,41078],
"segment8sv":[100.0,41064],"segment8ramp":[3,41079],"segment8soak":[0,41080],
#Tells what to do after finishing or how to start. See documentation under ramp soak pattern: 0-15
"rampsoakmode":[0,41081],
#rampsoak command 0=OFF, 1= RUN, 2= HALTED, 3=END
"rampsoak":[0,41082],
#ramp soak pattern. 0=executes 1 to 4; 1=executes 5 to 8; 2=executes 1 to 8
"rampsoakpattern":[0,41083],
#PID=0,FUZZY=1,SELF=2
"controlmethod":[0,41002],
#sv set value
"sv0":[0,41003],
# run standby 0=RUN 1=STANDBY
"runstandby": [0,41004],
"p":[5,41006],
"i":[240,41007],
"d":[60,41008],
"decimalposition": [1,41020],
"svlowerlimit":[0,41031],
"svupperlimit":[0,41032],
"pvinputtype":[3,41016],
#READ ONLY
#current pv
"pv?":[0,31001],
#current sv on display (during ramp soak it changes)
"sv?":[0,31002],
#rampsoak current running position (1-8)
"segment?":[0,31009],
"mv1":[0,31004] #duty cycle rx -300 to 10300 = -3.00% to 103.00%
}
self.PXF=dict(self.PXG4)
        # initialize the PXF register numbers from the PXG registers with an offset of 1000
for k in self.PXF.keys():
if len(self.PXF[k]) > 1:
self.PXF[k] = [self.PXF[k][0],self.PXF[k][1]+1000]
else:
self.PXF[k] = [self.PXF[k][0]+1000]
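        # Illustrative example (comment only): with the offset of 1000 the PXG "sv1" register 41241
        # becomes PXF register 42241, and the read-only "pv?" register 31001 becomes 32001.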
#writes new values for p - i - d
def setpidPXG(self,k,newPvalue,newIvalue,newDvalue):
if k is not None and k > 0:
#send command to the right sv
pkey = "p" + str(k)
ikey = "i" + str(k)
dkey = "d" + str(k)
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.aw.fujipid.PXG4[pkey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,int(float(newPvalue)*10.))
libtime.sleep(0.035)
reg = self.aw.modbus.address2register(self.aw.fujipid.PXG4[ikey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,int(float(newIvalue)*10.))
libtime.sleep(0.035)
reg = self.aw.modbus.address2register(self.aw.fujipid.PXG4[dkey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,int(float(newDvalue)*10.))
libtime.sleep(0.035)
p = i = d = " "
else:
commandp = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXG4[pkey][1],int(float(newPvalue)*10.))
commandi = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXG4[ikey][1],int(float(newIvalue)*10.))
commandd = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXG4[dkey][1],int(float(newDvalue)*10.))
p = self.aw.ser.sendFUJIcommand(commandp,8)
libtime.sleep(0.035)
i = self.aw.ser.sendFUJIcommand(commandi,8)
libtime.sleep(0.035)
d = self.aw.ser.sendFUJIcommand(commandd,8)
libtime.sleep(0.035)
#verify it went ok
if len(p) == 8 and len(i)==8 and len(d) == 8:
self.aw.fujipid.PXG4[pkey][0] = float(newPvalue)
self.aw.fujipid.PXG4[ikey][0] = float(newIvalue)
self.aw.fujipid.PXG4[dkey][0] = float(newDvalue)
message = QApplication.translate("StatusBar","pid #{0} successfully set to ({1},{2},{3})"
).format(str(k),str(newPvalue),str(newIvalue),str(newDvalue))
self.aw.sendmessage(message)
else:
lp = len(p)
li = len(i)
ld = len(d)
message = QApplication.translate("StatusBar","pid command failed. Bad data at pid{0} (8,8,8): ({1},{2},{3}) "
).format(str(k),str(lp),str(li),str(ld))
self.aw.sendmessage(message)
self.aw.qmc.adderror(message)
#writes new values for p - i - d
def setpidPXF(self,k,newPvalue,newIvalue,newDvalue):
if k is not None and k > 0:
#send command to the right sv
pkey = "p" + str(k)
ikey = "i" + str(k)
dkey = "d" + str(k)
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.aw.fujipid.PXF[pkey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,int(float(newPvalue)*10.))
libtime.sleep(0.035)
reg = self.aw.modbus.address2register(self.aw.fujipid.PXF[ikey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,int(float(newIvalue)*10.))
libtime.sleep(0.035)
reg = self.aw.modbus.address2register(self.aw.fujipid.PXF[dkey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,int(float(newDvalue)*10.))
libtime.sleep(0.035)
p = i = d = " "
else:
commandp = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXF[pkey][1],int(float(newPvalue)*10.))
commandi = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXF[ikey][1],int(float(newIvalue)*10.))
commandd = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXF[dkey][1],int(float(newDvalue)*10.))
p = self.aw.ser.sendFUJIcommand(commandp,8)
libtime.sleep(0.035)
i = self.aw.ser.sendFUJIcommand(commandi,8)
libtime.sleep(0.035)
d = self.aw.ser.sendFUJIcommand(commandd,8)
libtime.sleep(0.035)
#verify it went ok
if len(p) == 8 and len(i)==8 and len(d) == 8:
self.aw.fujipid.PXF[pkey][0] = float(newPvalue)
self.aw.fujipid.PXF[ikey][0] = float(newIvalue)
self.aw.fujipid.PXF[dkey][0] = float(newDvalue)
message = QApplication.translate("StatusBar","pid #{0} successfully set to ({1},{2},{3})"
).format(str(k),str(newPvalue),str(newIvalue),str(newDvalue))
self.aw.sendmessage(message)
else:
lp = len(p)
li = len(i)
ld = len(d)
message = QApplication.translate("StatusBar","pid command failed. Bad data at pid{0} (8,8,8): ({1},{2},{3}) "
).format(str(k),str(lp),str(li),str(ld))
self.aw.sendmessage(message)
self.aw.qmc.adderror(message)
# updates and returns the current ramp soak mode
def getCurrentRampSoakMode(self):
if self.aw.ser.controlETpid[0] == 0: # PXG
register = self.aw.fujipid.PXG4["rampsoakmode"][1]
elif self.aw.ser.controlETpid[0] == 1: # PXR
register = self.aw.fujipid.PXR["rampsoakmode"][1]
elif self.aw.ser.controlETpid[0] == 4: # PXF
register = self.aw.fujipid.PXF["rampsoakmode"][1]
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(register,3)
currentmode = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
else:
msg = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],3,register,1)
currentmode = self.aw.fujipid.readoneword(msg)
if self.aw.ser.controlETpid[0] == 0: # PXG
self.aw.fujipid.PXG4["rampsoakmode"][0] = currentmode
elif self.aw.ser.controlETpid[0] == 1: # PXR
self.aw.fujipid.PXR["rampsoakmode"][0] = currentmode
elif self.aw.ser.controlETpid[0] == 4: # PXF
self.aw.fujipid.PXF["rampsoakmode"][0] = currentmode
return currentmode
def getCurrentPIDnumberPXG(self):
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.aw.fujipid.PXG4["selectedpid"][1],3)
N = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
else:
command = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],3,self.aw.fujipid.PXG4["selectedpid"][1],1)
N = self.aw.fujipid.readoneword(command)
libtime.sleep(0.035)
return N
def getCurrentPIDnumberPXF(self):
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.aw.fujipid.PXF["selectedpid"][1],3)
N = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
else:
command = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],3,self.aw.fujipid.PXF["selectedpid"][1],1)
N = self.aw.fujipid.readoneword(command)
libtime.sleep(0.035)
return N
def setpidPXR(self,var,v):
r = ""
if var == "p":
p = int(v*10)
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.aw.fujipid.PXR["p"][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,p)
r = " "
else:
command = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXR["p"][1],p)
r = self.aw.ser.sendFUJIcommand(command,8)
elif var == "i":
i = int(v*10)
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.aw.fujipid.PXR["i"][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,i)
r = " "
else:
command = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXR["i"][1],i)
r = self.aw.ser.sendFUJIcommand(command,8)
elif var == "d":
d = int(v*10)
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.aw.fujipid.PXR["d"][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,d)
r = " "
else:
command = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXR["d"][1],d)
r = self.aw.ser.sendFUJIcommand(command,8)
if len(r) == 8:
message = QApplication.translate("StatusBar","{0} successfully sent to pid ").format(var)
self.aw.sendmessage(message)
if var == "p":
self.aw.fujipid.PXR["p"][0] = int(v)
elif var == "i":
self.aw.fujipid.PXR["i"][0] = int(v)
elif var == "d":
self.aw.fujipid.PXR["d"][0] = int(v)
else:
message = QApplication.translate("StatusBar","setpid(): There was a problem setting {0}").format(var)
self.aw.sendmessage(message)
self.aw.qmc.adderror(message)
def calcSV(self,tx):
if self.aw.qmc.background:
# Follow Background mode
if self.aw.qmc.swapETBT: # we observe the BT
res = self.aw.qmc.backgroundSmoothedBTat(tx + self.lookahead) # smoothed and approximated background
if res == -1:
return None # no background value for that time point
return res
res = self.aw.qmc.backgroundSmoothedETat(tx + self.lookahead) # smoothed and approximated background
if res == -1:
return None # no background value for that time point
return res
return None
##TX/RX FUNCTIONS
#This function reads read-only memory (with 3xxxx memory we need function=4)
#both PXR3 and PXG4 use the same memory location 31001 (3xxxx = read only)
# pidType: 0=PXG, 1=PXR, 2=None, 3=DTA, 4=PXF (here we support only 0, 1 and 4 for now)
def gettemperature(self, pidType, stationNo):
if pidType == 0:
reg = self.PXG4["pv?"][1]
elif pidType == 1:
reg = self.PXR["pv?"][1]
elif pidType == 4:
reg = self.PXF["pv?"][1]
else:
return -1
if self.aw.ser.useModbusPort:
# we use the pymodbus implementation
return self.aw.modbus.readSingleRegister(stationNo,self.aw.modbus.address2register(reg,4),4)
#we compose a message then we send it by using self.readoneword()
return self.readoneword(self.message2send(stationNo,4,reg,1))
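    # Illustrative example (comment only, station number assumed for illustration):
    #   self.gettemperature(0,2)  # reads PXG register 31001 ("pv?") from station 2 with function code 4
    # The raw integer is returned (-1 for an unsupported pidType); callers typically divide by 10,
    # since the PID sends values without the decimal point (see readcurrentsv() and getsegment()).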
# activates the SV slider
def activateONOFFsliderSV(self,flag):
self.aw.pidcontrol.activateSVSlider(flag)
def readcurrentsv(self):
if self.aw.ser.useModbusPort:
reg = None
#if control pid is fuji PXG4
if self.aw.ser.controlETpid[0] == 0:
reg = self.aw.modbus.address2register(self.PXG4["sv?"][1],4)
#or if control pid is fuji PXR
elif self.aw.ser.controlETpid[0] == 1:
reg = self.aw.modbus.address2register(self.PXR["sv?"][1],4)
#or if control pid is fuji PXF
elif self.aw.ser.controlETpid[0] == 4:
reg = self.aw.modbus.address2register(self.PXF["sv?"][1],4)
if reg is not None:
val = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,4)/10.
else:
val = -0.1
else:
command = ""
#if control pid is fuji PXG4
if self.aw.ser.controlETpid[0] == 0:
command = self.message2send(self.aw.ser.controlETpid[1],4,self.PXG4["sv?"][1],1)
#or if control pid is fuji PXR
elif self.aw.ser.controlETpid[0] == 1:
command = self.message2send(self.aw.ser.controlETpid[1],4,self.PXR["sv?"][1],1)
elif self.aw.ser.controlETpid[0] == 4:
command = self.message2send(self.aw.ser.controlETpid[1],4,self.PXF["sv?"][1],1)
val = self.readoneword(command)/10.
if val != -0.1:
return val
return -1
# returns Fuji duty signal in the range 0-100 or -1
def readdutycycle(self):
if self.aw.ser.useModbusPort:
reg = None
#if control pid is fuji PXG4
if self.aw.ser.controlETpid[0] == 0:
reg = self.aw.modbus.address2register(self.PXG4["mv1"][1],4)
#or if control pid is fuji PXR
elif self.aw.ser.controlETpid[0] == 1:
reg = self.aw.modbus.address2register(self.PXR["mv1"][1],4)
#or if control pid is fuji PXF
elif self.aw.ser.controlETpid[0] == 4:
reg = self.aw.modbus.address2register(self.PXF["mv1"][1],4)
            if reg is not None:
                v = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,4)
            else:
                v = None # unsupported PID type; mapped to -1 below
        else:
            command = ""
            v = None # stays None if the PID type is unsupported
#if control pid is fuji PXG4
if self.aw.ser.controlETpid[0] == 0:
command = self.message2send(self.aw.ser.controlETpid[1],4,self.PXG4["mv1"][1],1)
v = self.readoneword(command)
#or if control pid is fuji PXR
elif self.aw.ser.controlETpid[0] == 1:
command = self.message2send(self.aw.ser.controlETpid[1],4,self.PXR["mv1"][1],1)
v = self.readoneword(command)
#or if control pid is fuji PXF
elif self.aw.ser.controlETpid[0] == 4:
command = self.message2send(self.aw.ser.controlETpid[1],4,self.PXF["mv1"][1],1)
v = self.readoneword(command)
if v is None:
val = -1
elif v >= 65236: # -3% to 0%
val = 0
elif v <= 10300: # <= 103%
val = v/100.
        else: # value out of range (possibly a communication error)
val = -1
#val range -3 to 103%. Check for possible decimal digit user settings
return val
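    # Illustrative example (comment only): a raw reading of 5000 maps to a duty of 50.0%,
    # readings of 65236 and above (the -3%..0% band) are clipped to 0, and values between
    # 10301 and 65235 are treated as communication errors (-1).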
def getrampsoakmode(self):
if self.aw.ser.controlETpid[0] == 0: #Fuji PXG
register = self.PXG4["rampsoakpattern"][1]
elif self.aw.ser.controlETpid[0] == 1: #Fuji PXR
register = self.PXR["rampsoakpattern"][1]
elif self.aw.ser.controlETpid[0] == 4: #Fuji PXF
register = self.PXF["rampsoakpattern"][1]
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(register,3)
currentmode = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
else:
msg = self.message2send(self.aw.ser.controlETpid[1],3,register,1)
currentmode = self.readoneword(msg)
if self.aw.ser.controlETpid[0] == 0: #Fuji PXG
self.PXG4["rampsoakpattern"][0] = currentmode
elif self.aw.ser.controlETpid[0] == 1: #Fuji PXR
self.PXR["rampsoakpattern"][0] = currentmode
elif self.aw.ser.controlETpid[0] == 4: #Fuji PXF
self.PXF["rampsoakpattern"][0] = currentmode
return currentmode
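    # Illustrative example (comment only): a returned value of 2 means the controller runs
    # segments 1-8 (see the "rampsoakpattern" comments in the register dictionaries above);
    # the value read back is also cached in the matching dictionary entry.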
    # returns True on success and False otherwise
def setrampsoakmode(self,mode):
if self.aw.ser.controlETpid[0] == 0: #Fuji PXG
register = self.PXG4["rampsoakpattern"][1]
elif self.aw.ser.controlETpid[0] == 1: #Fuji PXR
register = self.PXR["rampsoakpattern"][1]
elif self.aw.ser.controlETpid[0] == 4: #Fuji PXF
register = self.PXF["rampsoakpattern"][1]
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(register,3)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,mode)
r = ""
else:
command = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,register,mode)
r = self.aw.ser.sendFUJIcommand(command,8)
if self.aw.ser.useModbusPort or len(r) == 8:
if self.aw.ser.controlETpid[0] == 0: #Fuji PXG
self.PXG4["rampsoakpattern"][0] = mode
elif self.aw.ser.controlETpid[0] == 1: #Fuji PXR
self.PXR["rampsoakpattern"][0] = mode
elif self.aw.ser.controlETpid[0] == 4: #Fuji PXF
self.PXF["rampsoakpattern"][0] = mode
return True
return False
    #turns ON / OFF the current ramp soak mode
#flag =0 OFF, flag = 1 ON, flag = 2 hold
#A ramp soak pattern defines a whole profile. They have a minimum of 4 segments.
# returns True on success, False otherwise
def setrampsoak(self,flag):
register = None
if self.aw.ser.controlETpid[0] == 0: #Fuji PXG
register = self.PXG4["rampsoak"][1]
elif self.aw.ser.controlETpid[0] == 1: #Fuji PXR
register = self.PXR["rampsoak"][1]
elif self.aw.ser.controlETpid[0] == 4: #Fuji PXF
register = self.PXF["rampsoak"][1]
if self.aw.ser.useModbusPort:
if register is not None:
reg = self.aw.modbus.address2register(register,6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,flag)
if flag == 1:
self.aw.fujipid.rampsoak = True
self.aw.sendmessage(QApplication.translate("Message","RS ON"))
elif flag == 0:
self.aw.fujipid.rampsoak = False
self.aw.sendmessage(QApplication.translate("Message","RS OFF"))
else:
self.aw.sendmessage(QApplication.translate("Message","RS on HOLD"))
return True
elif register is not None:
command = self.message2send(self.aw.ser.controlETpid[1],6,register,flag)
r = self.aw.ser.sendFUJIcommand(command,8)
#if OK
if r == command:
if flag == 1:
self.aw.fujipid.rampsoak = True
self.aw.sendmessage(QApplication.translate("Message","RS ON"))
elif flag == 0:
self.aw.fujipid.rampsoak = False
self.aw.sendmessage(QApplication.translate("Message","RS OFF"))
else:
self.aw.sendmessage(QApplication.translate("Message","RS on HOLD"))
return True
self.aw.qmc.adderror(QApplication.translate("Error Message","RampSoak could not be changed"))
return False
return False
# returns True on success, False otherwise
def setONOFFstandby(self,flag):
_log.debug("setONOFFstandby(%s)",flag)
#flag = 0 standby OFF, flag = 1 standby ON (pid off)
#standby ON (pid off) will reset: rampsoak modes/autotuning/self tuning
#Fuji PXG
if self.aw.ser.controlETpid[0] == 0:
register = self.aw.fujipid.PXG4["runstandby"][1]
elif self.aw.ser.controlETpid[0] == 1:
register = self.aw.fujipid.PXR["runstandby"][1]
elif self.aw.ser.controlETpid[0] == 4:
register = self.aw.fujipid.PXF["runstandby"][1]
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(register,6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,flag)
else:
command = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],6,register,flag)
#TX and RX
r = self.aw.ser.sendFUJIcommand(command,8)
if self.aw.ser.useModbusPort or r == command:
if self.aw.ser.controlETpid[0] == 0:
self.aw.fujipid.PXG4["runstandby"][0] = flag
elif self.aw.ser.controlETpid[0] == 1:
self.aw.fujipid.PXR["runstandby"][0] = flag
elif self.aw.ser.controlETpid[0] == 4:
self.aw.fujipid.PXF["runstandby"][0] = flag
return True
mssg = QApplication.translate("Error Message","Exception:") + " setONOFFstandby()"
self.aw.qmc.adderror(mssg)
return False
def getONOFFstandby(self):
if self.aw.ser.controlETpid[0] == 0:
return self.aw.fujipid.PXG4["runstandby"][0]
if self.aw.ser.controlETpid[0] == 1:
return self.aw.fujipid.PXR["runstandby"][0]
if self.aw.ser.controlETpid[0] == 4:
return self.aw.fujipid.PXF["runstandby"][0]
return None
    #sets a new sv value (if silent=True, no output nor event recording is done; if move is True the SV slider is moved)
def setsv(self,value,silent=False,move=True):
command = ""
#Fuji PXG / PXF
if self.aw.ser.controlETpid[0] in [0,4]: # Fuji PXG or PXF
if self.aw.ser.controlETpid[0] == 0:
reg_dict = self.PXG4
elif self.aw.ser.controlETpid[0] == 4:
reg_dict = self.PXF
#send command to the current sv (1-7)
# #-- experimental begin
# # read the current svN (1-7) being used
# if self.aw.ser.useModbusPort:
# reg = self.aw.modbus.address2register(reg_dict["selectsv"][1],3)
# N = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
# else:
# command = self.message2send(self.aw.ser.controlETpid[1],3,reg_dict["selectsv"][1],1)
# N = self.readoneword(command)
# if N > 0:
# reg_dict["selectsv"][0] = N
# #-- experimental end
svkey = "sv"+ str(reg_dict["selectsv"][0]) #current sv
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(reg_dict[svkey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,int(value*10))
else:
# value = int(round(value)) # not sure why this is needed, but a FUJI PXF seems not to work without this and value as full floating point numbers!?
# this hack seems not to help
command = self.message2send(self.aw.ser.controlETpid[1],6,reg_dict[svkey][1],int(value*10))
r = self.aw.ser.sendFUJIcommand(command,8)
#check response
if self.aw.ser.useModbusPort or r == command:
if not silent:
# [Not sure the following will translate or even format properly... Need testing!]
message = QApplication.translate("Message","PXG/PXF sv#{0} set to {1}").format(reg_dict["selectsv"][0],"%.1f" % float(value))
self.aw.sendmessage(message)
reg_dict[svkey][0] = value
#record command as an Event
strcommand = "SETSV::" + str("%.1f"%float(value))
self.aw.qmc.DeviceEventRecord(strcommand)
self.sv = value
if move:
self.aw.moveSVslider(value,setValue=False)
else:
# error response
Rx = ""
if len(r):
import binascii
Rx = cmd2str(binascii.hexlify(r))
self.aw.qmc.adderror(QApplication.translate("Error Message","Exception:") + " setsv(): Rx = " + Rx)
#Fuji PXR
elif self.aw.ser.controlETpid[0] == 1:
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.aw.fujipid.PXR["sv0"][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,int(value*10))
else:
command = self.message2send(self.aw.ser.controlETpid[1],6,self.aw.fujipid.PXR["sv0"][1],int(value*10))
r = self.aw.ser.sendFUJIcommand(command,8)
#check response
if self.aw.ser.useModbusPort or r == command:
if not silent:
# [Not sure the following will translate or even format properly... Need testing!]
message = QApplication.translate("Message","PXR sv set to {0}").format("%.1f" % float(value))
self.aw.fujipid.PXR["sv0"][0] = value
self.aw.sendmessage(message)
#record command as an Event
strcommand = "SETSV::" + str("%.1f"%float(value))
self.aw.qmc.DeviceEventRecord(strcommand)
self.sv = value
if move:
self.aw.moveSVslider(value,setValue=False)
else:
self.aw.qmc.adderror(QApplication.translate("Error Message","Exception:") + " setPXRsv()")
#used to set up or down SV by diff degrees from current sv setting; if move is True the SV slider is moved
def adjustsv(self,diff,move=True):
currentsv = self.readcurrentsv()
if currentsv != -1:
newsv = int((currentsv + diff)*10.) #multiply by 10 because we use a decimal point
# if control pid is fuji PXG or PXF
if self.aw.ser.controlETpid[0] in [0,4]:
if self.aw.ser.controlETpid[0] == 0:
reg_dict = self.PXG4
elif self.aw.ser.controlETpid[0] == 4:
reg_dict = self.PXF
# read the current svN (1-7) being used
#-- experimental begin
# read the current svN (1-7) being used
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(reg_dict["selectsv"][1],3)
N = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
else:
command = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],3,reg_dict["selectsv"][1],1)
N = self.aw.fujipid.readoneword(command)
if N > 0:
reg_dict["selectsv"][0] = N
#-- experimental end
svkey = "sv"+ str(reg_dict["selectsv"][0]) #current sv
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(reg_dict[svkey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,newsv)
else:
command = self.message2send(self.aw.ser.controlETpid[1],6,reg_dict[svkey][1],newsv)
r = self.aw.ser.sendFUJIcommand(command,8)
if self.aw.ser.useModbusPort or len(r) == 8:
message = QApplication.translate("Message","SV{0} changed from {1} to {2})").format(str(N),str(currentsv),str(newsv/10.))
self.aw.sendmessage(message)
reg_dict[svkey][0] = newsv/10
#record command as an Event to replay (not binary as it needs to be stored in a text file)
strcommand = "SETSV::" + str("%.1f"%(newsv/10.))
self.aw.qmc.DeviceEventRecord(strcommand)
self.aw.lcd6.display("%.1f"%float(newsv/10.))
if move:
self.aw.moveSVslider(newsv/10.,setValue=False)
else:
msg = QApplication.translate("Message","Unable to set sv{0}").format(str(N))
self.aw.sendmessage(msg)
# or if control pid is fuji PXR
elif self.aw.ser.controlETpid[0] == 1:
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(self.PXR["sv0"][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg,newsv)
else:
command = self.message2send(self.aw.ser.controlETpid[1],6,self.PXR["sv0"][1],newsv)
r = self.aw.ser.sendFUJIcommand(command,8)
if self.aw.ser.useModbusPort or len(r) == 8:
message = QApplication.translate("Message","SV changed from {0} to {1}").format(str(currentsv),str(newsv/10.))
self.aw.sendmessage(message)
self.PXR["sv0"][0] = newsv/10
#record command as an Event to replay (not binary as it needs to be stored in a text file)
strcommand = "SETSV::" + str("%.1f"%(newsv/10.))
self.aw.qmc.DeviceEventRecord(strcommand)
self.aw.lcd6.display("%.1f"%float(newsv/10.))
if move:
self.aw.moveSVslider(newsv/10.,setValue=False)
else:
self.aw.sendmessage(QApplication.translate("Message","Unable to set sv"))
else:
self.aw.sendmessage(QApplication.translate("Message","Unable to set new sv"))
#format of the input string Command: COMMAND::VALUE1::VALUE2::VALUE3::ETC
def replay(self,CommandString):
parts = CommandString.split("::")
command = parts[0]
values = parts[1:]
if command == "SETSV":
self.setsv(float(values[0]))
return
if command == "SETRS":
self.replaysetrs(CommandString)
#example of command string with four segments (minimum for Fuji PIDs)
# SETRS::270.0::3::0::SETRS::300.0::3::0::SETRS::350.0::3::0::SETRS::400.0::3::0
def replaysetrs(self,CommandString):
segments =CommandString.split("SETRS")
if len(segments[0]) == 0:
segments = segments[1:] #remove first empty [""] list [[""],[etc]]
if len(segments[-1]) == 0:
segments = segments[:-1] #remove last empty [""] list [[etc][""]]
n = len(segments)
        #if the number of segments is < 4, make it compatible with Fuji PIDs (4 segments needed)
if n < 4:
for i in range(4-n):
#last temperature
lasttemp = segments[-1].split("::")[1]
                #extend to 4 segments ("SETRS" already removed)
string = "::" + lasttemp + "::0::0" #add zero ramp time and zero soak time
segments.append(string)
rs = []
changeflag = 0
for i in range(n):
rs.append(segments[i].split("::"))
if len(rs[i][0]) == 0: #remove first empty "" [u"",u"300.5",u"3",u"0",u""] if one found
rs[i] = rs[i][1:]
if len(rs[i][-1]) == 0: #remove last empty "" [u"300.5",u"3",u"0",u""] if one found
rs[i] = rs[i][:-1]
if len(rs[i]) == 3:
svkey = "segment" + str(i+1) + "sv"
rampkey = "segment" + str(i+1) + "ramp"
soakkey = "segment" + str(i+1) + "soak"
if self.aw.ser.controlETpid[0] == 0: #PXG4
                    if n % 4 or n > 16: # n must be a multiple of 4 and at most 16
self.aw.qmc.adderror((QApplication.translate("Error Message","Exception:") + " PXG4 replaysetrs(): {0}").format(n))
return
if self.PXG4[svkey][0] != float(rs[i][0]):
self.PXG4[svkey][0] = float(rs[i][0])
changeflag = 1
if self.PXG4[rampkey][0] != int(rs[i][1]):
self.PXG4[rampkey][0] = int(rs[i][1])
changeflag = 1
if self.PXG4[soakkey][0] != int(rs[i][2]):
self.PXG4[soakkey][0] = int(rs[i][2])
changeflag = 1
if changeflag:
self.setsegment((i+1), self.PXG4[svkey][0], self.PXG4[rampkey][0] ,self.PXG4[soakkey][0])
changeflag = 0
elif self.aw.ser.controlETpid[0] == 1: #PXR
                    if n % 4 or n > 8: # n must be a multiple of 4 and at most 8
self.aw.qmc.adderror((QApplication.translate("Error Message","Exception:") + " PXR replaysetrs(): {0}").format(n))
return
if self.PXR[svkey][0] != float(rs[i][0]):
self.PXR[svkey][0] = float(rs[i][0])
changeflag = 1
if self.PXR[rampkey][0] != int(rs[i][1]):
self.PXR[rampkey][0] = int(rs[i][1])
changeflag = 1
if self.PXR[soakkey][0] != int(rs[i][2]):
self.PXR[soakkey][0] = int(rs[i][2])
changeflag = 1
if changeflag:
self.setsegment((i+1), self.PXR[svkey][0], self.PXR[rampkey][0] ,self.PXR[soakkey][0])
changeflag = 0
else:
self.aw.qmc.adderror(QApplication.translate("Error Message","Exception:") + " replaysetrs()")
return
#start ramp soak ON
self.setrampsoak(1)
def getsegment(self, idn):
if self.aw.ser.controlETpid[0] == 0:
reg_dict = self.PXG4
elif self.aw.ser.controlETpid[0] == 1:
reg_dict = self.PXR
elif self.aw.ser.controlETpid[0] == 4:
reg_dict = self.PXF
svkey = "segment" + str(idn) + "sv"
register = reg_dict[svkey][1]
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(register,3)
sv = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
else:
svcommand = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],3,register,1)
sv = self.aw.fujipid.readoneword(svcommand)
if sv == -1:
return
reg_dict[svkey][0] = sv/10. #divide by 10 because the decimal point is not sent by the PID
rampkey = "segment" + str(idn) + "ramp"
register = reg_dict[rampkey][1]
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(register,3)
ramp = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
else:
rampcommand = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],3,register,1)
ramp = self.aw.fujipid.readoneword(rampcommand)
if ramp == -1:
return
reg_dict[rampkey][0] = ramp
soakkey = "segment" + str(idn) + "soak"
register = reg_dict[soakkey][1]
if self.aw.ser.useModbusPort:
reg = self.aw.modbus.address2register(register,3)
soak = self.aw.modbus.readSingleRegister(self.aw.ser.controlETpid[1],reg,3)
else:
soakcommand = self.aw.fujipid.message2send(self.aw.ser.controlETpid[1],3,register,1)
soak = self.aw.fujipid.readoneword(soakcommand)
if soak == -1:
return
reg_dict[soakkey][0] = soak
#idn = id number, sv = float set value, ramp = ramp value, soak = soak value
#used in replaysetrs()
def setsegment(self,idn,sv,ramp,soak):
svkey = "segment" + str(idn) + "sv"
rampkey = "segment" + str(idn) + "ramp"
soakkey = "segment" + str(idn) + "soak"
if self.aw.ser.useModbusPort:
if self.aw.ser.controlETpid[0] == 0:
reg1 = self.aw.modbus.address2register(self.PXG4[svkey][1],6)
reg2 = self.aw.modbus.address2register(self.PXG4[rampkey][1],6)
reg3 = self.aw.modbus.address2register(self.PXG4[soakkey][1],6)
elif self.aw.ser.controlETpid[0] == 1:
reg1 = self.aw.modbus.address2register(self.PXR[svkey][1],6)
reg2 = self.aw.modbus.address2register(self.PXR[rampkey][1],6)
reg3 = self.aw.modbus.address2register(self.PXR[soakkey][1],6)
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg1,int(sv*10))
libtime.sleep(0.11) #important time between writings
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg2,ramp)
libtime.sleep(0.11) #important time between writings
self.aw.modbus.writeSingleRegister(self.aw.ser.controlETpid[1],reg3,soak)
r1 = r2 = r3 = " "
else:
if self.aw.ser.controlETpid[0] == 0:
svcommand = self.message2send(self.aw.ser.controlETpid[1],6,self.PXG4[svkey][1],int(sv*10))
rampcommand = self.message2send(self.aw.ser.controlETpid[1],6,self.PXG4[rampkey][1],ramp)
soakcommand = self.message2send(self.aw.ser.controlETpid[1],6,self.PXG4[soakkey][1],soak)
elif self.aw.ser.controlETpid[0] == 1:
svcommand = self.message2send(self.aw.ser.controlETpid[1],6,self.PXR[svkey][1],int(sv*10))
rampcommand = self.message2send(self.aw.ser.controlETpid[1],6,self.PXR[rampkey][1],ramp)
soakcommand = self.message2send(self.aw.ser.controlETpid[1],6,self.PXR[soakkey][1],soak)
r1 = self.aw.ser.sendFUJIcommand(svcommand,8)
libtime.sleep(0.11) #important time between writings
r2 = self.aw.ser.sendFUJIcommand(rampcommand,8)
libtime.sleep(0.11) #important time between writings
r3 = self.aw.ser.sendFUJIcommand(soakcommand,8)
#check if OK
if len(r1)!=8 or len(r2)!=8 or len(r3)!=8:
self.aw.qmc.adderror(QApplication.translate("Error Message","Segment values could not be written into PID"))
@staticmethod
def dec2HexRaw(decimal):
        # This method converts a decimal to a raw string appropriate for Fuji serial TX
# Used to compose serial messages
Nbytes = []
while decimal:
decimal, rem = divmod(decimal, 256)
Nbytes.append(rem)
Nbytes.reverse()
if not Nbytes:
Nbytes.append(0)
return decs2string(Nbytes)
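    # Illustrative example (comments only, not executed):
    #   self.dec2HexRaw(1056)  # -> the two bytes 0x04,0x20 (1056 = 4*256 + 32)
    #   self.dec2HexRaw(0)     # -> the single byte 0x00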
def message2send(self, stationNo, FunctionCode, memory, Nword):
# This method takes the arguments to compose a Fuji serial command and returns the complete raw string with crc16 included
# memory must be given as the Resistor Number Engineering unit (example of memory = 41057 )
        #check if Nword fits in a single byte (< 256). If it does, add a zero pad byte, since Nword must always be sent as 2 bytes
        if Nword < 256:
pad1 = self.dec2HexRaw(0)
else:
pad1 = decs2string("")
part1 = self.dec2HexRaw(stationNo)
part2 = self.dec2HexRaw(FunctionCode)
_,r = divmod(memory,10000)
part3 = self.dec2HexRaw(r - 1)
part4 = self.dec2HexRaw(Nword)
datastring = part1 + part2 + part3 + pad1 + part4
# calculate the crc16 of all this data string
crc16int = self.fujiCrc16(datastring)
#convert crc16 to hex string to change the order of the 2 bytes from AB.CD to CD.AB to match Fuji requirements
crc16hex= hex(crc16int)[2:]
        #we need 4 chars but sometimes get only three or two because hex() drops leading zeros. Therefore, pad with "0" if needed.
ll = 4 - len(crc16hex)
pad =["","0","00","000"]
crc16hex = pad[ll] + crc16hex
#change now from AB.CD to CD.AB and convert from hex string to int
crc16end = int(crc16hex[2:]+crc16hex[:2],16)
#now convert the crc16 from int to binary
part5 = self.dec2HexRaw(crc16end)
#return total sum of binary parts (assembled message)
return (datastring + part5)
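    # Illustrative example (comments only; station and register chosen for illustration):
    #   self.message2send(2,3,41057,1) composes the 8 byte frame
    #     0x02 (station), 0x03 (function), 0x04 0x20 (register 41057 -> 1056), 0x00 0x01 (one word), CRC_lo, CRC_hi
    #   i.e. effectively a Modbus RTU style read request with the CRC16 appended low byte first.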
#input string command. Output integer (not binary string); used for example to read temperature or to obtain the value of a variable
def readoneword(self,command):
#takes an already formated command to read 1 word data and returns the response from the pid
#SEND command and RECEIVE 7 bytes back
r = self.aw.ser.sendFUJIcommand(command,7)
if len(r) == 7:
            # everything OK: convert the two data bytes of the response to an integer
            s1 = hex2int(r[3],r[4])
            # return the decoded value
return s1
#bad number of RX bytes
errorcode = QApplication.translate("Error Message","pid.readoneword(): {0} RX bytes received (7 needed) for unit ID={1}").format(len(r),command[0])
self.aw.qmc.adderror(errorcode)
return -1
#FUJICRC16 function calculates the CRC16 of the data. It expects a binary string as input and returns an int
@staticmethod
def fujiCrc16(string):
crc16tab = (0x0000,
0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880,
0xC841, 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0,
0x1C80, 0xDC41, 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100,
0xD1C1, 0xD081, 0x1040, 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, 0x3600, 0xF6C1, 0xF781, 0x3740,
0xF501, 0x35C0, 0x3480, 0xF441, 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, 0xFA01, 0x3AC0, 0x3B80,
0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, 0xEE01, 0x2EC0,
0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, 0x2200,
0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041, 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80,
0xAE41, 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0,
0x7A80, 0xBA41, 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700,
0xB7C1, 0xB681, 0x7640, 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, 0x5000, 0x90C1, 0x9181, 0x5140,
0x9301, 0x53C0, 0x5280, 0x9241, 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, 0x9C01, 0x5CC0, 0x5D80,
0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, 0x8801, 0x48C0,
0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, 0x4400,
0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040)
cr=0xFFFF
for j in string:
tmp = cr ^(j)
cr =(cr >> 8)^crc16tab[(tmp & 0xff)]
return cr
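    # Illustrative usage (comment only): message2send() runs fujiCrc16() over the data portion of an
    # outgoing frame (six bytes for the standard 8 byte messages) and appends the 16 bit result low
    # byte first; a reply could be checked the same way by recomputing the CRC over all but its last
    # two bytes.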
###################################################################################
########################## ARDUINO CLASS DEFINITION ############################
###################################################################################
class PIDcontrol():
def __init__(self,aw):
self.aw = aw
self.pidActive = False
self.sv = None # the last sv send to the Arduino
#
self.pidOnCHARGE = False
self.loadRampSoakFromProfile = False
self.loadRampSoakFromBackground = False
self.svLen = 8 # should stay at 8 for compatibility reasons!
self.svLabel = ""
self.svValues = [0]*self.svLen # sv temp as int per 8 channels
self.svRamps = [0]*self.svLen # seconds as int per 8 channels
self.svSoaks = [0]*self.svLen # seconds as int per 8 channels
self.svActions = [-1]*self.svLen # alarm action as int per 8 channels
self.svBeeps = [False]*self.svLen # alarm beep as bool per 8 channels
self.svDescriptions = [""]*self.svLen # alarm descriptions as string per 8 channels
#
self.svTriggeredAlarms = [False]*self.svLen # set to true once the corresponding alarm was triggered
# extra RS sets:
self.RSLen = 3 # can be changed to have less or more RSn sets
self.RS_svLabels = [""]*self.RSLen # label of the RS set
        self.RS_svValues = [[0]*self.svLen for _ in range(self.RSLen)] # sv temp as int per 8 channels (independent lists, not aliased)
        self.RS_svRamps = [[0]*self.svLen for _ in range(self.RSLen)] # seconds as int per 8 channels
        self.RS_svSoaks = [[0]*self.svLen for _ in range(self.RSLen)] # seconds as int per 8 channels
        self.RS_svActions = [[-1]*self.svLen for _ in range(self.RSLen)] # alarm action as int per 8 channels
        self.RS_svBeeps = [[False]*self.svLen for _ in range(self.RSLen)] # alarm beep as bool per 8 channels
        self.RS_svDescriptions = [[""]*self.svLen for _ in range(self.RSLen)] # alarm descriptions as string per 8 channels
#
self.svSlider = False
self.svButtons = False
self.svMode = 0 # 0: manual, 1: Ramp/Soak, 2: Follow (background profile)
self.svLookahead = 0
self.dutySteps = 1
self.svSliderMin = 0
self.svSliderMax = 230
self.svValue = 180 # the value in the setSV textinput box of the PID dialog
self.dutyMin = -100
self.dutyMax = 100
self.pidKp = 15.0
self.pidKi = 0.01
self.pidKd = 20.0
        # Proportional on Measurement mode see: http://brettbeauregard.com/blog/2017/06/introducing-proportional-on-measurement/
        self.pOnE = True # True for Proportional on Error mode, False for Proportional on Measurement mode
# pidSource
# either the TC4 input channel from [1,..,4] if self.qmc.device == 19 (Arduino/TC4)
# in all other cases (HOTTOP, MODBUS,..), 1 is interpreted as BT and 2 as ET
self.pidSource = 1
self.pidCycle = 1000
# the positive target should increase with positive PID duty
self.pidPositiveTarget = 0 # one of [0,1,..,4] with 0: None, 1,..,4: for slider event 1-4
# the negative target should decrease with negative PID duty
self.pidNegativeTarget = 0 # one of [0,1,..,4] with 0: None, 1,..,4: for slider event 1-4
# if invertControl is True, a PID duty of 100% delivers 0% positive duty and a 0% PID duty delivers 100% positive duty
self.invertControl = False
# PID sv smoothing
self.sv_smoothing_factor = 0 # off if 0
self.sv_decay_weights = None
self.previous_svs = []
# time @ PID ON
        self.time_pidON = 0 # in monitoring mode, ramp-soak times are interpreted w.r.t. the time after the PID was turned on and not the time after CHARGE as during recording
self.current_ramp_segment = 0 # the RS segment currently active. Note that this is 1 based, 0 indicates that no segment has started yet
self.current_soak_segment = 0 # the RS segment currently active. Note that this is 1 based, 0 indicates that no segment has started yet
self.ramp_soak_engaged = 1 # set to 0, disengaged, after the RS pattern was processed fully
self.RS_total_time = 0 # holds the total time of the current Ramp/Soak pattern
@staticmethod
def RStotalTime(ramps,soaks):
return sum(ramps) + sum(soaks)
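    # Illustrative example (comment only): RStotalTime([180,180],[0,60]) == 420 seconds,
    # i.e. that Ramp/Soak pattern runs for 7 minutes in total.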
# returns True if an external PID controller is in use (MODBUS or TC4 PID firmware)
# and False if the internal software PID is in charge
# the returned value indicates the type of external PID control:
# 0: internal PID
# 1: MODBUS
# 2: S7
# 3: TC4
def externalPIDControl(self):
# TC4 with PID firmware or MODBUS and SV register set or S7 and SV area set
if self.aw.modbus.PID_slave_ID != 0:
return 1
if self.aw.s7.PID_area != 0:
return 2
if (self.aw.qmc.device == 19 and self.aw.qmc.PIDbuttonflag):
return 3
return 0
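    # Illustrative example (comment only): if modbus.PID_slave_ID is set this returns 1 (checked
    # first), then s7.PID_area returns 2, then an Arduino/TC4 (device 19) with the PID button
    # enabled returns 3; otherwise 0 and the internal software PID is in charge.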
# v is from [-min,max]
def setEnergy(self,v):
try:
if self.aw.pidcontrol.pidPositiveTarget:
slidernr = self.aw.pidcontrol.pidPositiveTarget - 1
if self.aw.pidcontrol.invertControl:
vp = abs(100 - v)
else:
vp = v
vp = min(100,max(0,int(round(vp))))
# we need to map the duty [0%,100%] to the [slidermin,slidermax] range
heat = int(round(numpy.interp(vp,[0,100],[self.aw.eventslidermin[slidernr],self.aw.eventslidermax[slidernr]])))
self.aw.block_quantification_sampling_ticks[slidernr] = self.aw.sampling_ticks_to_block_quantifiction
self.aw.qmc.temporarymovepositiveslider = (slidernr,heat)
if self.aw.pidcontrol.pidNegativeTarget:
slidernr = self.aw.pidcontrol.pidNegativeTarget - 1
if self.aw.pidcontrol.invertControl:
vn = 0 - v
else:
vn = v
vn = min(0,max(-100,int(vn)))
# we need to map the duty [0%,-100%] to the [slidermin,slidermax] range
self.aw.block_quantification_sampling_ticks[slidernr] = self.aw.sampling_ticks_to_block_quantifiction
cool = int(round(numpy.interp(vn,[-100,0],[self.aw.eventslidermax[slidernr],self.aw.eventslidermin[slidernr]])))
self.aw.qmc.temporarymovenegativeslider = (slidernr,cool)
except Exception as e: # pylint: disable=broad-except
_log.exception(e)
def conv2celsius(self):
try:
self.aw.qmc.rampSoakSemaphore.acquire(1)
self.svValue = int(round(fromFtoC(self.svValue)))
self.svSliderMin = int(round(fromFtoC(self.svSliderMin)))
self.svSliderMax = int(round(fromFtoC(self.svSliderMax)))
            # establish new limits on sliders
self.aw.sliderSV.setMinimum(self.svSliderMin)
self.aw.sliderSV.setMaximum(self.svSliderMax)
self.pidKp = self.pidKp * (9/5.)
self.pidKi = self.pidKi * (9/5.)
self.pidKd = self.pidKd * (9/5.)
for i in range(len(self.svValues)):
if self.svValues[i] != 0:
self.svValues[i] = fromFtoC(self.svValues[i])
for n in range(len(self.RS_svValues)):
for j in range(len(self.RS_svValues[n])):
if self.RS_svValues[n][j] != 0:
self.RS_svValues[n][j] = fromFtoC(self.RS_svValues[n][j])
except Exception as e: # pylint: disable=broad-except
_log.exception(e)
finally:
if self.aw.qmc.rampSoakSemaphore.available() < 1:
self.aw.qmc.rampSoakSemaphore.release(1)
def conv2fahrenheit(self):
try:
self.aw.qmc.rampSoakSemaphore.acquire(1)
self.svValue = fromCtoF(self.svValue)
self.svSliderMin = fromCtoF(self.svSliderMin)
self.svSliderMax = fromCtoF(self.svSliderMax)
            # establish new limits on sliders
self.aw.sliderSV.setMinimum(int(round(self.svSliderMin)))
self.aw.sliderSV.setMaximum(int(round(self.svSliderMax)))
self.pidKp = self.pidKp / (9/5.)
self.pidKi = self.pidKi / (9/5.)
self.pidKd = self.pidKd / (9/5.)
for i in range(len(self.svValues)):
if self.svValues[i] != 0:
self.svValues[i] = fromCtoF(self.svValues[i])
for n in range(len(self.RS_svValues)):
for j in range(len(self.RS_svValues[n])):
if self.RS_svValues[n][j] != 0:
self.RS_svValues[n][j] = fromCtoF(self.RS_svValues[n][j])
except Exception as e: # pylint: disable=broad-except
_log.exception(e)
finally:
if self.aw.qmc.rampSoakSemaphore.available() < 1:
self.aw.qmc.rampSoakSemaphore.release(1)
def togglePID(self):
if self.pidActive:
self.pidOff()
else:
self.pidOn()
    # initializes the PID mode on PID ON and on mode switch
def pidModeInit(self):
if self.aw.qmc.flagon:
self.current_ramp_segment = 0
self.current_soak_segment = 0
self.ramp_soak_engaged = 1
self.RS_total_time = self.RStotalTime(self.svRamps,self.svSoaks)
self.svTriggeredAlarms = [False]*self.svLen
if self.aw.qmc.flagstart or len(self.aw.qmc.on_timex)<1:
self.time_pidON = 0
else:
self.time_pidON = self.aw.qmc.on_timex[-1]
if self.svMode == 1:
# turn the timer LCD color blue if in RS mode and not recording
self.aw.setTimerColor("rstimer")
# the internal software PID should be configured on ON, but not be activated yet to warm it up
def confSoftwarePID(self):
if self.aw.pidcontrol.externalPIDControl() not in [1, 2] and not(self.aw.qmc.device == 19 and self.aw.qmc.PIDbuttonflag) and self.aw.qmc.Controlbuttonflag:
# software PID
self.aw.qmc.pid.setPID(self.pidKp,self.pidKi,self.pidKd,self.pOnE)
self.aw.qmc.pid.setLimits((-100 if self.aw.pidcontrol.pidNegativeTarget else 0),(100 if self.aw.pidcontrol.pidPositiveTarget else 0))
self.aw.qmc.pid.setDutySteps(self.aw.pidcontrol.dutySteps)
self.aw.qmc.pid.setDutyMin(self.aw.pidcontrol.dutyMin)
self.aw.qmc.pid.setDutyMax(self.aw.pidcontrol.dutyMax)
self.aw.qmc.pid.setControl(self.aw.pidcontrol.setEnergy)
if self.aw.pidcontrol.svMode == 0:
self.aw.pidcontrol.setSV(self.aw.sliderSV.value())
def pidOn(self):
if self.aw.qmc.flagon:
if not self.pidActive:
self.aw.sendmessage(QApplication.translate("StatusBar","PID ON"))
self.pidModeInit()
self.aw.qmc.temporayslider_force_move = True
# TC4 hardware PID
# MODBUS hardware PID
if (self.aw.pidcontrol.externalPIDControl() == 1 and self.aw.modbus.PID_ON_action and self.aw.modbus.PID_ON_action != ""):
self.aw.eventaction(4,self.aw.modbus.PID_ON_action)
self.pidActive = True
self.aw.buttonCONTROL.setStyleSheet(self.aw.pushbuttonstyles["PIDactive"])
# S7 hardware PID
elif (self.aw.pidcontrol.externalPIDControl() == 2 and self.aw.s7.PID_ON_action and self.aw.s7.PID_ON_action != ""):
self.aw.eventaction(15,self.aw.s7.PID_ON_action)
self.pidActive = True
self.aw.buttonCONTROL.setStyleSheet(self.aw.pushbuttonstyles["PIDactive"])
elif self.aw.qmc.device == 19 and self.aw.qmc.PIDbuttonflag: # ArduinoTC4 firmware PID
if self.aw.ser.ArduinoIsInitialized:
self.confPID(self.pidKp,self.pidKi,self.pidKd,self.pidSource,self.pidCycle,self.aw.pidcontrol.pOnE) # first configure PID according to the actual settings
try:
#### lock shared resources #####
self.aw.ser.COMsemaphore.acquire(1)
if self.aw.ser.SP.isOpen():
duty_min = min(100,max(0,self.aw.pidcontrol.dutyMin))
duty_max = min(100,max(0,self.aw.pidcontrol.dutyMax))
self.aw.ser.SP.write(str2cmd("PID;LIMIT;" + str(duty_min) + ";" + str(duty_max) + "\n"))
self.aw.ser.SP.write(str2cmd("PID;ON\n"))
self.pidActive = True
self.aw.buttonCONTROL.setStyleSheet(self.aw.pushbuttonstyles["PIDactive"])
self.aw.sendmessage(QApplication.translate("Message","PID turned on"))
finally:
if self.aw.ser.COMsemaphore.available() < 1:
self.aw.ser.COMsemaphore.release(1)
# software PID
elif self.aw.qmc.Controlbuttonflag:
self.aw.qmc.pid.setPID(self.pidKp,self.pidKi,self.pidKd,self.pOnE)
self.aw.qmc.pid.setLimits((-100 if self.aw.pidcontrol.pidNegativeTarget else 0),(100 if self.aw.pidcontrol.pidPositiveTarget else 0))
self.aw.qmc.pid.setDutySteps(self.aw.pidcontrol.dutySteps)
self.aw.qmc.pid.setDutyMin(self.aw.pidcontrol.dutyMin)
self.aw.qmc.pid.setDutyMax(self.aw.pidcontrol.dutyMax)
self.aw.qmc.pid.setControl(self.aw.pidcontrol.setEnergy)
if self.aw.pidcontrol.svMode == 0:
self.aw.pidcontrol.setSV(self.aw.sliderSV.value())
self.pidActive = True
self.aw.qmc.pid.on()
self.aw.buttonCONTROL.setStyleSheet(self.aw.pushbuttonstyles["PIDactive"])
if self.sv is None:
self.setSV(self.svValue)
def pidOff(self):
if self.pidActive:
self.aw.sendmessage(QApplication.translate("Message","PID OFF"))
self.aw.setTimerColor("timer")
if self.aw.qmc.flagon and not self.aw.qmc.flagstart:
self.aw.qmc.setLCDtime(0)
# MODBUS hardware PID
if (self.aw.pidcontrol.externalPIDControl() == 1 and self.aw.modbus.PID_OFF_action and self.aw.modbus.PID_OFF_action != ""):
self.aw.eventaction(4,self.aw.modbus.PID_OFF_action)
if not self.aw.HottopControlActive:
self.aw.buttonCONTROL.setStyleSheet(self.aw.pushbuttonstyles["PID"])
self.pidActive = False
# S7 hardware PID
elif (self.aw.pidcontrol.externalPIDControl() == 2 and self.aw.s7.PID_OFF_action and self.aw.s7.PID_OFF_action != ""):
self.aw.eventaction(15,self.aw.s7.PID_OFF_action)
if not self.aw.HottopControlActive:
self.aw.buttonCONTROL.setStyleSheet(self.aw.pushbuttonstyles["PID"])
self.pidActive = False
# TC4 hardware PID
elif self.aw.qmc.device == 19 and self.aw.qmc.PIDbuttonflag and self.aw.qmc.Controlbuttonflag: # ArduinoTC4 firmware PID
if self.aw.ser.ArduinoIsInitialized:
try:
#### lock shared resources #####
self.aw.ser.COMsemaphore.acquire(1)
if self.aw.ser.SP.isOpen():
self.aw.ser.SP.reset_input_buffer() # self.aw.ser.SP.flushInput() # deprecated in v3
self.aw.ser.SP.reset_output_buffer() # self.aw.ser.SP.flushOutput() # deprecated in v3
self.aw.ser.SP.write(str2cmd("PID;OFF\n"))
self.aw.sendmessage(QApplication.translate("Message","PID turned off"))
finally:
if self.aw.ser.COMsemaphore.available() < 1:
self.aw.ser.COMsemaphore.release(1)
if not self.aw.HottopControlActive:
self.aw.buttonCONTROL.setStyleSheet(self.aw.pushbuttonstyles["PID"])
self.pidActive = False
# software PID
elif self.aw.qmc.Controlbuttonflag:
self.aw.qmc.pid.setControl(lambda _: _)
self.pidActive = False
self.aw.qmc.pid.off()
if not self.aw.HottopControlActive:
self.aw.buttonCONTROL.setStyleSheet(self.aw.pushbuttonstyles["PID"])
@pyqtSlot(int)
def sliderMinValueChanged(self,i):
self.svSliderMin = i
self.aw.sliderSV.setMinimum(self.svSliderMin)
@pyqtSlot(int)
def sliderMaxValueChanged(self,i):
self.svSliderMax = i
self.aw.sliderSV.setMaximum(self.svSliderMax)
# returns SV (or None) wrt. to the ramp-soak table and the given time t
# (used only internally)
def svRampSoak(self,t):
try:
self.aw.qmc.rampSoakSemaphore.acquire(1)
if self.ramp_soak_engaged == 0:
return None
if self.aw.qmc.flagon and not self.aw.qmc.flagstart:
self.aw.qmc.setLCDtime(self.RS_total_time-t)
segment_end_time = 0 # the (end) time of the segments
prev_segment_end_time = 0 # the (end) time of the previous segment
segment_start_sv = 0 # the (target) sv of the segment
prev_segment_start_sv = 0 # the (target) sv of the previous segment
for i in range(len(self.svValues)):
# Ramp
if self.svRamps[i] != 0:
segment_end_time = segment_end_time + self.svRamps[i]
segment_start_sv = self.svValues[i]
if segment_end_time > t:
# t is within the current segment
k = float(segment_start_sv - prev_segment_start_sv) / float(segment_end_time - prev_segment_end_time)
if self.current_ramp_segment != i+1:
self.aw.sendmessage(QApplication.translate("Message","Ramp {0}: in {1} to SV {2}".format(i+1,stringfromseconds(self.svRamps[i]),self.svValues[i])))
self.current_ramp_segment = i+1
return prev_segment_start_sv + k*(t - prev_segment_end_time)
prev_segment_end_time = segment_end_time
prev_segment_start_sv = segment_start_sv
# Soak
if self.svSoaks[i] != 0:
segment_end_time = segment_end_time + self.svSoaks[i]
segment_start_sv = self.svValues[i]
if segment_end_time > t:
                        prev_segment_start_sv = segment_start_sv # ensure that the segment sv is set even when the segment's ramp is 00:00
# t is within the current segment
if self.current_soak_segment != i+1:
self.current_soak_segment = i+1
self.aw.sendmessage(QApplication.translate("Message","Soak {0}: for {1} at SV {2}".format(i+1,stringfromseconds(self.svSoaks[i]),self.svValues[i])))
return prev_segment_start_sv
prev_segment_end_time = segment_end_time
prev_segment_start_sv = segment_start_sv
if (self.current_ramp_segment > i or self.current_soak_segment > 1) and not self.svTriggeredAlarms[i]:
self.svTriggeredAlarms[i] = True
if self.svActions[i] > -1:
self.aw.qmc.processAlarmSignal.emit(0,self.svBeeps[i],self.svActions[i],self.svDescriptions[i])
self.aw.sendmessage(QApplication.translate("Message","Ramp/Soak pattern finished"))
self.aw.qmc.setLCDtime(0)
self.ramp_soak_engaged = 0 # stop the ramp/soak process
return None
finally:
if self.aw.qmc.rampSoakSemaphore.available() < 1:
self.aw.qmc.rampSoakSemaphore.release(1)
def smooth_sv(self,sv):
if self.sv_smoothing_factor:
# create or update smoothing decay weights
if self.sv_decay_weights is None or len(self.sv_decay_weights) != self.sv_smoothing_factor: # recompute only on changes
self.sv_decay_weights = numpy.arange(1,self.sv_smoothing_factor+1)
# add new value
self.previous_svs.append(sv)
            # throw away superfluous values
self.previous_svs = self.previous_svs[-self.sv_smoothing_factor:]
# compute smoothed output
if len(self.previous_svs) < self.sv_smoothing_factor:
res = sv # no smoothing yet
else:
res = numpy.average(self.previous_svs,weights=self.sv_decay_weights)
return res
return sv
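    # Illustrative example (comment only): with sv_smoothing_factor == 3 the decay weights are
    # [1,2,3], so previous_svs [200,210,220] smooth to (200*1 + 210*2 + 220*3)/6 ~ 213.3,
    # giving the most recent SV the highest weight.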
# returns None if in manual mode or no other sv (via ramp/soak or follow mode) defined
def calcSV(self,tx):
if self.svMode == 1:
# Ramp/Soak mode
# actual time (after CHARGE) on recording and time after PID ON on monitoring:
return self.svRampSoak(tx - self.time_pidON)
if self.svMode == 2 and self.aw.qmc.background:
# Follow Background mode
followBT = True # if false, follow ET
if self.aw.qmc.device == 19 and self.aw.pidcontrol.externalPIDControl(): # in case we run TC4 with the PIDfirmware
if int(self.aw.ser.arduinoETChannel) == self.pidSource: # we observe the ET
followBT = False
elif int(self.aw.ser.arduinoBTChannel) == self.pidSource: # we observe the BT
followBT = True
else:
return None
else:
followBT = bool(self.pidSource == 1)
# if self.aw.qmc.timeindex[0] < 0 or self.aw.qmc.timeindex[6] > 0:
# # before and after DROP the SV configured in the dialog is returned (min/maxed)
# return max(self.aw.pidcontrol.svSliderMin,(min(self.aw.pidcontrol.svSliderMax,self.aw.pidcontrol.svValue)))
if self.aw.qmc.timeindex[6] > 0: # after DROP, the SV configured in the dialog is returned (min/maxed)
return max(self.aw.pidcontrol.svSliderMin, min(self.aw.pidcontrol.svSliderMax, self.aw.pidcontrol.svValue))
if self.aw.qmc.timeindex[0] < 0: # before CHARGE, the CHARGE temp of the background profile is returned
if self.aw.qmc.timeindexB[0] < 0:
# no CHARGE in background, return manual SV
return max(self.aw.pidcontrol.svSliderMin,(min(self.aw.pidcontrol.svSliderMax,self.aw.pidcontrol.svValue)))
# if background contains a CHARGE event
if followBT:
res = self.aw.qmc.backgroundBTat(self.aw.qmc.timeB[self.aw.qmc.timeindexB[0]]) # smoothed and approximated background
else: # in all other cases we observe the ET
res = self.aw.qmc.backgroundETat(self.aw.qmc.timeB[self.aw.qmc.timeindexB[0]]) # smoothed and approximated background
return self.smooth_sv(res)
if ((not self.aw.qmc.timeB or tx+self.svLookahead > self.aw.qmc.timeB[-1]) or (self.aw.qmc.timeindexB[6] > 0 and tx+self.svLookahead > self.aw.qmc.timeB[self.aw.qmc.timeindexB[6]])):
# if tx+self.svLookahead > last background data or background has a DROP and tx+self.svLookahead index is beyond that DROP index
return None # "deactivate" background follow mode
if followBT:
res = self.aw.qmc.backgroundSmoothedBTat(tx + self.svLookahead) # smoothed and approximated background
if res == -1:
return None # no background value for that time point
# j = self.aw.qmc.backgroundtime2index(tx + self.svLookahead)
# res = self.aw.qmc.stemp2B[j] # smoothed background
return self.smooth_sv(res)
# in all other cases we observe the ET
res = self.aw.qmc.backgroundSmoothedETat(tx + self.svLookahead) # smoothed and approximated background
if res == -1:
return None
# j = self.aw.qmc.backgroundtime2index(tx + self.svLookahead)
# res = self.aw.qmc.stemp1B[j] # smoothed background
return self.smooth_sv(res)
# return None in manual mode
return None
def setDutySteps(self,dutySteps):
if self.aw.qmc.Controlbuttonflag and not self.aw.pidcontrol.externalPIDControl():
self.aw.qmc.pid.setDutySteps(dutySteps)
def setSV(self,sv,move=True,init=False):
# if not move:
# self.aw.sendmessage(QApplication.translate("Message","SV set to %s"%sv))
if (self.aw.pidcontrol.externalPIDControl() == 1): # MODBUS PID and Control ticked
self.sv = max(0,sv)
if move:
self.aw.moveSVslider(sv,setValue=True)
self.aw.modbus.setTarget(sv)
self.sv = sv # remember last sv
elif (self.aw.pidcontrol.externalPIDControl() == 2): # S7 PID and Control ticked
self.sv = max(0,sv)
if move:
self.aw.moveSVslider(sv,setValue=True)
self.aw.s7.setTarget(sv,self.aw.s7.SVmultiplier)
self.sv = sv # remember last sv
elif self.aw.qmc.device == 19 and self.aw.pidcontrol.externalPIDControl(): # ArduinoTC4 firmware PID
if self.aw.ser.ArduinoIsInitialized:
sv = max(0,self.aw.float2float(sv,2))
if self.sv != sv: # nothing to do (avoid loops via moveslider!)
if move == True:
self.aw.moveSVslider(sv,setValue=True) # only move the slider
self.sv = sv # remember last sv
try:
#### lock shared resources #####
self.aw.ser.COMsemaphore.acquire(1)
if self.aw.ser.SP.isOpen():
self.aw.ser.SP.reset_input_buffer() # self.aw.ser.SP.flushInput() # deprecated in v3
self.aw.ser.SP.reset_output_buffer() # self.aw.ser.SP.flushOutput() # deprecated in v3
self.aw.ser.SP.write(str2cmd("PID;SV;" + str(sv) +"\n"))
self.sv = sv # remember last sv
finally:
if self.aw.ser.COMsemaphore.available() < 1:
self.aw.ser.COMsemaphore.release(1)
elif self.aw.qmc.Controlbuttonflag: # in all other cases if the "Control" flag is ticked
if move and self.aw.pidcontrol.svSlider:
self.aw.moveSVslider(sv,setValue=True)
self.aw.qmc.pid.setTarget(sv,init=init)
self.sv = sv # remember last sv
# set RS patterns from one of the RS sets
def setRSpattern(self,n):
try:
self.aw.qmc.rampSoakSemaphore.acquire(1)
if n < self.RSLen:
self.svLabel = self.RS_svLabels[n]
self.svValues = self.RS_svValues[n]
self.svRamps = self.RS_svRamps[n]
self.svSoaks = self.RS_svSoaks[n]
self.svActions = self.RS_svActions[n]
self.svBeeps = self.RS_svBeeps[n]
self.svDescriptions = self.RS_svDescriptions[n]
except Exception as e: # pylint: disable=broad-except
_log.exception(e)
finally:
if self.aw.qmc.rampSoakSemaphore.available() < 1:
self.aw.qmc.rampSoakSemaphore.release(1)
    # returns the first RS pattern idx with label or None
def findRSset(self,label):
try:
self.aw.qmc.rampSoakSemaphore.acquire(1)
return self.RS_svLabels.index(label)
except Exception as e: # pylint: disable=broad-except
_log.exception(e)
return None
finally:
if self.aw.qmc.rampSoakSemaphore.available() < 1:
self.aw.qmc.rampSoakSemaphore.release(1)
def adjustsv(self,diff):
if self.sv is None or self.sv<0:
self.sv = 0
self.setSV(self.sv + diff,True)
def activateSVSlider(self,flag):
if flag:
self.aw.sliderGrpBoxSV.setVisible(True)
self.aw.sliderSV.blockSignals(True)
self.aw.sliderSV.setMinimum(self.svSliderMin)
self.aw.sliderSV.setMaximum(self.svSliderMax)
            # we set the SV slider/lcd to the last SV issued or the minimum
if self.aw.pidcontrol.sv is not None:
sv = self.aw.pidcontrol.sv
else:
sv = min(self.svSliderMax, max(self.svSliderMin, self.aw.pidcontrol.svValue))
sv = int(round(sv))
self.aw.updateSVSliderLCD(sv)
self.aw.sliderSV.setValue(sv)
self.aw.sliderSV.blockSignals(False)
self.svSlider = True
self.aw.slidersAction.setEnabled(True)
else:
self.aw.sliderGrpBoxSV.setVisible(False)
self.svSlider = False
self.aw.slidersAction.setEnabled(any(self.aw.eventslidervisibilities))
def activateONOFFeasySV(self,flag):
if flag:
if self.aw.qmc.flagon:
self.aw.buttonSVp5.setVisible(True)
self.aw.buttonSVp10.setVisible(True)
self.aw.buttonSVp20.setVisible(True)
self.aw.buttonSVm20.setVisible(True)
self.aw.buttonSVm10.setVisible(True)
self.aw.buttonSVm5.setVisible(True)
else:
self.aw.buttonSVp5.setVisible(False)
self.aw.buttonSVp10.setVisible(False)
self.aw.buttonSVp20.setVisible(False)
self.aw.buttonSVm20.setVisible(False)
self.aw.buttonSVm10.setVisible(False)
self.aw.buttonSVm5.setVisible(False)
# just store the p-i-d configuration
def setPID(self,kp,ki,kd,source=None,cycle=None,pOnE=True):
self.pidKp = kp
self.pidKi = ki
self.pidKd = kd
self.pOnE = pOnE
if source is not None:
self.pidSource = source
if cycle is not None:
self.pidCycle = cycle
# send conf to connected PID
def confPID(self,kp,ki,kd,source=None,cycle=None,pOnE=True):
if (self.aw.pidcontrol.externalPIDControl() == 1): # MODBUS (external) Control active
self.aw.modbus.setPID(kp,ki,kd)
self.pidKp = kp
self.pidKi = ki
self.pidKd = kd
self.aw.sendmessage(QApplication.translate("Message","p-i-d values updated"))
elif (self.aw.pidcontrol.externalPIDControl() == 2): # S7 (external) Control active
self.aw.s7.setPID(kp,ki,kd,self.aw.s7.PIDmultiplier)
self.pidKp = kp
self.pidKi = ki
self.pidKd = kd
self.aw.sendmessage(QApplication.translate("Message","p-i-d values updated"))
elif self.aw.qmc.device == 19 and self.aw.pidcontrol.externalPIDControl(): # ArduinoTC4 firmware PID
if self.aw.ser.ArduinoIsInitialized:
try:
#### lock shared resources #####
self.aw.ser.COMsemaphore.acquire(1)
if self.aw.ser.SP.isOpen():
self.aw.ser.SP.reset_input_buffer() # self.aw.ser.SP.flushInput() # deprecated in v3
self.aw.ser.SP.reset_output_buffer() # self.aw.ser.SP.flushOutput() # deprecated in v3
if pOnE:
self.aw.ser.SP.write(str2cmd("PID;T;" + str(kp) + ";" + str(ki) + ";" + str(kd) + "\n"))
else:
self.aw.ser.SP.write(str2cmd("PID;T_POM;" + str(kp) + ";" + str(ki) + ";" + str(kd) + "\n"))
self.pidKp = kp
self.pidKi = ki
self.pidKd = kd
if source is not None:
libtime.sleep(.03)
self.aw.ser.SP.write(str2cmd("PID;CHAN;" + str(source) + "\n"))
if cycle is not None:
libtime.sleep(.03)
self.aw.ser.SP.write(str2cmd("PID;CT;" + str(cycle) + "\n"))
self.aw.sendmessage(QApplication.translate("Message","p-i-d values updated"))
finally:
if self.aw.ser.COMsemaphore.available() < 1:
self.aw.ser.COMsemaphore.release(1)
elif self.aw.qmc.Controlbuttonflag: # in all other cases if the "Control" flag is ticked
self.aw.qmc.pid.setPID(kp,ki,kd,pOnE)
self.pidKp = kp
self.pidKi = ki
self.pidKd = kd
self.pOnE = pOnE
self.aw.qmc.pid.setLimits((-100 if self.aw.pidcontrol.pidNegativeTarget else 0),(100 if self.aw.pidcontrol.pidPositiveTarget else 0))
self.aw.sendmessage(QApplication.translate("Message","p-i-d values updated"))
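    # Summary of the ArduinoTC4 serial commands issued by setSV()/confPID() above
    # (each written to aw.ser.SP and terminated with "\n"):
    #   "PID;SV;<value>"             set the target temperature
    #   "PID;T;<kp>;<ki>;<kd>"       set the p-i-d terms (pOnE=True)
    #   "PID;T_POM;<kp>;<ki>;<kd>"   set the p-i-d terms (pOnE=False)
    #   "PID;CHAN;<source>"          select the PID input channel
    #   "PID;CT;<cycle>"             set the PID cycle time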
###################################################################################
########################## DTA PID CLASS DEFINITION ############################
###################################################################################
# documentation
# http://www.deltaww.hu/homersekletszabalyozok/DTA_series_temperature_controller_instruction_sheet_English.pdf
class DtaPID():
def __init__(self,aw):
self.aw = aw
#refer to Delta instruction manual for more information
#dictionary "KEY": [VALUE,ASCII_MEMORY_ADDRESS] note: address contains hex alpha characters
self.dtamem={
"pv": [0,"4700"], # process value (temperature reading)
"sv": [100.0,"4701"], # set point
"p": [5,"4708"], # p value 0-9999
"i": [240,"4709"], # i value 0-9999
"d": [60,"470A"], # d value 0-9999
"duty" : [0,"471D"], # duty
"sensortype": [0,"4710"], # 0 = K type1; 1 = K type2; 2 = J type1; 3 = J type2
# 4 = T type1; 5 = T type2; 6 = E ; 7 = N; 8 = R; 9 = S; 10 = B
# 11 = JPT100 type1; 12 = JPT100 type2; 13 = PT100 type1; 14 = PT100 type2
# 15 = PT100 type3; 16 = L ; 17 = U; 18 = Txk
"controlmethod":[0,"4711"], # 0 = pid; 1 = ON/OFF; 2 = manual
"units":[1,"4717"], # units C = 1; F = 2
"controlsetting":[1,"4719"], # 1=Run; 0 = Stop
"error":[0,"472B"] # note: read only memory. Values:
# 0 = Normal,1 = Initial process; 2 = Initial status;
# 3 = sensor not connected; 4 = sensor input error
# 5 = Exceeds max temperature; 6 = Number Internal error
# 7 EEPROM error
}
#command string = ID (ADR)+ FUNCTION (CMD) + ADDRESS + NDATA + LRC_CHK
def writeDTE(self,value,DTAaddress):
newsv = hex(int(abs(float(str(value)))))[2:].upper()
slaveID = self.aw.ser.controlETpid[1]
if self.aw.ser.controlETpid[0] != 2: # control pid is not a DTA PID
slaveID = self.aw.ser.readBTpid[1]
command = self.aw.dtapid.message2send(slaveID,6,str(DTAaddress),newsv)
self.aw.ser.sendDTAcommand(command)
def message2send(self,unitID,FUNCTION,ADDRESS, NDATA):
#compose command
string_unitID = str(unitID).zfill(2)
string_FUNCTION = str(FUNCTION).zfill(2)
string_ADDRESS = ADDRESS #ADDRESS is a 4 char string
string_NDATA = str(NDATA).zfill(4)
cmd = string_unitID + string_FUNCTION + string_ADDRESS + string_NDATA
checksum = hex(self.DTACalcChecksum(cmd))[2:].zfill(2).upper()
command = ":" + cmd + checksum + "\r\n"
return command
@staticmethod
def DTACalcChecksum(string):
def tobin(x, count=8):
return "".join([str((x>>y)&1) for y in range(count-1, -1, -1)])
def twoscomp(num_str):
return tobin(-int(num_str,2),len(num_str))
length = len(string)
        # the command string passed from message2send() does not include the leading ':'
count = 0
val = 0x00
while count < length:
            val += int(string[count] + string[count+1], 16) # add one byte (two hex chars) per step; assumes an even-length string
count += 2
h_bs = bin(val)[2:]
h2comp = twoscomp(h_bs)
rval = int(h2comp,2)
if (val & 0x80) == 0:
rval = rval | 0x80
return rval
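# Illustrative sketch (not part of the original Artisan sources): composing a Delta DTA
# frame by hand, e.g. writing SV = 100 (0x64) to register 4701 on slave 1. message2send()
# zero-pads the fields, appends the checksum computed by DTACalcChecksum() over the 12
# payload characters (the leading ':' is excluded), and frames the result as
# ":" + "01" + "06" + "4701" + "0064" + <checksum> + "\r\n".
#
#   dta = DtaPID(aw)                             # aw is the Artisan application window
#   payload = hex(100)[2:].upper()               # "64"; message2send pads it to "0064"
#   cmd = dta.message2send(1, 6, "4701", payload)
#   aw.ser.sendDTAcommand(cmd)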
|
artisan-roaster-scope/artisan
|
src/artisanlib/pid_control.py
|
Python
|
gpl-3.0
| 97,075
|
[
"ESPResSo"
] |
c811a75973b406b7f02ada56a3fab4286db81fa805b393ccecc415d1db39861c
|
import numpy as np
from collections import deque
from astropy.table import Table
from scipy.optimize import fmin_bfgs,check_grad,approx_fprime
#from scipy.optimize.linesearch import (line_search_BFGS, line_search_wolfe1, line_search_wolfe2, line_search_wolfe2 as line_search)
#from optimize import fmin_bfgs
import copy
import matplotlib.pyplot as plt
import sys
from astropy import log
K=4*np.log(2.0)
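# Layout of the 11-element parameter vector used throughout this module
# (as inferred from setInit() and get_jaco() below):
#   par[0]          peak amplitude above the background
#   par[1]          background level
#   par[2], par[3]  x centre and intrinsic x FWHM
#   par[4], par[5]  y centre and intrinsic y FWHM
#   par[6]          rotation angle of the elliptical spatial profile
#   par[7], par[8]  velocity centre and intrinsic velocity FWHM
#   par[9], par[10] velocity gradients along x and y
# When the background is held fixed (fixback), par[1] is dropped from the free
# parameters and re-inserted as 0.0 by the chi2/jac_chi2 wrappers below.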
def jac_chi2(par,gc):
# If the background is fixed, include zero background value
val=np.nan*np.ones_like(par)
if par[ 3 ] > 0.0 and par[ 5 ] > 0.0 and par[ 8 ] > 0.0:
if gc.fixback:
par=np.insert(par,1,0.0)
        # update computations if necessary
gc.update_comp(par)
val=gc.get_jaco(par)
return val
def chi2(par,gc):
val=1e1000
# If the background is fixed, include zero background value
if par[ 3 ] > 0.0 and par[ 5 ] > 0.0 and par[ 8 ] > 0.0:
if gc.fixback:
par=np.insert(par,1,0.0)
        # update computations if necessary
gc.update_comp(par)
val=gc.get_chi2(par)
return val
class GaussClumps:
def __init__(self):
self.defaultParams()
def defaultParams(self):
self.par=dict()
        # Spectral Resolution in pixels (smoothing function)
self.par['VELORES']=2.0
        # Beam resolution in pixels (smoothing function)
self.par['FWHMBEAM']=2.0
        # The maximum allowed number of failed fits between successful fits.
self.par['MAXSKIP']=10
# Maximum Clumps
        self.par['MAXCLUMPS']=sys.maxsize
# The iterative process ends when "npad" consecutive clumps all had peak
# values below "peak_thresh" or all had areas below "area_thresh".
self.par['NPAD']=10
# The lower threshold for clump peaks to a user-specified multiple of the RMS noise.
self.par['THRESH']=2.0
# The lower threshold for clump area to a user-specified number of pixels.
self.par['MINPIX']=3
# The lowest value (normalised to the RMS noise level) at which
# model Gaussians should be evaluated.
self.par['MODELMIN']=0.5
# The max allowed fraction of bad pixels in a clump.
# self.par['MAXBAD']=0.05
# No.of standard deviations at which to reject peaks
self.par['NSIGMA']=3.0
# But reject peaks only if at least NPEAKS were found
self.par['NPEAKS']=9
# Parameters which control the modification of the weights done by
# the chi2 (this modification is meant to give low weights to pixels
        # which do not influence the Gaussian model)
self.par['NWF']= 10
self.par['MINWF']=0.8
self.par['MAXWF']=1.1
# Maximum number of function evaluations to be used when fitting an
# individual clump.
self.par['MAXNF']=100
# Chi-square stiffness parameter "Sa" which encourages the peak
# amplitude of the fitted gaussian close to the maximum value in the
# observed data.
self.par['SA']=1.0
# Chi-square stiffness parameter "Sb" which encourages the
# background value to stay close to its initial value. This is an extra
# stiffness added by DSB which is not in the Stutzki & Gusten paper. It
# is used because the background value is usually determined by data
# points which have very low weight and is thus poorly constrained. It
        # would thus be possible to get completely erroneous background values
# without this extra stiffness.
self.par['SB']=0.1
# Chi-square stiffness parameter "S0" which encourages the peak
# amplitude of the fitted gaussian to be below the maximum value in the
# observed data.
self.par['S0']=1.0
# Chi-square stiffness parameter "Sc" which encourages the peak
# position of the fitted gaussian to be close to the peak position in the
# observed data.
self.par['SC']=1.0
# The ratio of the weighting function FWHM to the observed FWHM. */
self.par['WWIDTH']=2.0
# The value for which the weight is considered already zero
self.par['WMIN']=0.05
def get_jaco(self,par):
sa=self.par['SA']
sb=self.par['SB']
sc=self.par['SC']
jaco=np.zeros(11)
mod=np.zeros(11)
t=self.peakfactor*self.expv
jaco[0]=-2*t.dot(self.wres)/self.wsum
jaco[1]=-2*self.wres.sum()/self.wsum
ddx=self.X/self.sx2
ddy=self.Y/self.sy2
ddv=self.vt_off/self.sv2
mterm=self.peak*self.expv
t = -K*(-2*(ddx*self.cosv - ddy*self.sinv) + 2*par[9]*ddv)
t *= mterm
jaco[2]=-2*t.dot(self.wres)/self.wsum
t = -K*(-2*ddx*ddx*par[3]) + self.f3
t *= mterm
jaco[3]=-2*t.dot(self.wres)/self.wsum
t = -K*(-2*(ddx*self.sinv + ddy*self.cosv) + 2*par[10]*ddv)
t *= mterm
jaco[4]=-2*t.dot(self.wres)/self.wsum
t = -K*(-2*ddy*ddy*par[5]) + self.f5
t *= mterm
jaco[5]=-2*t.dot(self.wres)/self.wsum
t = -K*(-2*(ddx*(self.x_off*self.sinv - self.y_off*self.cosv) + ddy*(self.x_off*self.cosv + self.ym_off*self.sinv)))
t *= mterm
jaco[6]=-2*t.dot(self.wres)/self.wsum
t = -K*(-2*ddv)
t *= mterm
jaco[7]=-2*t.dot(self.wres)/self.wsum
t = -K*(-2*ddv*ddv*par[8]) + self.f8
t *= mterm
jaco[8]=-2*t.dot(self.wres)/self.wsum
t = -K*(-2*ddv*self.x_off)
t *= mterm
jaco[9]=-2*t.dot(self.wres)/self.wsum
t = -K*(-2*ddv*self.y_off)
t *= mterm
jaco[10]=-2*t.dot(self.wres)/self.wsum
# second pass
jaco[0]+=2*sa*self.pdiff*self.peakfactor
jaco[1]+=2*sa*self.pdiff + 2*sb*self.back_term
jaco[2]+=2*4*sc*self.xm_off/self.bfsq
jaco[3]+=2*sa*self.pdiff*self.f3*par[0]*self.peakfactor
jaco[4]+=2*4*sc*self.ym_off/self.bfsq
jaco[5]+=2*sa*self.pdiff*self.f5*par[0]*self.peakfactor
jaco[7]+=2*4*sc*self.vm_off/self.velsq
jaco[8]+=2*sa*self.pdiff*self.f8*par[0]*self.peakfactor
if self.fixback:
            jaco = np.delete(jaco, 1)  # np.delete returns a copy; drop the gradient of the fixed background
return jaco
def get_chi2(self,par):
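        # Summary of the merit function evaluated below (matching the code, not an
        # independent derivation):
        #   chi2 = sum(w_i*rho_i*r_i^2)/sum(w_i)
        #          + SA*pdiff^2
        #          + 4*SC*((dx^2 + dy^2)/FWHMBEAM^2 + dv^2/VELORES^2)
        #          + SB*back_term^2
        # where r_i are the residuals, w_i the Gaussian-window (and adaptively
        # modified) weights, rho_i the S0 penalty factor (1 where the data exceed
        # the model, S0+1 otherwise), pdiff the beam-corrected model peak minus the
        # data peak, (dx, dy, dv) the offset of the model centre from the data peak,
        # and back_term the change of the background from its initial value.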
sa=self.par['SA']
sb=self.par['SB']
sc=self.par['SC']
chi2=self.wres.dot(self.res)
chi2/=self.wsum
off = (self.xm_off*self.xm_off + self.ym_off*self.ym_off )/self.bfsq
off += self.vm_off*self.vm_off/self.velsq
chi2 += sa*self.pdiff*self.pdiff + 4*sc*off + sb*self.back_term*self.back_term
return chi2
def update_results(self,clump,lb,ub):
self.update_comp(clump)
ff=self.model - clump[1]
#print lb,ub
lb=self.data.fix_limits(lb)
ub=self.data.fix_limits(ub)
#print lb,ub
#print ub[0]-lb[0],ub[1]-lb[1],ub[2]-lb[2]
#print ff.shape
ff=ff.reshape(ub[0]-lb[0],ub[1]-lb[1],ub[2]-lb[2])
ff*=self.par['RMS']
self.data.add_flux(-ff,lb,ub)
ccode=self.caa.data.max() + 1
caaff=self.caa.data[slice(lb[0],ub[0]),slice(lb[1],ub[1]),slice(lb[2],ub[2])]
synff=self.syn.data[slice(lb[0],ub[0]),slice(lb[1],ub[1]),slice(lb[2],ub[2])]
tmpff=ff.copy()
tmpff[tmpff<self.par['RMS']]=0
caaff[synff<tmpff]=ccode
self.caa.replace_flux(caaff,lb,ub)
self.syn.add_flux(ff,lb,ub)
#TODO: improve area computation, right now using weigth ub, lb
area=(ub-lb).sum()
csum=ff.sum()
return (csum,area)
def update_comp(self,par):
if np.array_equal(par,self.old_par):
return
self.old_par=par
self.back_term=par[1] - self.guess[1]
# Unpack parameters
nwf=self.par['NWF']
minwf=self.par['MINWF']
maxwf=self.par['MAXWF']
s0=self.par['S0']
# Get the factor by which to correct the peak amplitude of the model to
# take account of the smoothing by the instrumental beam.
t = par[3]*par[3]
sx2 = self.bfsq + t
f3 = self.bfsq/(par[3]*sx2)
peakfactor = t/sx2
t = par[5]*par[5]
sy2 = self.bfsq + t
f5 = self.bfsq/(par[5]*sy2)
peakfactor *= t/sy2
t = par[8]*par[8]
sv2 = self.velsq + t
f8 = self.velsq/(par[8]*sv2)
peakfactor *= t/sv2
if peakfactor > 0.0:
peakfactor = np.sqrt(peakfactor)
else:
peakfactor = 0.0
self.peak=par[0]*peakfactor
self.sx2=sx2
self.sy2=sy2
self.sv2=sv2
self.f3 = f3
self.f5 = f5
self.f8 = f8
self.peakfactor=peakfactor
# The difference between the model peak value (after being reduced to
# take account of instrumental smoothing) and the data peak value.
self.pdiff = self.peak + par[1] - self.valmax
# The offset from the model centre to the data peak
xm_off = par[2] - self.cval[0]
ym_off = par[4] - self.cval[1]
vm_off = par[7] - self.cval[2]
# Get the Gaussian model. Store the residual between the Gaussian model and data
self.cosv = np.cos(par[6])
self.sinv = np.sin(par[6])
x_off=self.feat[0] - par[2]
y_off=self.feat[1] - par[4]
v_off=self.feat[2] - par[7]
X = x_off*self.cosv + y_off*self.sinv
Y = -x_off*self.sinv + y_off*self.cosv
em = ( X*X/sx2 ) + ( Y*Y/sy2 )
self.vt_off=v_off - par[9]*x_off - par[10]*y_off
em += self.vt_off*self.vt_off/sv2
expv = np.exp( -K*em )
self.expv=expv
model=self.peak*expv+ par[1]
res= self.val - model
self.X=X
self.Y=Y
self.x_off=x_off
self.y_off=y_off
self.v_off=v_off
# If the changing of the model parameters make little difference to the
# residuals at a given place in the data, then those residuals should be
# given less weight since they could dominate the chi-squared value. If
        # the residual at the current pixel has not changed by much since the
# previous call, reduce the weight associated with the pixel. However,
        # if the parameter has not changed by much then you would not expect the
# residuals to change by much. Therefore, do not reduce the weight by so
# much if the model value at this pixel has not changed by much since the
# last call. In order to avoid instability, we only do this modification
# for a few iterations near the start, and then allow the fitting
# process to complete with fixed weights.
if (not self.fixback) and (self.nf > 2) and (self.nwm <= nwf):
# Only modify the weights if the background has changed. Without this,
# the outlying background regions would be given low weights if the
# background has not changed, resulting in the background being poorly
# determined.
if self.bg != 0.0:
dbg=(par[1] - self.bg)/self.bg > 0.001
else:
dbg=(par[1] != 0.0)
if dbg:
wf=(res-self.res)/res
wf/=(model - self.model)/model
wf=np.abs(wf)
wf[wf<minwf]=minwf
wf[wf>maxwf]=maxwf
wf[np.isnan(wf)]=1.0
self.we*=wf
self.we[self.we > 1.0]=1.0
self.nwm+=1
self.model=model
self.res=res
self.xm_off=xm_off
self.ym_off=ym_off
self.vm_off=vm_off
# Determine a scale factor which encourages the fitted intensity to stay
# below the observed intensity. This does the same job as the
# "s0.exp( Yi_fit - Yi )" term in the chi-squared expression given in
        # the Stutzki & Gusten paper. The form used here was inherited from the
# implementation of GaussClumps (obtained from
# ftp.astro.uni-bonn.de/pub/heith/gaussclumps on 27/9/05) upon which this
# implementation was based.
rr = (s0+1)*np.ones_like(res)
rr[res > 0.0]= 1.0
# Compute the sum of chi-squared. We save the scaled residuals
# in a work array (pr) so that we do not need to calculate them again if
# this function is called subsequently to find the gradient for the same
        # set of parameter values.
self.wsum = self.we.sum()
self.wres = self.we*res*rr
# Remember the background value for next time.
self.bg = par[1]
# Update nf
self.nf+=1
def optimize(self):
# Unpack used parameters
maxnf=self.par['MAXNF']
wwidth=self.par['WWIDTH']
wmin=self.par['WMIN']
rms=self.par['RMS']
velres=self.par['VELORES']
beamfwhm=self.par['FWHMBEAM']
self.bfsq=beamfwhm*beamfwhm
self.velsq=velres*velres
# Gaussian Window
# The factor which scales the FWHM on each axis to the half-width of the
        # section of the data array to be fitted.
beta=0.5*wwidth*np.sqrt(-np.log( wmin )/ np.log( 2.0 ) )
(ld,lu)=(np.rint(self.cval-beta*self.fobs),np.rint(self.cval+beta*self.fobs))
lb=np.array([ld,lu]).min(axis=0)
ub=np.array([ld,lu]).max(axis=0)
lb=lb[::-1]
ub=ub[::-1]
# Store the data normalised to the
# RMS noise level. Also calculate and store the Gaussian weight for the
# pixel.
self.val=self.data.cut(lb,ub).copy().ravel()
self.feat=self.data.index_features(lb,ub)
xw_off=(self.feat[0] - self.cval[0])/(self.fobs[0]*wwidth)
yw_off=(self.feat[1] - self.cval[1])/(self.fobs[1]*wwidth)
vw_off=(self.feat[2] - self.cval[2])/(self.fobs[2]*wwidth)
        self.we=np.exp(-K*(xw_off*xw_off + yw_off*yw_off + vw_off*vw_off))  # Gaussian weight from the x, y and velocity offsets
self.we[self.we < wmin]=0.0
# Normalise all other data values in the guess structure and in the
# array to the RMS noise level.
self.val=self.val/rms
self.valmax /= rms
guess=self.guess
guess[1] /= rms
guess[0] /= rms
# Number of invocations of the function
self.nf=0
self.nwm=0
# Get the factor by which to correct the peak amplitude of the model to
# take account of the smoothing by the instrumental beam.
t = guess[3]*guess[3]
dx0_sq = self.bfsq + t
peakfactor = t/dx0_sq
t = guess[5]*guess[5]
dx1_sq = self.bfsq + t
peakfactor *= t/dx1_sq
t = guess[8]*guess[8]
dv_sq = self.velsq + t
peakfactor *= t/dv_sq
# Do the correction.
if peakfactor > 0.0:
guess[0] /= np.sqrt(peakfactor)
if self.fixback:
            guess = np.delete(guess, 1)  # background is fixed; remove it from the free parameters
marg=(self,)
retval=fmin_bfgs(chi2, guess,args=marg,disp=False,full_output=True,fprime=jac_chi2)
# Unpack results
(xopt,fopt,gopt,Bopt,func_calls,grad_calls,warnflag)=retval
if warnflag!=0 and self.fixback:
self.fixback=False
(xopt,fopt,gopt,Bopt,func_calls,grad_calls,warnflag)=fmin_bfgs(chi2, guess,args=marg,fprime=jac_chi2,maxiter=maxnf,disp=True)
if self.fixback:
            xopt = np.insert(xopt, 1, self.bg)  # re-insert the fixed background value
if (xopt == self.guess).all():
xopt=None
# TODO: come back to normality!
return xopt,lb,ub
# TODO: Document this stuff (using cupid code...)
def profWidth(self,dim):
rms=self.par['RMS']
if dim==0:
vn=[0,0,-1]
vp=[0,0,1]
fwhm=self.par['FWHMBEAM']
elif dim==1:
vn=[0,-1,0]
vp=[0,1,0]
fwhm=self.par['FWHMBEAM']
else:
vn=[-1,0,0]
vp=[1,0,0]
fwhm=self.par['VELORES']
# left search for significant minima
left=np.array(self.imax)
prev=np.nan
vlow=self.data.data[self.imax]
plow=self.imax
csum=0.0
nsum=0
while True:
left+=vn
if (left < np.zeros(3)).any():
left-=vn
break
val=self.data.data[tuple(left)]
            if np.ma.is_masked(val) or np.isnan(val):
prev=np.nan
continue
            if val < vlow and not np.isnan(prev) and prev - val < 1.5*rms:
vlow=val
plow=left.copy()
#print "low cand",plow
csum=0.0
nsum=0
else:
csum+=val
nsum+=1
if csum/nsum - vlow >= 3*rms/np.sqrt(nsum) and nsum >= fwhm:
break
prev=val
vlow+=rms
#print "low",vlow,plow
# Do the same working upwards from the peak to upper axis values.
prev=np.nan
vup=self.data.data[self.imax]
pup=self.imax
csum=0.0
nsum=0
right=np.array(self.imax)
while True:
right+=vp
if (right >= np.array(self.data.data.shape)).any():
right-=vp
break
val=self.data.data[tuple(right)]
            if np.ma.is_masked(val) or np.isnan(val):
prev=np.nan
continue
            if val < vup and not np.isnan(prev) and prev - val < 1.5*rms:
vup=val
pup=right.copy()
csum=0.0
nsum=0
else:
csum+=val
nsum+=1
if csum/nsum - vup >= 3*rms/np.sqrt(nsum) and nsum >= fwhm:
break
prev=val
vup+=rms
#print "up",vup,pup
try:
            off = min(vlow, vup) + rms
except ValueError:
print(vlow,vup,rms)
print(self.imax)
self.data.data[self.imax]
sys.exit()
if vlow < vup:
hgt=self.valmax - vlow
cand=self.data.data[plow[0]:self.imax[0]+1,plow[1]:self.imax[1]+1,plow[2]:self.imax[2]+1]
try:
cand=cand[0][0]
except IndexError:
print(cand)
print(plow, self.imax)
sys.exit()
cand-=vlow
cand=cand[::-1]
default=(self.imax-plow).sum()/2.0
else:
hgt=self.valmax - vup
cand=self.data.data[self.imax[0]:pup[0]+1,self.imax[1]:pup[1]+1,self.imax[2]:pup[2]+1]
cand=cand[0][0]
            cand = np.delete(cand, 0)  # drop the peak pixel itself
cand-=vup
default=(np.array(self.imax)-np.array(pup)).sum()/2.0
cand=cand/hgt
idx=np.arange(1,cand.size+1)
cand=np.ma.fix_invalid(cand,fill_value=0.0)
idx=idx[cand>0.25]
cand=cand[cand>0.25]
idx=idx[cand<0.75]
cand=cand[cand<0.75]
if cand.size!=0:
default=1.665*(idx/np.log(cand)).sum()/cand.size
return (default,off)
def setInit(self):
# Unpack used parameters
beamfwhm=self.par['FWHMBEAM']
velres=self.par['VELORES']
rms=self.par['RMS']
guess=np.zeros(11)
# Get a guess at the observed clump fwhm by forming a radial profile and
# finding the distance to the first significant minimum. This also increments
# "off" by the minimum (i.e. base line) data value in the profile. Do
# this for both spatial axes, and then take the mean (i.e. we assume the
# clump is circular as an initial guess)
self.fobs=np.zeros(3)
off=np.zeros(3)
self.fobs[0],off[0] = self.profWidth(0)
self.fobs[1],off[1] = self.profWidth(1)
self.fobs[2],off[2] = self.profWidth(2)
# TODO: Small Hack
if self.fobs[2]<velres+0.1:
self.fobs[2]=velres+0.1
fbeam=0.5*(self.fobs[0] + self.fobs[1])/beamfwhm
if fbeam < 1.0:
fbeam=1.2
self.fobs[0] = fbeam*beamfwhm
self.fobs[1] = fbeam*beamfwhm
# Store the Guessed model
self.cval=np.array([self.imax[2],self.imax[1],self.imax[0]])
guess[2]=self.cval[0]
guess[4]=self.cval[1]
guess[7]=self.cval[2]
# Find the initial guess at the intrinsic FWHM (i.e. the FWHM of the
# clump before being blurred by the instrument beam). Do the same for
# the second axis. Assume zero rotation of the elliptical clump shape.
guess[3]=np.sqrt(fbeam*fbeam- 1.0 )*beamfwhm
guess[5]=guess[3]
guess[6]=0.0
# Now do the same for the third (velocity) axis if necessary. Assume
# zero velocity gradient
fvel=self.fobs[2]/velres
guess[8]=np.sqrt(fvel*fvel- 1.0 )*velres
if np.isnan(guess[8]):
print(fvel,self.fobs[2],fvel*fvel)
guess[9]=0.0
guess[10]=0.0
# Store the mean of the background estimates, and the peak value. Noise
# will result in the peak data value being larger than the peak clump value
# by about the RMS noise. Therefore, reduce the peak value by the RMS.
guess[1] = off.sum()/3
guess[0] = self.valmax - guess[1] - rms
# Negative background levels are unphysical (since it is assumed that
# any background has already been removed from the data before running
# this algorithm (TODO: CHECK)). However, an apparent negative background can be formed by
        # a previous ill-positioned fit resulting in negative residuals. Therefore
# we have to guard against negative backgrounds. If the initial background
# estimate is significantly less than zero, then set it to zero, and
# indicate that the background value should be fixed (i.e. not included
# as a free parameter in the fitting process). Here, "significant" means
# more than 5% of the total peak height. */
self.fixback=False
if guess[1] < -np.abs(guess[ 0 ]*0.05):
guess[0] += guess[1]
guess[1] = 0.0
self.fixback = True
self.guess=guess
self.old_par=None
def fit(self,cube,verbose=False,use_meta=True):
FWHM_TO_SIGMA = 1. / (8 * np.log(2))**0.5
# Set the RMS, or automatically find an estimate for it
        if 'RMS' not in self.par:
rms=cube.rms()
self.par['RMS']=rms
# TODO: set parameters according to meta
# Unpack used parameters
npeaks=self.par['NPEAKS']
mlim=self.par['MODELMIN']
peak_thresh=self.par['THRESH']
area_thresh=self.par['MINPIX']
maxclump=self.par['MAXCLUMPS']
npad=self.par['NPAD']
maxskip=self.par['MAXSKIP']
nsig=self.par['NSIGMA']
# Copy the supplied cube into a work cube which will hold the
# residuals remaining after subtraction of the fitted Gaussians.
self.data=cube.copy()
self.syn=cube.empty_like()
self.caa=cube.empty_like()
# Initialise the number of clumps found so far.
iclump = 0
# Indicate that no peaks have been found below the lower threshold for clump
# peak values, or below the lower area threshold.
peaks_below = 0
area_below = 0
# Initialise the variables used to keep track of the mean and standard
# deviation of the most recent "npeak" fitted peak values.
mean_peak = 0.0
sigma_peak = 0.0
# The value most recently added to "peaks"
new_peak = 0.0
# Sum of the values in "peaks"
sum_peak = 0.0
# Sum of the squares of the values in "peaks"
sum_peak2 = 0.0
# Number of pixels contributing to the clump
area=0
# Iterations performed so far
niter = 0
iterate = True
# No. of failed fits since last good fit
nskip = 0
# Sum of the values in all the used clumps so far
sumclumps = 0.0
# Sum of the supplied data values
sumdata = self.data.flux()
# peaks contains the last npeaks...
peaks=np.zeros(npeaks)
clist=Table(names=("Intensity", "Offset", "RA mu", "RA std","DEC mu", "DEC std","Angle","FREQ mu","FREQ std","RA vel grad","DEC vel grad"),dtype=('f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','f8'))
clist.meta=cube.meta
# Loop round fitting a gaussian to the largest remaining peak in the
# residuals array. */
while iterate:
# Report the iteration number to the user if required.
niter+=1
if verbose:
log.info("Iteration: "+str(niter))
# Find the cube index of the element with the largest value in the residuals cube.
# imax: Index of element with largest residual
(self.valmax,self.imax) = self.data.max()
# Finish iterating if all the residuals are bad, or if too many iterations
            # have been performed since the last successfully fitted clump.
if np.isnan(self.imax).any():
iterate = False
niter-=1
if verbose:
log.info("There are no good pixels left to be fitted.")
continue
elif nskip > maxskip:
iterate = False
niter-=1
if verbose:
log.info("The previous "+str(maxskip)+" fits were unusable.")
continue
# If not, make an initial guess at the Gaussian clump parameters centred on the current peak.
self.setInit()
# Find the best fitting parameters, starting from the above initial guess.
(clump,lb,ub)=self.optimize()
# If no fit could be performed, then found = False
            if clump is not None:
# Skip this fit if we have an estimate of the standard deviation of the
# "npeaks" most recent clump peak values, and the peak value of the clump
# just fitted is a long way (more than NSIGMA standard deviations) from the
# peak value of the previously fitted clump. Also skip it if the peak
# value is less than the "mlim" value.
if (peaks.size == 0 or iclump < npeaks or np.abs(clump[0] - new_peak) < nsig*sigma_peak ) and clump[0] > mlim:
# Record the new peak value for use with the next peak, and update the
# standard deviation of the "npeaks" most recent peaks. These values are
# stored cyclically in the "peaks" array. */
if peaks.size > 0:
                        peaks = np.roll(peaks, 1)  # np.roll returns a copy; keep the cyclic buffer updated
new_peak = clump[0]
old_peak = peaks[0]
peaks[0]=new_peak
sum_peak += new_peak - old_peak
sum_peak2 += new_peak*new_peak - old_peak*old_peak
if sum_peak2 < 0.0:
sum_peak2 = 0.0
mean_peak = sum_peak/npeaks
sigma_peak = np.sqrt(sum_peak2/npeaks - mean_peak*mean_peak)
# Increment the number of peaks found.
iclump+=1
# Reset the number of failed fits since the last good fit. */
nskip = 0
# Remove the model fit (excluding the background) from the residuals.
                    # This also creates data values associated with the clumps
# The standard deviation of the new residuals is returned. */
if clump[0] >= peak_thresh:
#record clump
                        clist.add_row(tuple(clump))
(csum,area)=self.update_results(clump,lb,ub)
sumclumps+=csum
# TODO: implement this!
# Display the clump parameters on the screen if required. */
#cupidGCListClump( iclump, ndim, x, chisq, slbnd, rms, status )
# If this clump has a peak value which is below the threshold, increment
# the count of consecutive clumps with peak value below the threshold.
# Otherwise, reset this count to zero.
if clump[0] < peak_thresh:
self.data.data.mask[self.imax]=True
peaks_below+=1
else:
peaks_below=0
# If this clump has an area which is below the threshold, increment
# the count of consecutive clumps with area below the threshold.
# Otherwise, reset this count to zero.
if area < area_thresh:
area_below+=1
else:
area_below=0
# If the maximum number of clumps have now been found, exit.*/
if iclump == maxclump:
iterate = False
if verbose:
log.info("The specified maximum number of clumps ("+str(maxclump)+") have been found.")
# If the integrated data sum in the fitted gaussians exceeds or equals
# the integrated data sum in the input, exit.
elif sumclumps >= sumdata:
iterate = False
if verbose:
log.info("The total data sum of the fitted Gaussians ("+str(sumclumps)+") has reached the total data sum in the supplied data ("+str(sumdata)+").")
# If the count of consecutive peaks below the threshold has reached
# "Npad", terminate.
elif peaks_below == npad:
iterate = False
if verbose:
log.info("The previous"+str(npad)+"clumps all had peak values below the threshold.")
# If the count of consecutive clumps with area below the threshold has reached
# "Npad", terminate.
elif area_below == npad:
iterate = False
if verbose:
log.info("The previous "+str(npad)+" clumps all had areas below the threshold.")
# If the peak value fitted is very different from the previous fitted peak
# value, set the residuals array element bad in order to prevent the
# algorithm from trying to fit a peak to the same pixel again.
else:
self.data.data.mask[self.imax]=True
new_peak = 0.5*(new_peak + clump[0])
nskip+=1
if verbose:
log.info("Clump rejected due to aberrant peak value. Ignoring Pixel...")
# Tell the user if no clump could be fitted around the current peak
# pixel value
else:
nskip+=1
if verbose:
log.info("No clump fitted (optimization falied). Ignoring Pixel...")
# Set the specified element of the residuals array bad if no fit was
                # performed. This prevents any subsequent attempt to fit a Gaussian
# to the same peak value.
self.data.data.mask[self.imax]=True
if verbose:
log.info("GaussClump finished normally")
# TODO: Usable Clumps
## Tell the problems with the clumps. */
#if nclump == 0:
# print "No usable clumps found."
#if iclump - nclump >= 1:
# print iclump - nclump,"clump(s) rejected because they touch an edge of the data array."
## Tell the user how many iterations have been performed (i.e. how many
## attempts there have been to fit a Gaussian peak
#if niter == 1:
# print "No fit attempted."
#else:
# print "Fits attempted for ",iclump," candidate clumps (",niter-iclump," failed)."
return clist
# def fit_up_to_SNR(self,cube,snr,verbose=False,):
#
# FWHM_TO_SIGMA = 1. / (8 * np.log(2))**0.5
# # Set the RMS, or automatically find an estimate for it
# if not self.par.has_key('RMS'):
# rms=cube.estimate_rms()
# self.par['RMS']=rms
#
# # TODO: set parameters according to meta
#
# # Unpack used parameters
# npeaks=self.par['NPEAKS']
# mlim=self.par['MODELMIN']
# peak_thresh=self.par['THRESH']
# area_thresh=self.par['MINPIX']
# maxclump=self.par['MAXCLUMPS']
# npad=self.par['NPAD']
# maxskip=self.par['MAXSKIP']
# nsig=self.par['NSIGMA']
#
# # Copy the supplied cube into a work cube which will hold the
# # residuals remaining after subtraction of the fitted Gaussians.
# self.data=cube.copy()
# self.syn=cube.empty_like()
# # Initialise the number of clumps found so far.
# iclump = 0
#
# # Indicate that no peaks have been found below the lower threshold for clump
# # peak values, or below the lower area threshold.
# peaks_below = 0
# area_below = 0
#
# # Initialise the variables used to keep track of the mean and standard
# # deviation of the most recent "npeak" fitted peak values.
# mean_peak = 0.0
# sigma_peak = 0.0
# # The value most recently added to "peaks"
# new_peak = 0.0
# # Sum of the values in "peaks"
# sum_peak = 0.0
# # Sum of the squares of the values in "peaks"
# sum_peak2 = 0.0
#
# # Number of pixels contributing to the clump
# area=0
#
# # Iterations performed so far
# niter = 0
# iterate = True
# # No. of failed fits since last good fit
# nskip = 0
# # Sum of the values in all the used clumps so far
# sumclumps = 0.0
# # Sum of the supplied data values
# sumdata = self.data.flux()
#
# # peaks contains the last npeaks...
# peaks=np.zeros(npeaks)
# clist=[]
# # Loop round fitting a gaussian to the largest remaining peak in the
# # residuals array. */
# while iterate:
# # Report the iteration number to the user if required.
# niter+=1
# if verbose:
# log.info("Iteration: "+str(niter))
# # Find the cube index of the element with the largest value in the residuals cube.
# # imax: Index of element with largest residual
# (self.valmax,self.imax) = self.data.max()
# if verbose:
# print "GAP ",self.valmax - (rms + snr*rms)
# if self.valmax < rms + snr*rms:
# iterate = False
# niter-=1
# if verbose:
# log.info("Maximum point is at SNR limit.")
# # Finish iterating if all the residuals are bad,
# if np.isnan(self.imax).any():
# iterate = False
# niter-=1
# if verbose:
# log.info("There are no good pixels left to be fitted.")
# continue
# # If not, make an initial guess at the Gaussian clump parameters centred on the current peak.
# self.setInit()
#
# # Find the best fitting parameters, starting from the above initial guess.
# (clump,lb,ub)=self.optimize()
# # If no fit could be performed, then found = False
# if clump!=None:
# # Skip this fit if we have an estimate of the standard deviation of the
# # "npeaks" most recent clump peak values, and the peak value of the clump
# # just fitted is a long way (more than NSIGMA standard deviations) from the
# # peak value of the previously fitted clump. Also skip it if the peak
# # value is less than the "mlim" value.
# if (peaks.size == 0 or iclump < npeaks or np.abs(clump[0] - new_peak) < nsig*sigma_peak ) and clump[0] > mlim:
#
# # Record the new peak value for use with the next peak, and update the
# # standard deviation of the "npeaks" most recent peaks. These values are
# # stored cyclically in the "peaks" array. */
# if peaks.size > 0:
# np.roll(peaks,1)
# new_peak = clump[0]
# old_peak = peaks[0]
# peaks[0]=new_peak
# sum_peak += new_peak - old_peak
# sum_peak2 += new_peak*new_peak - old_peak*old_peak
# if sum_peak2 < 0.0:
# sum_peak2 = 0.0
# mean_peak = sum_peak/npeaks
# sigma_peak = np.sqrt(sum_peak2/npeaks - mean_peak*mean_peak)
#
# # Increment the number of peaks found.
# iclump+=1
#
# # Reset the number of failed fits since the last good fit. */
# nskip = 0
#
# # Remove the model fit (excluding the background) from the residuals.
# # This also creates data values asociated with the clumps
# # The standard deviation of the new residuals is returned. */
# if clump[0] >= peak_thresh:
# #record clump
# clist.append(clump)
# (csum,area)=self.updateResults(clump,lb,ub)
# sumclumps+=csum
# # TODO: implement this!
# # Display the clump parameters on the screen if required. */
# #cupidGCListClump( iclump, ndim, x, chisq, slbnd, rms, status )
#
# # If this clump has a peak value which is below the threshold, increment
# # the count of consecutive clumps with peak value below the threshold.
# # Otherwise, reset this count to zero.
# if clump[0] < peak_thresh:
# self.data.data.mask[self.imax]=True
# if verbose:
# log.info("Clump rejected because it is below threshold.")
# peaks_below+=1
# else:
# peaks_below=0
#
# # If this clump has an area which is below the threshold, increment
# # the count of consecutive clumps with area below the threshold.
# # Otherwise, reset this count to zero.
# if area < area_thresh:
# log.info("Clump rejected because it is below threshold.")
# area_below+=1
# else:
# area_below=0
#
# # If the maximum number of clumps have now been found, exit.*/
# if iclump == maxclump:
# iterate = False
# if verbose:
# log.info("The specified maximum number of clumps ("+str(maxclump)+") have been found.")
#
# # If the integrated data sum in the fitted gaussians exceeds or equals
# # the integrated data sum in the input, exit.
# elif sumclumps >= sumdata:
# iterate = False
# if verbose:
# log.info("The total data sum of the fitted Gaussians ("+str(sumclumps)+") has reached the total data sum in the supplied data ("+str(sumdata)+").")
#
# # If the peak value fitted is very different from the previous fitted peak
# # value, set the residuals array element bad in order to prevent the
# # algorithm from trying to fit a peak to the same pixel again.
# else:
# self.data.data.mask[self.imax]=True
# new_peak = 0.5*(new_peak + clump[0])
# nskip+=1
# if verbose:
# log.info("Clump rejected due to aberrant peak value. Ignoring Pixel...")
#
# # Tell the user if no clump could be fitted around the current peak
# # pixel value
# else:
# nskip+=1
# if verbose:
# log.info("No clump fitted (optimization falied). Ignoring Pixel...")
# # Set the specified element of the residuals array bad if no fit was
# # performed. This prevents the any subsequent attempt to fit a Gaussian
# # to the same peak value.
# self.data.data.mask[self.imax]=True
# if verbose:
# log.info("GaussClump finished normally")
# # TODO: Usable Clumps
# ## Tell the problems with the clumps. */
# #if nclump == 0:
# # print "No usable clumps found."
# #if iclump - nclump >= 1:
# # print iclump - nclump,"clump(s) rejected because they touch an edge of the data array."
# ## Tell the user how many iterations have been performed (i.e. how many
# ## attempts there have been to fit a Gaussian peak
# #if niter == 1:
# # print "No fit attempted."
# #else:
# # print "Fits attempted for ",iclump," candidate clumps (",niter-iclump," failed)."
# return clist
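# Illustrative usage sketch (not part of the original module; `cube` stands for an
# acalib data cube exposing the interface relied on by fit() above, e.g. rms(),
# copy(), empty_like(), flux() and max()):
#
#   gc = GaussClumps()
#   gc.par['THRESH'] = 3.0        # only keep clumps whose peak exceeds 3*RMS
#   gc.par['MAXCLUMPS'] = 50      # stop after at most 50 fitted clumps
#   clumps = gc.fit(cube, verbose=True)
#   print(clumps)                 # astropy Table with one row per accepted clump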
|
ChileanVirtualObservatory/acalib
|
acalib/algorithms/attic/gaussClumps.py
|
Python
|
gpl-3.0
| 39,243
|
[
"Gaussian"
] |
4165105e2ee6b9933e6ba45175ef6e2d300fe71ec97a5602e95a4c92c40b1f67
|
"""
====================================================================
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models, and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Performs k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
======================
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroids. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid. Each
step of the k-means algorithm refines the choices of centroids to
reduce distortion. The change in distortion is used as a
stopping criterion: when the change is lower than a threshold, the
k-means algorithm is not making sufficient progress and
terminates. One can also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be a M by N array where the rows are
the observation vectors. The codebook is a k by N array where the
i'th row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh tone colors would be represented in the
code book.
"""
from __future__ import division, print_function, absolute_import
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
# TODO:
# - implements high level method for running several times k-means with
# different initialialization
# - warning: what happens if different number of clusters ? For now, emit a
# warning, but it is not great, because I am not sure it really make sense to
# succeed in this case (maybe an exception is better ?)
import warnings
from numpy.random import randint
from numpy import (shape, zeros, sqrt, argmin, minimum, array, newaxis,
common_type, single, double, take, std, mean)
import numpy as np
from scipy._lib._util import _asarray_validated
from . import _vq
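# Sketch of the color-quantization example described in the module docstring above
# (illustrative only; `img` is assumed to be an (H, W, 3) float array of RGB values):
#
#   pixels = img.reshape(-1, 3)
#   pixels_w = whiten(pixels)                       # unit variance per channel
#   codebook, _ = kmeans(pixels_w, 256)             # 256-entry code book
#   codes, _ = vq(pixels_w, codebook)               # one 8-bit code per pixel
#   quantized = codebook[codes].reshape(img.shape)  # reconstruct the (whitened) image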
class ClusterError(Exception):
pass
def whiten(obs, check_finite=True):
"""
Normalize a group of observations on a per feature basis.
Before running k-means, it is beneficial to rescale each feature
dimension of the observation set with whitening. Each feature is
divided by its standard deviation across all observations to give
it unit variance.
Parameters
----------
obs : ndarray
Each row of the array is an observation. The
columns are the features seen during each observation.
>>> # f0 f1 f2
>>> obs = [[ 1., 1., 1.], #o0
... [ 2., 2., 2.], #o1
... [ 3., 3., 3.], #o2
... [ 4., 4., 4.]] #o3
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
result : ndarray
Contains the values in `obs` scaled by the standard deviation
of each column.
Examples
--------
>>> from scipy.cluster.vq import whiten
>>> features = np.array([[1.9, 2.3, 1.7],
... [1.5, 2.5, 2.2],
... [0.8, 0.6, 1.7,]])
>>> whiten(features)
array([[ 4.17944278, 2.69811351, 7.21248917],
[ 3.29956009, 2.93273208, 9.33380951],
[ 1.75976538, 0.7038557 , 7.21248917]])
"""
obs = _asarray_validated(obs, check_finite=check_finite)
std_dev = std(obs, axis=0)
zero_std_mask = std_dev == 0
if zero_std_mask.any():
std_dev[zero_std_mask] = 1.0
warnings.warn("Some columns have standard deviation zero. "
"The values of these columns will not change.",
RuntimeWarning)
return obs / std_dev
def vq(obs, code_book, check_finite=True):
"""
Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
Parameters
----------
obs : ndarray
Each row of the 'M' x 'N' array is an observation. The columns are
the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code.
>>> # f0 f1 f2 f3
>>> code_book = [
... [ 1., 2., 3., 4.], #c0
... [ 1., 2., 3., 4.], #c1
... [ 1., 2., 3., 4.]] #c2
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
A length M array holding the code book index for each observation.
dist : ndarray
The distortion (distance) between the observation and its nearest
code.
Examples
--------
>>> from numpy import array
>>> from scipy.cluster.vq import vq
>>> code_book = array([[1.,1.,1.],
... [2.,2.,2.]])
>>> features = array([[ 1.9,2.3,1.7],
... [ 1.5,2.5,2.2],
... [ 0.8,0.6,1.7]])
>>> vq(features,code_book)
(array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239]))
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
ct = common_type(obs, code_book)
c_obs = obs.astype(ct, copy=False)
if code_book.dtype != ct:
c_code_book = code_book.astype(ct)
else:
c_code_book = code_book
if ct in (single, double):
results = _vq.vq(c_obs, c_code_book)
else:
results = py_vq(obs, code_book)
return results
def py_vq(obs, code_book, check_finite=True):
""" Python version of vq algorithm.
    The algorithm computes the Euclidean distance between each
observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expects a rank 2 array. Each row is one observation.
code_book : ndarray
        Code book to use. Same format as obs. Should have the same number of
        features (e.g., columns) as obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
        code[i] gives the label of the ith observation; that is, its code is
code_book[code[i]].
    min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This function is slower than the C version but works for
all input types. If the inputs have the wrong types for the
C versions of the function, this one is called as a last resort.
It is about 20 times slower than the C version.
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
# n = number of observations
# d = number of features
if np.ndim(obs) == 1:
if not np.ndim(obs) == np.ndim(code_book):
raise ValueError(
"Observation and code_book should have the same rank")
else:
return _py_vq_1d(obs, code_book)
else:
(n, d) = shape(obs)
# code books and observations should have same number of features and same
# shape
if not np.ndim(obs) == np.ndim(code_book):
raise ValueError("Observation and code_book should have the same rank")
elif not d == code_book.shape[1]:
raise ValueError("Code book(%d) and obs(%d) should have the same "
"number of features (eg columns)""" %
(code_book.shape[1], d))
code = zeros(n, dtype=int)
min_dist = zeros(n)
for i in range(n):
dist = np.sum((obs[i] - code_book) ** 2, 1)
code[i] = argmin(dist)
min_dist[i] = dist[code[i]]
return code, sqrt(min_dist)
def _py_vq_1d(obs, code_book):
""" Python version of vq algorithm for rank 1 only.
Parameters
----------
obs : ndarray
Expects a rank 1 array. Each item is one observation.
code_book : ndarray
        Code book to use. Same format as obs. Should be rank 1 too.
Returns
-------
code : ndarray
        code[i] gives the label of the ith observation; that is, its code is
code_book[code[i]].
    min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
"""
raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now")
n = obs.size
nc = code_book.size
dist = np.zeros((n, nc))
for i in range(nc):
dist[:, i] = np.sum(obs - code_book[i])
print(dist)
code = argmin(dist)
min_dist = dist[code]
return code, sqrt(min_dist)
def py_vq2(obs, code_book, check_finite=True):
"""2nd Python version of vq algorithm.
    The algorithm simply computes the Euclidean distance between each
    observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expect a rank 2 array. Each row is one observation.
code_book : ndarray
        Code book to use. Same format as obs. Should have the same number of
        features (e.g., columns) as obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
        code[i] gives the label of the ith observation; that is, its code is
code_book[code[i]].
    min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This could be faster when number of codebooks is small, but it
becomes a real memory hog when codebook is large. It requires
N by M by O storage where N=number of obs, M = number of
features, and O = number of codes.
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
d = shape(obs)[1]
# code books and observations should have same number of features
if not d == code_book.shape[1]:
raise ValueError("""
code book(%d) and obs(%d) should have the same
number of features (eg columns)""" % (code_book.shape[1], d))
diff = obs[newaxis, :, :] - code_book[:,newaxis,:]
dist = sqrt(np.sum(diff * diff, -1))
code = argmin(dist, 0)
min_dist = minimum.reduce(dist, 0)
# The next line I think is equivalent and should be faster than the one
# above, but in practice didn't seem to make much difference:
# min_dist = choose(code,dist)
return code, min_dist
def _kmeans(obs, guess, thresh=1e-5):
""" "raw" version of k-means.
Returns
-------
code_book
the lowest distortion codebook found.
avg_dist
        the average distance an observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
Examples
--------
Note: not whitened in this example.
>>> from numpy import array
>>> from scipy.cluster.vq import _kmeans
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
code_book = array(guess, copy=True)
avg_dist = []
diff = thresh+1.
while diff > thresh:
nc = code_book.shape[0]
# compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book)
avg_dist.append(mean(distort, axis=-1))
# recalc code_book as centroids of associated obs
        if diff > thresh:
code_book, has_members = _vq.update_cluster_means(obs, obs_code, nc)
code_book = code_book.compress(has_members, axis=0)
if len(avg_dist) > 1:
diff = avg_dist[-2] - avg_dist[-1]
# print avg_dist
return code_book, avg_dist[-1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True):
"""
Performs k-means on a set of observation vectors forming k clusters.
The k-means algorithm adjusts the centroids until sufficient
progress cannot be made, i.e. the change in distortion since
the last iteration is less than some threshold. This yields
a code book mapping centroids to codes and vice versa.
Distortion is defined as the sum of the squared differences
between the observations and the corresponding centroid.
Parameters
----------
obs : ndarray
Each row of the M by N array is an observation vector. The
columns are the features seen during each observation.
The features must be whitened first with the `whiten` function.
k_or_guess : int or ndarray
The number of centroids to generate. A code is assigned to
each centroid, which is also the row index of the centroid
in the code_book matrix generated.
The initial k centroids are chosen by randomly selecting
observations from the observation matrix. Alternatively,
passing a k by N array specifies the initial k centroids.
iter : int, optional
The number of times to run k-means, returning the codebook
with the lowest distortion. This argument is ignored if
initial centroids are specified with an array for the
``k_or_guess`` parameter. This parameter does not represent the
number of iterations of the k-means algorithm.
thresh : float, optional
Terminates the k-means algorithm if the change in
distortion since the last k-means iteration is less than
or equal to thresh.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
codebook : ndarray
A k by N array of k centroids. The i'th centroid
codebook[i] is represented with the code i. The centroids
and codes generated represent the lowest distortion seen,
not necessarily the globally minimal distortion.
distortion : float
The distortion between the observations passed and the
centroids generated.
See Also
--------
kmeans2 : a different implementation of k-means clustering
with more methods for generating initial centroids but without
using a distortion change threshold as a stopping criterion.
whiten : must be called prior to passing an observation matrix
to kmeans.
Examples
--------
>>> from numpy import array
>>> from scipy.cluster.vq import vq, kmeans, whiten
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 0.1,0.1],
... [ 0.2,1.8],
... [ 2.0,0.5],
... [ 0.3,1.5],
... [ 1.0,1.0]])
>>> whitened = whiten(features)
>>> book = array((whitened[0],whitened[2]))
>>> kmeans(whitened,book)
(array([[ 2.3110306 , 2.86287398], # random
[ 0.93218041, 1.24398691]]), 0.85684700941625547)
>>> from numpy import random
>>> random.seed((1000,2000))
>>> codes = 3
>>> kmeans(whitened,codes)
(array([[ 2.3110306 , 2.86287398], # random
[ 1.32544402, 0.65607529],
[ 0.40782893, 2.02786907]]), 0.5196582527686241)
"""
obs = _asarray_validated(obs, check_finite=check_finite)
if int(iter) < 1:
raise ValueError('iter must be at least 1.')
# Determine whether a count (scalar) or an initial guess (array) was passed.
k = None
guess = None
try:
k = int(k_or_guess)
except TypeError:
guess = _asarray_validated(k_or_guess, check_finite=check_finite)
if guess is not None:
if guess.size < 1:
raise ValueError("Asked for 0 cluster ? initial book was %s" %
guess)
result = _kmeans(obs, guess, thresh=thresh)
else:
if k != k_or_guess:
raise ValueError('if k_or_guess is a scalar, it must be an integer')
# initialize best distance value to a large value
best_dist = np.inf
No = obs.shape[0]
k = k_or_guess
if k < 1:
raise ValueError("Asked for 0 cluster ? ")
for i in range(iter):
# the initial code book is randomly selected from observations
guess = take(obs, randint(0, No, k), 0)
book, dist = _kmeans(obs, guess, thresh=thresh)
if dist < best_dist:
best_book = book
best_dist = dist
result = best_book, best_dist
return result
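# The restart behaviour described in the docstring above can be pictured with a
# small sketch. The helper below is illustrative only (it is not part of SciPy's
# API): it re-runs ``_kmeans`` from random initial codebooks and keeps the
# lowest-distortion result, which is exactly what ``kmeans(obs, k, iter=restarts)``
# does when ``k`` is an integer.
def _kmeans_restart_sketch(obs, k, restarts=20, thresh=1e-5):
    """Illustrative sketch of the ``iter`` restarts performed by `kmeans`.

    ``obs`` is assumed to already be whitened.
    """
    best_book, best_dist = None, np.inf
    for _ in range(restarts):
        # pick k random observations as the initial codebook
        guess = take(obs, randint(0, obs.shape[0], k), 0)
        book, dist = _kmeans(obs, guess, thresh=thresh)
        if dist < best_dist:
            best_book, best_dist = book, dist
    return best_book, best_dist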
def _kpoints(data, k):
"""Pick k points at random in data (one row = one observation).
This is done by taking the first k values of a random permutation of 1..N
where N is the number of observations.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
"""
if data.ndim > 1:
n = data.shape[0]
else:
n = data.size
p = np.random.permutation(n)
x = data[p[:k], :].copy()
return x
def _krandinit(data, k):
"""Returns k samples of a random variable which parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable which mean and covariances are the one estimated from data.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
"""
def init_rank1(data):
mu = np.mean(data)
cov = np.cov(data)
x = np.random.randn(k)
x *= np.sqrt(cov)
x += mu
return x
def init_rankn(data):
mu = np.mean(data, 0)
cov = np.atleast_2d(np.cov(data, rowvar=0))
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = np.random.randn(k, mu.size)
x = np.dot(x, np.linalg.cholesky(cov).T) + mu
return x
def init_rank_def(data):
# initialize when the covariance matrix is rank deficient
mu = np.mean(data, axis=0)
_, s, vh = np.linalg.svd(data - mu, full_matrices=False)
x = np.random.randn(k, s.size)
sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
x = np.dot(x, sVh) + mu
return x
nd = np.ndim(data)
if nd == 1:
return init_rank1(data)
elif data.shape[1] > data.shape[0]:
return init_rank_def(data)
else:
return init_rankn(data)
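# A quick way to convince oneself that the Gaussian initialization above matches
# the data is to compare moments. The helper below is an illustrative sketch only
# (not part of SciPy) and assumes ``data`` is a rank-2 observation array.
def _krandinit_moment_check(data, k=10000):
    """Return the largest absolute mean and covariance deviations between
    ``data`` and ``k`` centroids drawn by `_krandinit`; both should be small
    for large ``k``."""
    init = _krandinit(data, k)
    mean_err = np.abs(np.mean(init, axis=0) - np.mean(data, axis=0)).max()
    cov_err = np.abs(np.cov(init, rowvar=0) - np.cov(data, rowvar=0)).max()
    return mean_err, cov_err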
_valid_init_meth = {'random': _krandinit, 'points': _kpoints}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmean with a different initialization.")
def _missing_raise():
"""raise a ClusterError when called."""
raise ClusterError("One of the clusters is empty. "
"Re-run kmean with a different initialization.")
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
missing='warn', check_finite=True):
"""
Classify a set of observations into k clusters using the k-means algorithm.
The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
included.
Parameters
----------
data : ndarray
An 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
'M' array of 'M' one-dimensional observations.
k : int or ndarray
The number of clusters to form as well as the number of
centroids to generate. If the `minit` initialization string is
'matrix', or if an ndarray is given instead, it is
interpreted as the initial clusters to use.
iter : int, optional
Number of iterations of the k-means algorithm to run. Note
that this differs in meaning from the iter parameter of
the kmeans function.
thresh : float, optional
(not used yet)
minit : str, optional
Method for initialization. Available methods are 'random',
'points', and 'matrix':
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
'points': choose k observations (rows) at random from data for
the initial centroids.
'matrix': interpret the k parameter as a k by M (or length k
array for one-dimensional data) array of initial centroids.
missing : str, optional
Method to deal with empty clusters. Available methods are
'warn' and 'raise':
'warn': give a warning and continue.
'raise': raise a ClusterError and terminate the algorithm.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
centroid : ndarray
A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
i'th observation is closest to.
"""
data = _asarray_validated(data, check_finite=check_finite)
if missing not in _valid_miss_meth:
raise ValueError("Unkown missing method: %s" % str(missing))
# If data is rank 1, then we have 1 dimension problem.
nd = np.ndim(data)
if nd == 1:
d = 1
# raise ValueError("Input of rank 1 not supported yet")
elif nd == 2:
d = data.shape[1]
else:
raise ValueError("Input of rank > 2 not supported")
if np.size(data) < 1:
raise ValueError("Input has 0 items.")
# If k is not a single value, then it should be compatible with data's
# shape
if np.size(k) > 1 or minit == 'matrix':
if not nd == np.ndim(k):
raise ValueError("k is not an int and has not same rank than data")
if d == 1:
nc = len(k)
else:
(nc, dc) = k.shape
if not dc == d:
raise ValueError("k is not an int and has not same rank than\
data")
clusters = k.copy()
else:
try:
nc = int(k)
except TypeError:
raise ValueError("k (%s) could not be converted to an integer " % str(k))
if nc < 1:
raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k))
if not nc == k:
warnings.warn("k was not an integer, was converted.")
try:
init = _valid_init_meth[minit]
except KeyError:
raise ValueError("unknown init method %s" % str(minit))
clusters = init(data, k)
if int(iter) < 1:
raise ValueError("iter = %s is not valid. iter must be a positive integer." % iter)
return _kmeans2(data, clusters, iter, nc, _valid_miss_meth[missing])
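# Hedged usage sketch of the three ``minit`` modes documented above (shapes and
# values are illustrative only; results depend on the random seed):
#
#     >>> z = np.random.randn(100, 2)
#     >>> centroid, label = kmeans2(z, 3, minit='random')        # Gaussian draws
#     >>> centroid, label = kmeans2(z, 3, minit='points')        # random rows of z
#     >>> seeds = z[:3].copy()
#     >>> centroid, label = kmeans2(z, seeds, minit='matrix')    # explicit 3 x 2 seeds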
def _kmeans2(data, code, niter, nc, missing):
""" "raw" version of kmeans2. Do not use directly.
Run k-means with a given initial codebook.
"""
for i in range(niter):
# Compute the nearest neighbour for each obs
# using the current code book
label = vq(data, code)[0]
# Update the code by computing centroids using the new code book
new_code, has_members = _vq.update_cluster_means(data, label, nc)
if not has_members.all():
missing()
# Set the empty clusters to their previous positions
new_code[~has_members] = code[~has_members]
code = new_code
return code, label
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/scipy/cluster/vq.py
|
Python
|
mit
| 28,214
|
[
"Gaussian"
] |
da604791e6a0e5011fee94e52fe46dc1788ac60cdfd8f5b737492e6f4285fee7
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# A turn by turn navigation module.
#----------------------------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from modules.base_module import RanaModule
from core import geo
from threading import Thread
import math
import time
from . import instructions_generator
REROUTE_CHECK_INTERVAL = 5000 # in ms
# in m/s, about 72 km/h - if this speed is reached, the rerouting threshold is multiplied
# by REROUTING_THRESHOLD_MULTIPLIER
INCREASE_REROUTING_THRESHOLD_SPEED = 20
REROUTING_DEFAULT_THRESHOLD = 30
# not enabled at the moment - needs more field testing
REROUTING_THRESHOLD_MULTIPLIER = 1.0
# how many times the threshold needs to be crossed to
# trigger rerouting
REROUTING_TRIGGER_COUNT = 3
MAX_CONSECUTIVE_AUTOMATIC_REROUTES = 3
AUTOMATIC_REROUTE_COUNTER_EXPIRATION_TIME = 600 # in seconds
# only import GTK libs if the GTK GUI is used
from core import gs
if gs.GUIString == "GTK":
import pango
import pangocairo
def getModule(*args, **kwargs):
return TurnByTurn(*args, **kwargs)
class TurnByTurn(RanaModule):
"""A turn by turn navigation module."""
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
# initial colors
self.navigationBoxBackground = (0, 0, 1, 0.3) # very transparent blue
self.navigationBoxText = (1, 1, 1, 1) # non-transparent white
self.TBTWorker = None
self.TBTWorkerEnabled = False
self.goToInitialState()
self.automaticRerouteCounter = 0 # counts consecutive automatic reroutes
self.lastAutomaticRerouteTimestamp = time.time()
# reroute even though the route was not yet reached (for special cases)
self.overrideRouteReached = False
def goToInitialState(self):
"""restore initial state"""
self.route = None
self.currentStepIndex = 0
self.currentStepIndicator = None
self.espeakFirstAndHalfTrigger = False
self.espeakFirstTrigger = False
self.espeakSecondTrigger = False
self.currentDistance = None
self.currentStep = None
self.navigationBoxHidden = False
self.mRouteLength = 0
self.locationWatchID = None
self.onRoute = False
#rerouting is enabled once the route is reached for the first time
self.routeReached = False
self.reroutingThresholdMultiplier = 1.0
self.reroutingThresholdCrossedCounter = 0
def firstTime(self):
icons = self.m.get('icons', None)
if icons:
icons.subscribeColorInfo(self, self.colorsChangedCallback)
def colorsChangedCallback(self, colors):
self.navigationBoxBackground = colors['navigation_box_background'].getCairoColor()
self.navigationBoxText = colors['navigation_box_text'].getCairoColor()
def handleMessage(self, message, messageType, args):
if message == 'start':
if messageType == 'ms':
fromWhere = args
self.startTBT(fromWhere)
elif message == 'stop':
self.stopTBT()
elif message == 'reroute': # manual rerouting
# reset automatic reroute counter
self.automaticRerouteCounter = 0
self._reroute()
elif message == "toggleBoxHiding":
self.log.info("toggling navigation box visibility")
self.navigationBoxHidden = not self.navigationBoxHidden
elif message == "switchToPreviousTurn":
self.switchToPreviousStep()
elif message == "switchToNextTurn":
self.switchToNextStep()
elif message == "showMessageInsideNotification":
currentStep = self.getCurrentStep()
if currentStep:
message = "<i>turn description:</i>\n%s" % currentStep.description
if self.dmod.hasNotificationSupport():
self.dmod.notify(message, 7000)
#TODO: add support for modRana notifications once they support line wrapping
def _rerouteAuto(self):
"""this function is called when automatic rerouting is triggered"""
# check time from last automatic reroute
dt = time.time() - self.lastAutomaticRerouteTimestamp
if dt >= AUTOMATIC_REROUTE_COUNTER_EXPIRATION_TIME:
# reset the automatic reroute counter
self.automaticRerouteCounter = 0
self.log.debug('automatic reroute counter expired, clearing')
# on some routes, when we are moving away from the start of the route, it
# is needed to reroute a couple of times before the correct way is found
# on the other hand there should be a limit on the number of times
# modRana reroutes in a row
#
# SOLUTION:
# 1. enable automatic rerouting even though the route was not yet reached
# (as we are moving away from it)
# 2. do this only a limited number of times (up to 3 times in a row)
# 3. the counter is reset by manual rerouting, by reaching the route or after 10 minutes
if self.automaticRerouteCounter < MAX_CONSECUTIVE_AUTOMATIC_REROUTES:
self.log.debug('faking that route was reached to enable new rerouting')
self.overrideRouteReached = True
else:
self.log.info('tbt: too many consecutive reroutes (%d),', self.automaticRerouteCounter)
self.log.info('reach the route to enable automatic rerouting')
self.log.info('or reroute manually')
# increment the automatic reroute counter & update the timestamp
self.automaticRerouteCounter += 1
self.lastAutomaticRerouteTimestamp = time.time()
# trigger rerouting
self._reroute()
def _reroute(self):
# 1. say rerouting is in progress
voiceMessage = "rerouting"
voice = self.m.get('voice', None)
if voice:
voice.say(voiceMessage, "en") # make sure rerouting said with english voice
time.sleep(2) #TODO: improve this
# 2. get a new route from current position to destination
self.sendMessage("ms:route:reroute:fromPosToDest")
# 3. restart turn-by-turn navigation for this new route from the closest point
self.sendMessage("ms:turnByTurn:start:closest")
def drawMapOverlay(self, cr):
if self.route:
# get current step
currentStep = self.getCurrentStep()
proj = self.m.get('projection', None)
# draw the current step indicator circle
if currentStep and proj:
(lat, lon) = currentStep.getLL()
(pointX, pointY) = proj.ll2xy(lat, lon)
cr.set_source_rgb(1, 0, 0)
cr.set_line_width(4)
cr.arc(pointX, pointY, 12, 0, 2.0 * math.pi)
cr.stroke()
cr.fill()
def drawScreenOverlay(self, cr):
if self.route: # is there something relevant to draw ?
# get current step
currentStep = self.getCurrentStep()
# draw the routing message box
# we need to have the viewport available
vport = self.get('viewport', None)
menus = self.m.get('menu', None)
if vport and menus:
(sx, sy, w, h) = vport
(bx, by, bw, bh) = (w * 0.15, h * 0.20, w * 0.7, h * 0.4)
buttonStripOffset = 0.25 * bh
# construct parametric background for the cairo drawn buttons
background = "generic:;0;;1;5;0"
if self.navigationBoxHidden:
# * show button
showButtonWidth = bw * 0.2
# the show button uses custom parameters
parametricIconName = "center:show;0.1>%s" % background
menus.drawButton(cr, bx + (bw - showButtonWidth), by, showButtonWidth, buttonStripOffset, "",
parametricIconName, "turnByTurn:toggleBoxHiding")
else:
# draw the info-box background
cr.set_source_rgba(*self.navigationBoxBackground)
cr.rectangle(bx, by + buttonStripOffset, bw, bh - buttonStripOffset)
cr.fill()
# create a layout for our drawing area
pg = pangocairo.CairoContext(cr)
layout = pg.create_layout()
# get the current turn message
message = currentStep.description
# display current distance to the next point & other unit conversions
units = self.m.get('units', None)
if units and self.currentDistance:
distString = units.m2CurrentUnitString(self.currentDistance, 1, True)
if currentStep.getDistanceFromStart():
currentDistString = units.m2CurrentUnitString(currentStep.getDistanceFromStart(), 1, True)
else:
currentDistString = "?"
routeLengthString = units.m2CurrentUnitString(self.mRouteLength, 1, True)
else:
distString = ""
currentDistString = ""
routeLengthString = ""
# TODO: find why there needs to be a newline on the end
message = "%s : %s\n" % (distString, message)
border = min(bw / 50.0, bh / 50.0)
# compute how much space is actually available for the text
usableWidth = bw - 2 * border
usableHeight = bh - 6 * border - buttonStripOffset
layout.set_width(int(usableWidth * pango.SCALE))
layout.set_wrap(pango.WRAP_WORD)
layout.set_markup(message)
layout.set_font_description(pango.FontDescription("Sans Serif 24")) #TODO: custom font size ?
(lw, lh) = layout.get_size()
if lw == 0 or lh == 0:
# no need to draw a zero area layout
return
# get coordinates for the area available for text
ulX, ulY = (bx + border, by + border + buttonStripOffset)
cr.move_to(ulX, ulY)
cr.save()
if lh > usableHeight: # is the rendered text larger than the usable area ?
clipHeight = 0
# find last completely visible line
cut = False
for index in range(0, layout.get_line_count() - 1):
lineHeight = layout.get_line(index).get_pixel_extents()[1][3]
if clipHeight + lineHeight <= usableHeight:
clipHeight = clipHeight + lineHeight
else:
cut = True # signalize we cut off some lines
break
textEndY = by + border + clipHeight + buttonStripOffset
if cut:
# notify the user that a part of the text was cut,
# by drawing a red line and a scissors icon
# draw the red line
cr.set_source_rgb(1, 0, 0)
cr.set_line_width(bh * 0.01)
cr.move_to(bx, textEndY)
cr.line_to(bx + bw, textEndY)
cr.stroke()
# draw the scissors icon
cutSide = bw / 10
menus.drawButton(cr, bx + bw, textEndY - cutSide / 2.0, cutSide, cutSide, "",
"center:scissors_right;0>%s" % background,
"turnByTurn:showMessageInsideNotification")
#TODO: show the whole message in a notifications after clicking the scissors
# (this needs line wrapping support in modRana notifications)
# clip out the overflowing part of the text
cr.rectangle(ulX, ulY, usableWidth, clipHeight)
cr.translate(ulX, ulY)
cr.clip()
cr.set_source_rgba(*self.navigationBoxText)
pg.show_layout(layout)
cr.restore()
# use the bottom of the infobox to display info
(bottomX, bottomY) = (bx, by + bh - 6 * border)
if self.routeReached and self._automaticReroutingEnabled():
arString = "automatic rerouting enabled"
else:
arString = "tap this box to reroute"
note = "%s/%s, %d/%d <sub> %s</sub>" % (
currentDistString, routeLengthString, self.currentStepIndex + 1, self.getMaxStepIndex() + 1,
arString)
menus.drawText(cr, "%s" % note, bottomX, bottomY, bw, 6 * border, 0,
rgbaColor=self.navigationBoxText)
# make clickable
clickHandler = self.m.get('clickHandler', None)
if clickHandler:
action = "turnByTurn:reroute"
clickHandler.registerXYWH(bx, by + buttonStripOffset, bw, bh - buttonStripOffset, action)
# draw the button strip
hideButtonWidth = bw * 0.2
switchButtonWidth = bw * 0.4
# * previous turn button
menus.drawButton(cr, bx, by, switchButtonWidth, buttonStripOffset, "",
"center:less;0.1>%s" % background, "turnByTurn:switchToPreviousTurn")
# * next turn button
menus.drawButton(cr, bx + switchButtonWidth, by, switchButtonWidth, buttonStripOffset, "",
"center:more;0.1>%s" % background, "turnByTurn:switchToNextTurn")
# * hide button
menus.drawButton(cr, bx + 2 * switchButtonWidth, by, hideButtonWidth, buttonStripOffset, "",
"center:hide;0.1>%s" % background, "turnByTurn:toggleBoxHiding")
def _automaticReroutingEnabled(self):
return self.get('reroutingThreshold', REROUTING_DEFAULT_THRESHOLD)
def sayTurn(self, message, distanceInMeters, forceLanguageCode=False):
"""say a text-to-speech message about a turn
this basically wraps the simple say method from voice and adds some more information,
like current distance to the turn
"""
voice = self.m.get('voice', None)
units = self.m.get('units', None)
if voice and units:
(distString, short, long) = units.humanRound(distanceInMeters)
if distString == "0":
distString = ""
else:
distString = '<p xml:lang="en">in <emphasis level="strong">' + distString + ' ' + long + '</emphasis></p><br>'
# TODO: language specific distance strings
text = distString + message
# """ the message can contain unicode, this might cause an exception when printing it
# in some systems (SHR-u on Neo, for example)"""
# try:
# print("saying: %s" % text)
# pass
# except UnicodeEncodeError:
# print("voice: printing the current message to stdout failed do to unicode conversion error")
if forceLanguageCode:
espeakLanguageCode = forceLanguageCode
else:
# the espeak language code is the first part of this whitespace delimited string
espeakLanguageCode = self.get('directionsLanguage', 'en en').split(" ")[0]
return voice.say(text, espeakLanguageCode)
def getMaxStepIndex(self):
return self.route.getMessagePointCount() - 1
def getStartingStep(self, which='first'):
if self.route:
if which == 'first':
return self.getStep(0)
if which == 'closest':
return self.getClosestStep()
def getClosestStep(self):
"""get the geographically closest step"""
proj = self.m.get('projection', None) # we also need the projection module
pos = self.get('pos', None) # and current position
if pos and proj:
(lat1, lon1) = pos
tempSteps = self.route.getMessagePoints()
for step in tempSteps:
(lat2, lon2) = step.getLL()
step.setCurrentDistance(geo.distance(lat1, lon1, lat2, lon2) * 1000)  # km to m
closestStep = sorted(tempSteps, key=lambda x: x.getCurrentDistance())[0]
return closestStep
def getStep(self, index):
"""return steps for valid index, None otherwise"""
maxIndex = self.getMaxStepIndex()
if index > maxIndex or index < -(maxIndex + 1):
self.log.error("wrong turn index: %d, max index is: %d", index, maxIndex)
return None
else:
return self.route.getMessagePointByID(index)
def setStepAsCurrent(self, step):
"""set a given step as current step"""
mpId = self.route.getMessagePointID(step)
self._setCurrentStepIndex(mpId)
def getCurrentStep(self):
"""return current step"""
return self.route.getMessagePointByID(self.currentStepIndex)
def getStepID(self, step):
return self.route.getMessagePointID(step)
def getCurrentStepVisitStatus(self):
"""report visit status for current step"""
return self.getCurrentStep().getVisited()
def markCurrentStepAsVisited(self):
"""mark current step as visited"""
self.getCurrentStep().setVisited(True)
def switchToPreviousStep(self):
"""switch to previous step and clean up"""
nextIndex = self.currentStepIndex - 1
if nextIndex >= 0:
self._setCurrentStepIndex(nextIndex)
self.espeakFirstTrigger = False
self.espeakSecondTrigger = False
self.log.info("switching to previous step")
else:
self.log.info("previous step reached")
def switchToNextStep(self):
"""switch to next step and clean up"""
maxIndex = self.getMaxStepIndex()
nextIndex = self.currentStepIndex + 1
if nextIndex <= maxIndex:
self._setCurrentStepIndex(nextIndex)
self.espeakFirstAndHalfTrigger = False
self.espeakFirstTrigger = False
self.espeakSecondTrigger = False
self.log.info("switching to next step")
else:
self.log.info("last step reached")
self._lastStepReached()
def _lastStepReached(self):
"""handle all tasks that are needed once the last step is reached"""
#disable automatic rerouting
self._stopTBTWorker()
# automatic rerouting needs to be disabled to prevent rerouting
# once the destination was reached
def _setCurrentStepIndex(self, index):
self.currentStepIndex = index
self._doNavigationUpdate()
def enabled(self):
"""return True if enabled, false otherwise"""
if self.route:
return True
else:
return False
def startTBT(self, fromWhere='first'):
"""start Turn-by-turn navigation"""
# clean up any possible previous navigation data
self.goToInitialState()
# NOTE: turn and step are used interchangeably in the documentation
m = self.m.get('route', None)
if m:
route = m.getCurrentDirections()
if route: # is the route nonempty ?
self.route = route
# get route in radians for automatic rerouting
self.radiansRoute = route.getPointsLLERadians(dropElevation=True)
# start rerouting watch
self._startTBTWorker()
# show the warning message
self.sendMessage('ml:notification:m:use at own risk, watch for cliffs, etc.;3')
# for some reason the combined distance does not account for the last step
self.mRouteLength = route.getLength()
# some statistics
metersPerSecSpeed = self.get('metersPerSecSpeed', None)
dt = m.routeLookupDuration
self.log.info("route lookup took: %f s" % dt)
if dt and metersPerSecSpeed:
dm = dt * metersPerSecSpeed
self.log.info("distance traveled during lookup: %f m" % dm)
# the duration of the road lookup and other variables are currently not used
# in the heuristics but might be added later to make the heuristics more robust
# now we decide if we use the closest turn, or the next one,
# as we might be already past it and on our way to the next turn
cs = self.getClosestStep() # get geographically closest step
pos = self.get('pos', None) # get current position
pReachedDist = int(self.get('pointReachedDistance', 30)) # get the trigger distance
nextTurnId = self.getStepID(cs) + 1
nextStep = self.getStep(nextTurnId)
# check if we have all the data needed for our heuristics
self.log.info("trying to guess correct step to start navigation")
if nextStep and pos and pReachedDist:
(lat, lon) = pos
(csLat, csLon) = cs.getLL()
(nsLat, nsLon) = nextStep.getLL()
pos2nextStep = geo.distance(lat, lon, nsLat, nsLon) * 1000
pos2currentStep = geo.distance(lat, lon, csLat, csLon) * 1000
currentStep2nextStep = geo.distance(csLat, csLon, nsLat, nsLon) * 1000
# self.log.debug("pos",(lat,lon))
# self.log.debug("cs",(csLat,csLon))
# self.log.debug("ns",(nsLat,nsLon))
self.log.debug("position to next turn: %f m" % pos2nextStep)
self.log.debug("position to current turn: %f m" % pos2currentStep)
self.log.debug("current turn to next turn: %f m" % currentStep2nextStep)
self.log.debug("turn reached trigger distance: %f m" % pReachedDist)
if pos2currentStep > pReachedDist:
#this means we are out of the "capture circle" of the closest step
# what is more distant, the closest or the next step ?
if pos2nextStep < currentStep2nextStep:
# we are mostly probably already past the closest step,
# so we switch to the next step at once
self.log.debug("already past closest turn, switching to next turn")
self.setStepAsCurrent(nextStep)
# we play the message for the next step,
# with current distance to this step,
# to assure there is some voice output immediately after
# getting a new route or rerouting
plaintextMessage = nextStep.getSSMLMessage()
self.sayTurn(plaintextMessage, pos2nextStep)
else:
# we have probably not yet reached the closest step,
# so we start navigation from it
self.log.debug("closest turn not yet reached")
self.setStepAsCurrent(cs)
else:
# we are inside the "capture circle" of the closest step,
# this means the navigation will trigger the voice message by itself
# and correctly switch to next step
# -> no need to switch to next step from here
self.log.debug("inside reach distance of closest turn")
self.setStepAsCurrent(cs)
else:
# we don't have some of the data needed to decide
# whether to start navigation from the closest step or from the step after it
# -> we just start from the closest step
self.log.debug("not enough data to decide, using closest turn")
self.setStepAsCurrent(cs)
self._doNavigationUpdate() # run a first time navigation update
self.locationWatchID = self.watch('locationUpdated', self.locationUpdateCB)
self.log.info("started and ready")
def stopTBT(self):
"""stop Turn-by-turn navigation"""
# remove location watch
if self.locationWatchID:
self.removeWatch(self.locationWatchID)
# cleanup
self.goToInitialState()
self._stopTBTWorker()
self.log.info("stopped")
def locationUpdateCB(self, key, newValue, oldValue):
"""position changed, do a tbt navigation update"""
if key == "locationUpdated": # just to be sure
self._doNavigationUpdate()
else:
self.log.error("invalid key: %r", key)
def _doNavigationUpdate(self):
"""do a navigation update"""
# make sure there really are some steps
if not self.route:
self.log.error("no route")
return
pos = self.get('pos', None)
if pos is None:
self.log.error("skipping update, invalid position")
return
# get/compute/update necessary the values
(lat1, lon1) = pos
currentStep = self.getCurrentStep()
lat2, lon2 = currentStep.getLL()
currentDistance = geo.distance(lat1, lon1, lat2, lon2) * 1000 # km to m
self.currentDistance = currentDistance # update current distance
# use some sane minimum distance
distance = int(self.get('minAnnounceDistance', 100))
# GHK: make distance speed-sensitive
#
# I came up with this formula after a lot of experimentation with
# gnuplot. The idea is to give the user some simple parameters to
# adjust yet let him have a lot of control. There are five
# parameters in the equation:
#
# lowSpeed Speed below which the pre-announcement time is constant.
# lowTime Announcement time at and below lowSpeed.
# highSpeed Speed above which the announcement time is constant.
# highTime Announcement time at and above highSpeed.
# power Exponential power used in the formula; good values are 0.5-5
#
# The speeds are in m/s. Obviously highXXX must be greater than lowXXX.
# If power is 1.0, announcement times increase linearly above lowSpeed.
# If power < 1.0, times rise rapidly just above lowSpeed and more
# gradually approaching highSpeed. If power > 1.0, times rise
# gradually at first and rapidly near highSpeed. I like power > 1.0.
#
# The reasoning is that at high speeds you are probably on a
# motorway/freeway and will need extra time to get into the proper
# lane to take your exit. That reasoning is pretty harmless on a
# high-speed two-lane road, but it breaks down if you are stuck in
# heavy traffic on a four-lane freeway (like in Los Angeles
# where I live) because you might need quite a while to work your
# way across the traffic even though you're creeping along. But I
# don't know a good way to detect whether you're on a multi-lane road,
# I chose speed as an acceptable proxy.
#
# Regardless of speed, we always warn a certain distance ahead (see
# "distance" above). That distance comes from the value in the current
# step of the directions.
#
# BTW, if you want to use gnuplot to play with the curves, try:
# max(a,b) = a > b ? a : b
# min(a,b) = a < b ? a : b
# warn(x,t1,s1,t2,s2,p) = min(t2,(max(s1,x)-s1)**p*(t2-t1)/(s2-s1)**p+t1)
# plot [0:160][0:] warn(x,10,50,60,100,2.0)
#
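# A hedged Python rendering of the gnuplot curve above (the helper name below is
# local to this sketch, but the defaults mirror the options read further down);
# with power > 1 the announcement time grows slowly just above lowSpeed and
# steeply towards highSpeed:
#
#     def warn_time(speed, low_time=10.0, low_speed=13.89,
#                   high_time=60.0, high_speed=27.78, power=2.0):
#         t = ((max(low_speed, speed) - low_speed) ** power
#              * (high_time - low_time) / (high_speed - low_speed) ** power
#              + low_time)
#         return min(high_time, t)
#
#     warn_time(13.0)   # -> 10.0 s (at or below lowSpeed)
#     warn_time(20.0)   # -> ~19.7 s (72 km/h)
#     warn_time(30.0)   # -> 60.0 s (capped at highTime)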
metersPerSecSpeed = self.get('metersPerSecSpeed', None)
pointReachedDistance = int(self.get('pointReachedDistance', 30))
if metersPerSecSpeed:
# check if we can miss the point by going too fast -> mps speed > point reached distance
# also enlarge the rerouting threshold as it looks like it needs to be larger
# when moving at high speed to prevent unnecessary rerouting
if metersPerSecSpeed > pointReachedDistance * 0.75:
pointReachedDistance = metersPerSecSpeed * 2
# self.log.debug("tbt: enlarging point reached distance to: %1.2f m due to large speed (%1.2f m/s)". (pointReachedDistance, metersPerSecSpeed)
if metersPerSecSpeed > INCREASE_REROUTING_THRESHOLD_SPEED:
self.reroutingThresholdMultiplier = REROUTING_THRESHOLD_MULTIPLIER
else:
self.reroutingThresholdMultiplier = 1.0
# speed & time based triggering
lowSpeed = float(self.get('minAnnounceSpeed', 13.89))
highSpeed = float(self.get('maxAnnounceSpeed', 27.78))
highSpeed = max(highSpeed, lowSpeed + 0.1)
lowTime = int(self.get('minAnnounceTime', 10))
highTime = int(self.get('maxAnnounceTime', 60))
highTime = max(highTime, lowTime)
power = float(self.get('announcePower', 2.0))
warnTime = (max(lowSpeed, metersPerSecSpeed) - lowSpeed) ** power \
* (highTime - lowTime) / (highSpeed - lowSpeed) ** power \
+ lowTime
warnTime = min(highTime, warnTime)
distance = max(distance, warnTime * metersPerSecSpeed)
if self.get('debugTbT', False):
self.log.debug("#####")
self.log.debug("min/max announce time: %d/%d s", lowTime, highTime)
self.log.debug("trigger distance: %1.2f m (%1.2f s warning)", distance, distance / float(metersPerSecSpeed))
self.log.debug("current distance: %1.2f m", currentDistance)
self.log.debug("current speed: %1.2f m/s (%1.2f km/h)", metersPerSecSpeed, metersPerSecSpeed * 3.6)
self.log.debug("point reached distance: %f m", pointReachedDistance)
self.log.debug("1. triggered=%r, 1.5. triggered=%r, 2. triggered=%r",
self.espeakFirstTrigger, self.espeakFirstAndHalfTrigger, self.espeakSecondTrigger)
if warnTime > 30:
self.log.debug("optional (20 s) trigger distance: %1.2f", 20.0 * metersPerSecSpeed)
if currentDistance <= pointReachedDistance:
# this means we reached the point
if self.espeakSecondTrigger == False:
self.log.debug("triggering espeak nr. 2")
# say the message without distance
plaintextMessage = currentStep.getSSMLMessage()
# consider turn said even if it was skipped (ignore errors)
self.sayTurn(plaintextMessage, 0)
self.markCurrentStepAsVisited() # mark this point as visited
self.espeakFirstTrigger = True # everything has been said, again :D
self.espeakSecondTrigger = True # everything has been said, again :D
self.switchToNextStep() # switch to next step
else:
if currentDistance <= distance:
# this means we reached an optimal distance for saying the message
if self.espeakFirstTrigger == False:
self.log.debug("triggering espeak nr. 1")
plaintextMessage = currentStep.getSSMLMessage()
if self.sayTurn(plaintextMessage, currentDistance):
self.espeakFirstTrigger = True # first message done
if self.espeakFirstAndHalfTrigger == False and warnTime > 30:
if currentDistance <= (20.0 * metersPerSecSpeed):
# in case that the warning time gets too big, add an intermediate warning at 20 seconds
# NOTE: this means it is said after the first trigger
plaintextMessage = currentStep.getSSMLMessage()
if self.sayTurn(plaintextMessage, currentDistance):
self.espeakFirstAndHalfTrigger = True # intermediate message done
## automatic rerouting ##
# is automatic rerouting enabled from options
# enabled == threshold that is not None
if self._automaticReroutingEnabled():
# rerouting is enabled only once the route is reached for the first time
if self.onRoute and not self.routeReached:
self.routeReached = True
self.automaticRerouteCounter = 0
self.log.info('route reached, rerouting enabled')
# did the TBT worker detect that the rerouting threshold was reached ?
if self._reroutingConditionsMet():
# test if enough consecutive divergence points were recorded
if self.reroutingThresholdCrossedCounter >= REROUTING_TRIGGER_COUNT:
# reset the routeReached override
self.overrideRouteReached = False
# trigger rerouting
self._rerouteAuto()
else:
# reset the counter
self.reroutingThresholdCrossedCounter = 0
def _reroutingConditionsMet(self):
return (self.routeReached or self.overrideRouteReached) and not self.onRoute
def _followingRoute(self):
"""are we still following the route or is rerouting needed"""
start1 = time.clock()
pos = self.get('pos', None)
proj = self.m.get('projection', None)
if pos and proj:
pLat, pLon = pos
# we use Radians to get rid of radian conversion overhead for
# the geographic distance computation method
radiansLL = self.radiansRoute
pLat = geo.radians(pLat)
pLon = geo.radians(pLon)
if len(radiansLL) == 0:
self.log.error("Divergence: can't follow a zero point route")
return False
elif len(radiansLL) == 1: # 1 point route
aLat, aLon = radiansLL[0]
minDistance = geo.distanceApproxRadians(pLat, pLon, aLat, aLon)
else: # 2+ points route
aLat, aLon = radiansLL[0]
bLat, bLon = radiansLL[1]
minDistance = geo.distancePointToLineRadians(pLat, pLon, aLat, aLon, bLat, bLon)
aLat, aLon = bLat, bLon
for point in radiansLL[1:]:
bLat, bLon = point
dist = geo.distancePointToLineRadians(pLat, pLon, aLat, aLon, bLat, bLon)
if dist < minDistance:
minDistance = dist
aLat, aLon = bLat, bLon
# the multiplier tries to compensate for high speed movement
threshold = float(
self.get('reroutingThreshold', REROUTING_DEFAULT_THRESHOLD)) * self.reroutingThresholdMultiplier
self.log.debug("Divergence from route: %1.2f/%1.2f m computed in %1.0f ms",
minDistance * 1000, float(threshold), (1000 * (time.clock() - start1)))
return minDistance * 1000 < threshold
def _startTBTWorker(self):
self.log.info("starting worker thread")
startThread = True
if not self.TBTWorker: # reuse previous thread or start new one
self.TBTWorkerEnabled = True
t = Thread(target=self._TBTWorker)
t.daemon = True
t.start()
self.TBTWorker = t
else:
self.log.info("reusing worker thread")
def _stopTBTWorker(self):
self.TBTWorkerEnabled = False
self.TBTWorker = None
def _TBTWorker(self):
"""this function is run in its own thread and check if
we are following the current route"""
self.log.info("TBTWorker: started")
while self.route and self.TBTWorkerEnabled:
# first make sure automatic rerouting is enabled
# i.e. reroutingThreshold != None
if self._automaticReroutingEnabled():
# check if we are still following the route
# self.log.debug('TBTWorker: checking divergence from route')
self.onRoute = self._followingRoute()
if self._reroutingConditionsMet():
self.log.info('TBTWorker: divergence detected')
# switch to quick updates
for i in range(0, REROUTING_TRIGGER_COUNT + 1):
time.sleep(1)
onRoute = self._followingRoute()
if onRoute: # divergence stopped
self.onRoute = onRoute
self.log.info('TBTWorker: false alarm')
break
else: # still diverging from current route
self.onRoute = onRoute
# increase divergence counter
self.reroutingThresholdCrossedCounter += 1
self.log.debug('TBTWorker: increasing divergence counter (%d)',
self.reroutingThresholdCrossedCounter)
time.sleep(REROUTE_CHECK_INTERVAL / 1000.0)
self.log.info("TBTWorker: shutting down")
def getMonavTurns(self, monavResult):
return instructions_generator.detectMonavTurns(monavResult)
def shutdown(self):
# cleanup
self.stopTBT()
|
ryfx/modrana
|
modules/mod_turnByTurn/mod_turnByTurn.py
|
Python
|
gpl-3.0
| 39,661
|
[
"VisIt"
] |
29c7ac34ab7c6876d583c62672f9be6aeff963427beed1795c13308f48eb0e3b
|
# -*- coding: utf-8 -*-
###############################################################################
#
# PyLint tests that will never be applied for this file.
#
# Unused variables, these functions are organized so that they can be called
# from a string at runtime, based on their names being stored
# in an SQLAlchemy object attribute.
# pylint: disable-msg=W0612
###############################################################################
#
# PyLint tests that will be eventually fixed.
#
# Unused argument, the functionality, once it's implemented, will use
# the argument.
# pylint: disable-msg=W0613
"""
This module creates the functions that get used in symbol aggregation.
There are row-based, and column-based, function builders, just to stay
organized.
"""
import pandas as pd
nan = pd.np.nan
def sorted_feed_cols(df):
"""
takes a dataframe's columns that would be of the form:
['feed003', 'failsafe_feed999', 'override_feed000', 'feed001', 'feed002']
and returns:
['override_feed000', 'feed001', 'feed002', 'feed003', 'failsafe_feed999']
"""
cols = df.columns
ind = [int(c.split("feed")[1]) for c in cols]
cols = zip(ind,cols)
cols.sort()
cols = [c[1] for c in cols]
return cols
def _row_wise_priority(adf):
adf = adf.dropna()
if len(adf) > 0:
return adf.values[0]
else:
return nan
class ApplyRow(object):
"""
Mixer used to identify row-based logic methods for
Trump's Feed aggregation step.
All these functions, should take in a dataframe of multiple columns,
and return a DataFrame with a single column, or a Series.
"""
@staticmethod
def priority_fill(adf):
"""
Looks at each row, and chooses the value from the highest priority
(lowest #) feed, one row at a time.
"""
# the logic to apply overrides, values from certain feeds,
# or the failsafes, is needed for high-level functions
# in this same file.
# so "priority_fill" just wraps this, for organization
# purposes.
return _row_wise_priority(adf)
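    # Worked sketch (hypothetical row, illustrative values only): for a row
    #
    #     row = pd.Series([nan, nan, 4.2, 9.9],
    #                     index=['override_feed000', 'feed001',
    #                            'feed002', 'failsafe_feed999'])
    #
    # _row_wise_priority(row) drops the NaNs and returns 4.2, the value from the
    # highest-priority feed that has data; an override value, when present,
    # always wins, and the failsafe is only used when everything else is empty.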
@staticmethod
def mean_fill(adf):
""" Looks at each row, and calculates the mean. Honours
the Trump override/failsafe logic. """
ordpt = adf.values[0]
if not pd.isnull(ordpt):
return ordpt
fdmn = adf.iloc[1:-1].mean()
if not pd.isnull(fdmn):
return fdmn
flspt = adf.values[-1]
if not pd.isnull(flspt):
return flspt
return nan
@staticmethod
def median_fill(adf):
""" Looks at each row, and chooses the median. Honours
the Trump override/failsafe logic. """
ordpt = adf.values[0]
if not pd.isnull(ordpt):
return ordpt
fdmn = adf.iloc[1:-1].median()
if not pd.isnull(fdmn):
return fdmn
flspt = adf.values[-1]
if not pd.isnull(flspt):
return flspt
return nan
@staticmethod
def custom(adf):
"""
A custom Apply-Row Aggregator can be defined,
as any function which accepts a Series, and returns
any number-like object, which will get
assigned to the Dataframe's 'final' column in
using the pandas .apply, function.
"""
return [0] * len(adf)
class ChooseCol(object):
"""
Builds a dictionary of column-based logic to be applied by
Trump's aggregation step.
All these functions, should take in a dataframe of multiple columns,
and return a DataFrame with a single column, or a Series.
"""
@staticmethod
def most_populated(adf):
"""
Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic. """
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the most populated feed
cnt_df = feeds_only.count()
cnt = cnt_df.max()
selected_feeds = cnt_df[cnt_df == cnt]
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds.index[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df
@staticmethod
def most_recent(adf):
"""
Looks at each column, and chooses the feed with the most recent data
point. Honours the Trump override/failsafe logic. """
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the feeds with the most recent data...
feeds_with_data = feeds_only.dropna(how='all')
selected_feeds = feeds_with_data.T.dropna().index
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df
@staticmethod
def build_tri(adf):
"""
Builds a total-return series from the first two feeds: the capital (price)
percent change plus the income (dividend) yield. Honours the Trump
override/failsafe logic. """
# just look at the capital (price), in "feed one", and income (dividend), in "feed two"
cap, inc = adf.columns[1:3]
data = adf[[cap,inc]]
# find the feeds with the most recent data...
inc_pct = data[inc].div(data[cap].shift(1))
cap_pct = data[cap].pct_change(1)
pre_final = inc_pct + cap_pct
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df
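    # Worked sketch (illustrative numbers): with a price feed of [100.0, 102.0]
    # and a dividend feed of [nan, 1.0], the second period's value is
    # (102 - 100) / 100 + 1 / 100 = 0.03, i.e. a 3% total return, which is what
    # the inc_pct + cap_pct line above computes before the override/failsafe
    # logic is applied.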
@staticmethod
def custom(adf):
"""
A custom Choose-Column Aggregator can be defined,
as any function which accepts a dataframe, and returns
any Series-like object, which will get
assigned to the Dataframe's 'final' column.
"""
return [0] * len(adf)
class FeedAggregator(ApplyRow, ChooseCol):
def __init__(self,method):
try:
self.meth = getattr(self, method)
except AttributeError:
raise ValueError("{} is not an aggregator method".format(method))
self.methname = method
def aggregate(self,df):
if self.methname in ApplyRow.__dict__:
return df.apply(self.meth, axis=1)
elif self.methname in ChooseCol.__dict__:
return self.meth(df)
else:
NotImplemented("This code path could be an ugly implementation, " + \
"of a default?")
if __name__ == '__main__':
def make_fake_feed_data(l=10):
dr = pd.date_range(start='2015-01-10', periods=l, freq='D')
data = pd.np.random.rand(l)
return pd.Series(data,dr)
ors = make_fake_feed_data(1).shift(1,freq='D')
s1 = make_fake_feed_data(10)
s2 = make_fake_feed_data(5)
s3 = make_fake_feed_data(7)
fls = make_fake_feed_data(1).shift(8,freq='D')
s1.iloc[6] = pd.np.nan
s1.iloc[8] = pd.np.nan
cols = ['override_'] + [''] * 3
cols = [c + "feed{0:03d}".format(i) for i, c in enumerate(cols)]
cols = cols + ['failsafe_feed999']
df = pd.concat([ors, s1, s2, s3, fls], axis=1)
df.columns = cols
df['final'] = FeedAggregator('most_populated').aggregate(df)
print df
#assert df['final'].iloc[1] == df['override_feed000'].iloc[1]
#assert df['final'].iloc[-1] == df['feed001'].iloc[-1]
#assert df['final'].iloc[-2] == df['failsafe_feed999'].iloc[-2]
#assert df['final'].iloc[-4] == df['feed003'].iloc[-4]
|
Equitable/trump
|
trump/aggregation/symbol_aggs.py
|
Python
|
bsd-3-clause
| 8,856
|
[
"ADF"
] |
6bf50f47bcc3cd58ef416d43ccaded47ba02a73287f6e2f78a636a180bf2c235
|
"""
API operations related to tagging items.
"""
import logging
from galaxy import web
from galaxy.web.base.controller import BaseAPIController, UsesTagsMixin
from paste.httpexceptions import HTTPBadRequest
log = logging.getLogger( __name__ )
class BaseItemTagsController( BaseAPIController, UsesTagsMixin ):
"""
"""
@web.expose_api
def index( self, trans, **kwd ):
"""
"""
tags = self._get_user_tags(trans, self.tagged_item_class, kwd[self.tagged_item_id])
return [ self._api_value( tag, trans, view='collection' ) for tag in tags ]
@web.expose_api
def show( self, trans, tag_name, **kwd ):
"""
"""
tag = self._get_item_tag_assoc( trans, self.tagged_item_class, kwd[self.tagged_item_id], tag_name )
if not tag:
raise HTTPBadRequest("Failed to retrieve specified tag.")
return self._api_value( tag, trans )
@web.expose_api
def create( self, trans, tag_name, payload=None, **kwd ):
"""
"""
payload = payload or {}
value = payload.get("value", None)
tag = self._apply_item_tag( trans, self.tagged_item_class, kwd[self.tagged_item_id], tag_name, value )
return self._api_value( tag, trans )
# Not handling these differently at this time
update = create
@web.expose_api
def delete( self, trans, tag_name, **kwd ):
"""
"""
deleted = self._remove_items_tag( trans, self.tagged_item_class, kwd[self.tagged_item_id], tag_name )
if not deleted:
raise HTTPBadRequest("Failed to delete specified tag.")
return 'OK'
def _api_value( self, tag, trans, view='element' ):
return tag.to_dict( view=view, value_mapper={ 'id': trans.security.encode_id } )
class HistoryContentTagsController( BaseItemTagsController ):
controller_name = "history_content_tags"
tagged_item_class = "HistoryDatasetAssociation"
tagged_item_id = "history_content_id"
class HistoryTagsController( BaseItemTagsController ):
controller_name = "history_tags"
tagged_item_class = "History"
tagged_item_id = "history_id"
class WorkflowTagsController( BaseItemTagsController ):
controller_name = "workflow_tags"
tagged_item_class = "StoredWorkflow"
tagged_item_id = "workflow_id"
# TODO: Visualization and Pages once APIs for those are available
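# A hedged sketch of what a Pages controller could look like once a Pages API is
# available, following the pattern above; it is kept commented out, and the
# controller/item names below are assumptions, not existing Galaxy identifiers:
#
# class PageTagsController( BaseItemTagsController ):
#     controller_name = "page_tags"
#     tagged_item_class = "Page"
#     tagged_item_id = "page_id"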
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/api/item_tags.py
|
Python
|
gpl-3.0
| 2,393
|
[
"Galaxy"
] |
1bf4e5cacab0b78be175a8db064d9627cfacbc674094ac1d4611778d8154f1ad
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
from espressomd.accumulators import Correlator
from espressomd.observables import ParticleVelocities, ParticleBodyAngularVelocities
from thermostats_common import ThermostatsCommon
class LangevinThermostat(ut.TestCase, ThermostatsCommon):
"""Tests velocity distributions and diffusion for Langevin Dynamics"""
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.cell_system.set_domain_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0
system.periodicity = [0, 0, 0]
def setUp(self):
np.random.seed(42)
def tearDown(self):
self.system.time_step = 1e-12
self.system.cell_system.skin = 0.0
self.system.part.clear()
self.system.auto_update_accumulators.clear()
self.system.thermostat.turn_off()
self.system.integrator.set_vv()
def check_vel_dist_global_temp(self, recalc_forces, loops):
"""Test velocity distribution for global Langevin parameters.
Parameters
----------
recalc_forces : :obj:`bool`
True if the forces should be recalculated after every step.
loops : :obj:`int`
Number of sampling loops
"""
N = 200
system = self.system
system.time_step = 0.06
kT = 1.1
gamma = 3.5
system.thermostat.set_langevin(kT=kT, gamma=gamma, seed=41)
v_minmax = 5
bins = 4
error_tol = 0.016
self.check_global(
N, kT, loops, v_minmax, bins, error_tol, recalc_forces)
def test_vel_dist_global_temp(self):
"""Test velocity distribution for global Langevin parameters."""
self.check_vel_dist_global_temp(False, loops=150)
def test_vel_dist_global_temp_initial_forces(self):
"""Test velocity distribution for global Langevin parameters,
when using the initial force calculation.
"""
self.check_vel_dist_global_temp(True, loops=170)
@utx.skipIfMissingFeatures("THERMOSTAT_PER_PARTICLE")
def test_vel_dist_per_particle(self):
"""Test Langevin dynamics with particle-specific kT and gamma. Covers
all combinations of particle-specific gamma and temp set or not set.
"""
N = 400
system = self.system
system.time_step = 0.06
kT = 0.9
gamma = 3.2
gamma2 = 4.3
system.thermostat.set_langevin(kT=kT, gamma=gamma, seed=41)
loops = 300
v_minmax = 5
bins = 4
error_tol = 0.016
self.check_per_particle(
N, kT, gamma2, loops, v_minmax, bins, error_tol)
def setup_diff_mass_rinertia(self, p):
if espressomd.has_features("MASS"):
p.mass = 0.5
if espressomd.has_features("ROTATION"):
p.rotation = [1, 1, 1]
# Make sure rinertia does not change diff coeff
if espressomd.has_features("ROTATIONAL_INERTIA"):
p.rinertia = [0.4, 0.4, 0.4]
def verify_diffusion(self, p, corr, kT, gamma):
"""Verify diffusion coeff.
p: particle, corr: correlator covering all particles,
kT: temperature, gamma: friction coefficient as a 3-component vector.
"""
c = corr
# Integral of vacf via Green-Kubo D = int_0^infty <v(t_0)v(t_0+t)> dt
# (or 1/3, since we work componentwise)
acf = c.result()
tau = c.lag_times()
# Integrate with trapezoidal rule
for i in range(3):
I = np.trapz(acf[:, p.id, i], tau)
ratio = I / (kT / gamma[i])
self.assertAlmostEqual(ratio, 1., delta=0.07)
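    # Worked sketch (numbers from test_06__diffusion below): with kT = 1.37 and a
    # translational gamma of 3.1, each velocity component's Green-Kubo integral
    # should land near kT / gamma = 1.37 / 3.1 ~ 0.44, so the asserted ratio is
    # close to 1 within the 7% tolerance.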
def test_06__diffusion(self):
"""This tests rotational and translational diffusion coeff via Green-Kubo"""
system = self.system
kT = 1.37
dt = 0.1
system.time_step = dt
# Translational gamma. We cannot test per-component, if rotation is on,
# because body and space frames become different.
gamma = 3.1
# Rotational gamma
gamma_rot_i = 4.7
gamma_rot_a = [4.2, 1, 1.2]
# If we have langevin per particle:
# Translation
per_part_gamma = 1.63
# Rotational
per_part_gamma_rot_i = 2.6
per_part_gamma_rot_a = [2.4, 3.8, 1.1]
# Particle with global thermostat params
p_global = system.part.add(pos=(0, 0, 0))
# Make sure, mass doesn't change diff coeff
self.setup_diff_mass_rinertia(p_global)
# particle specific gamma, kT, and both
if espressomd.has_features("THERMOSTAT_PER_PARTICLE"):
p_gamma = system.part.add(pos=(0, 0, 0))
self.setup_diff_mass_rinertia(p_gamma)
if espressomd.has_features("PARTICLE_ANISOTROPY"):
p_gamma.gamma = per_part_gamma, per_part_gamma, per_part_gamma
if espressomd.has_features("ROTATION"):
p_gamma.gamma_rot = per_part_gamma_rot_a
else:
p_gamma.gamma = per_part_gamma
if espressomd.has_features("ROTATION"):
p_gamma.gamma_rot = per_part_gamma_rot_i
# Thermostat setup
if espressomd.has_features("ROTATION"):
if espressomd.has_features("PARTICLE_ANISOTROPY"):
# particle anisotropy and rotation
system.thermostat.set_langevin(
kT=kT, gamma=gamma, gamma_rotation=gamma_rot_a, seed=41)
else:
# Rotation without particle anisotropy
system.thermostat.set_langevin(
kT=kT, gamma=gamma, gamma_rotation=gamma_rot_i, seed=41)
else:
# No rotation
system.thermostat.set_langevin(kT=kT, gamma=gamma, seed=41)
system.cell_system.skin = 0.4
system.integrator.run(100)
# Correlators
vel_obs = {}
omega_obs = {}
corr_vel = {}
corr_omega = {}
all_particles = [p_global]
if espressomd.has_features("THERMOSTAT_PER_PARTICLE"):
all_particles.append(p_gamma)
# linear vel
vel_obs = ParticleVelocities(ids=system.part[:].id)
corr_vel = Correlator(
obs1=vel_obs, tau_lin=10, tau_max=1.4, delta_N=2,
corr_operation="componentwise_product", compress1="discard1")
system.auto_update_accumulators.add(corr_vel)
# angular vel
if espressomd.has_features("ROTATION"):
omega_obs = ParticleBodyAngularVelocities(ids=system.part[:].id)
corr_omega = Correlator(
obs1=omega_obs, tau_lin=10, tau_max=1.5, delta_N=2,
corr_operation="componentwise_product", compress1="discard1")
system.auto_update_accumulators.add(corr_omega)
system.integrator.run(80000)
system.auto_update_accumulators.remove(corr_vel)
corr_vel.finalize()
if espressomd.has_features("ROTATION"):
system.auto_update_accumulators.remove(corr_omega)
corr_omega.finalize()
# Verify diffusion
# Translation
# Cast gammas to vector, to make checks independent of
# PARTICLE_ANISOTROPY
gamma = np.ones(3) * gamma
per_part_gamma = np.ones(3) * per_part_gamma
self.verify_diffusion(p_global, corr_vel, kT, gamma)
if espressomd.has_features("THERMOSTAT_PER_PARTICLE"):
self.verify_diffusion(p_gamma, corr_vel, kT, per_part_gamma)
# Rotation
if espressomd.has_features("ROTATION"):
# Decide on effective gamma rotation, since for rotation it is
# direction dependent
eff_gamma_rot = None
if espressomd.has_features("PARTICLE_ANISOTROPY"):
eff_gamma_rot = gamma_rot_a
eff_per_part_gamma_rot = per_part_gamma_rot_a
else:
eff_gamma_rot = gamma_rot_i * np.ones(3)
eff_per_part_gamma_rot = per_part_gamma_rot_i * np.ones(3)
self.verify_diffusion(p_global, corr_omega, kT, eff_gamma_rot)
if espressomd.has_features("THERMOSTAT_PER_PARTICLE"):
self.verify_diffusion(
p_gamma, corr_omega, kT, eff_per_part_gamma_rot)
def test_08__noise_correlation(self):
"""Checks that the Langevin noise is uncorrelated"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.1
kT = 3.2
system.thermostat.set_langevin(kT=kT, gamma=5.1, seed=17)
system.part.add(id=(1, 2), pos=np.zeros((2, 3)))
steps = int(2E5)
error_delta = 0.04
self.check_noise_correlation(kT, steps, error_delta)
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/python/langevin_thermostat_stats.py
|
Python
|
gpl-3.0
| 9,562
|
[
"ESPResSo"
] |
aa571051f9b36cb18477a219e099b59d96fbec66feaddfd304652ec1743b9f60
|
#!/usr/bin/env python
""" move base utils
modified from movebasesquare
ren ye 2016-10-19
"""
import rospy
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, Point, Quaternion, Twist, Vector3
from sensor_msgs.msg import NavSatFix
from nav_msgs.msg import Odometry
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import tf
from geographiclib.geodesic import Geodesic
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from visualization_msgs.msg import Marker
from math import radians, pi, sin, cos, sqrt
class MoveBaseUtil():
x0, y0, yaw0 = 0, 0, 0
lat, lon = 0, 0
cancel_id = ""
def __init__(self, nodename="nav_test", is_newnode=True):
if is_newnode:
rospy.init_node(nodename, anonymous=False)
rate = rospy.Rate(10)
else:
rate = None
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
#rospy.on_shutdown(self.shutdown)
# * get parameters
# * Create a list to hold the target quaternions (orientations)
# * Create a list to hold the waypoint poses
# * create angles
# * convert the angles to quaternions
# * Append each of the four waypoints to the list. Each waypoint
# is a pose consisting of a position and orientation in the map frame.
# Initialize the visualization markers for RViz
self.init_markers()
self.odom_received = False
# rospy.wait_for_message("/odom", Odometry)
# rospy.Subscriber("/odom", Odometry, self.odom_callback, queue_size=50)
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
rospy.Subscriber("move_base/cancel", GoalID, self.cancel_callback, queue_size=5)
# * Set a visualization marker at each waypoint
# * Publisher to manually control the robot (e.g. to stop it)
# self.cmd_vel_pub = rospy.Publisher('move_base_cmd_vel', Twist, queue_size=5)
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)
# * Subscribe to the move_base action server
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# * Wait 60 seconds for the action server to become available
rospy.loginfo("Waiting for move_base action server...")
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move base server")
rospy.loginfo("Starting navigation test")
# * Cycle through the four waypoints
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def transform_tf(self, x_target_base, y_target_base, yaw_target_base, fixed_frame, base_frame):
""" get the (x, y) wrt fixed frame from (x, y) wrt base frame"""
# (x, y, yaw) of the base frame wrt fixed frame
(trans, rot) = self.get_tf(fixed_frame, base_frame)
x_base_fixed, y_base_fixed = trans.x, trans.y
_, _, yaw_base_fixed = tf.transformations.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
# get the point wrt fixed
# final vector = fixed vector + rot_mat * base vector
x_target_fixed, y_target_fixed = x_base_fixed + \
cos(yaw_base_fixed) * x_target_base - sin(yaw_base_fixed) * y_target_base, \
y_base_fixed + \
sin(yaw_base_fixed) * x_target_base + cos(yaw_base_fixed) * y_target_base
yaw_target_fixed = yaw_target_base + yaw_base_fixed
return [x_target_fixed, y_target_fixed, yaw_target_fixed]
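# Numeric sketch (assumed values, for illustration only): with the base frame
# at (x, y, yaw) = (1.0, 2.0, pi / 2) in the fixed frame, a target at
# (1.0, 0.0) in the base frame maps to roughly (1.0, 3.0) in the fixed frame,
# since rot_mat = [[cos(yaw), -sin(yaw)], [sin(yaw), cos(yaw)]].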
def convert_gps_to_absolute(self, lat, lon):
""" get current gps point of the boat,
calculate the distance and heading to the target point
remap to map frame """
# calculate distance and azimuth (angle between distance and north)
result = Geodesic.WGS84.Inverse(self.lat, self.lon, lat, lon)
r = result['s12']
azi = result['azi1'] * pi / 180.0
theta = pi / 2 - azi # wrt map's x axis
# print "r and theta", r, theta
center = [self.x0 + r * cos(theta), self.y0 + r * sin(theta), 0]
heading = theta
return [center, heading]
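# Illustrative example (assumed numbers): if the target GPS point lies about
# 100 m due east of the current fix, Geodesic.WGS84.Inverse gives azi1 ~= 90
# deg, so theta ~= 0 and the returned center is roughly [self.x0 + 100, self.y0, 0].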
def navsat_fix_callback(self, msg):
""" callback navsat """
self.lat = msg.latitude
self.lon = msg.longitude
self.fix_received = True
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
# """ call back to subscribe, get odometry data:
# pose and orientation of the current boat,
# suffix 0 is for origin """
# x0 = msg.pose.pose.position.x
# y0 = msg.pose.pose.position.y
# # self.x0 = msg.pose.pose.position.x
# # self.y0 = msg.pose.pose.position.y
# x = msg.pose.pose.orientation.x
# y = msg.pose.pose.orientation.y
# z = msg.pose.pose.orientation.z
# w = msg.pose.pose.orientation.w
# # _, _, self.yaw0 = euler_from_quaternion((x, y, z, w))
# _, _, yaw0 = euler_from_quaternion((x, y, z, w))
# # get odom to map transform
# self.x0, self.y0, self.yaw0 = self.transform_tf(x0, y0, yaw0, "map", "odom")
# rospy.loginfo([self.x0, self.y0, self.yaw0])
def cancel_callback(self, msg):
self.cancel_id = msg.id
print self.cancel_id
# rospy.loginfo(self.cancel_id)
def convert_relative_to_absolute(self, coordinate):
""" boat's tf is base_link
target is polar (r, theta) wrt base_link
need to spawn waypoint (x1, y1) at map
1. calculate target (xtb, ytb) wrt base_link by trigonometry
2. tf transform from base_link to map
3. calculate target (x1, y1) wrt map by vector calculus:
(x1, y1) = (xb, yb) + rot_mat*(xtb, ytb)
where rot_mat = [cos theta, -sin theta; sin theta, cos theta]
"""
# wrt base_link
# theta is the angle between base_link's x axis and r
r, theta = coordinate
x_target_base, y_target_base = r * cos(theta), r * sin(theta)
x_target_rot, y_target_rot = \
cos(self.yaw0) * x_target_base - sin(self.yaw0) * y_target_base, \
sin(self.yaw0) * x_target_base + cos(self.yaw0) * y_target_base
heading = theta + self.yaw0
center = [self.x0 + x_target_rot, self.y0 + y_target_rot, 0]
return [center, heading]
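# Numeric sketch (assumed pose, for illustration only): with
# self.x0 = self.y0 = self.yaw0 = 0, convert_relative_to_absolute((2.0, pi / 2))
# returns approximately [[0.0, 2.0, 0], pi / 2], i.e. a point 2 m to the
# left (port side) of the boat.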
def move(self, goal, mode, mode_param):
""" mode1: continuous movement function, mode_param is the distance from goal that will set the next goal
mode2: stop and rotate mode, mode_param is rotational angle in rad
mode3: normal stop in each waypoint mode, mode_param is unused """
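# Illustrative calls (hypothetical parameters): self.move(goal, 1, 2.0) cancels
# the active goal once the robot is within 2 m of it so the caller can send the
# next waypoint, while self.move(goal, 3, None) simply waits up to 60 s for
# move_base to report a result.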
# Send the goal pose to the MoveBaseAction server
self.move_base.send_goal(goal)
finished_within_time = True
go_to_next = False
if mode == 1: # continuous movement function, mode_param is the distance from goal that will set the next goal
# (trans, _) = self.get_tf()
while sqrt((self.x0 - goal.target_pose.pose.position.x) ** 2 +
(self.y0 - goal.target_pose.pose.position.y) ** 2) > mode_param:
rospy.sleep(rospy.Duration(1))
# (trans, _) = self.get_tf()
go_to_next = True
elif mode == 2: # stop and rotate mode, mode_param is rotational angle in rad
finished_within_time = self.move_base.wait_for_result(rospy.Duration(40 * 1))
self.rotation(mode_param)
self.rotation(-2 * mode_param)
self.rotation(mode_param)
else: # normal stop in each waypoint mode, mode_param is unused
finished_within_time = self.move_base.wait_for_result(rospy.Duration(60 * 1))
# If we don't get there in time, abort the goal
if not finished_within_time or go_to_next:
self.move_base.cancel_goal()
rospy.loginfo("Goal cancelled, next...")
else:
# We made it!
state = self.move_base.get_state()
if state == GoalStatus.SUCCEEDED:
rospy.loginfo("Goal succeeded!")
def rotation(self, ang):
rate = rospy.Rate(10)
an_vel = 0.1
duration = ang / an_vel
msg = Twist(Vector3(0.0, 0.0, 0.0), Vector3(0.0, 0.0, an_vel))
rate.sleep()
start_time = rospy.get_time()
while not rospy.is_shutdown():
current_time = rospy.get_time()
if (current_time - start_time) > duration:
self.cmd_vel_pub.publish(Twist(Vector3(0, 0.0, 0.0), Vector3(0.0, 0.0, -2 * an_vel)))
self.cmd_vel_pub.publish(Twist())
break
else:
self.cmd_vel_pub.publish(msg)
rate.sleep()
def reverse_tf(self, distance=5, speed=-1):
""" reverse to certain distance """
rate = rospy.Rate(10)
linear_speed = speed
if linear_speed > 0:
linear_speed = -1 * linear_speed
move_cmd = Twist()
# Set the movement command to forward motion
move_cmd.linear.x = linear_speed
# Get the starting position values
# (position, rotation) = self.get_tf()
# x_start = position.x
# y_start = position.y
rate.sleep()
x_start, y_start = self.x0, self.y0
print x_start, y_start
# Keep track of the distance traveled
d = 0
# Enter the loop to move along a side
while d < distance and not rospy.is_shutdown():
# Publish the Twist message and sleep 1 cycle
self.cmd_vel_pub.publish(move_cmd)
rate.sleep()
# Get the current position
# (position, rotation) = self.get_tf()
# Compute the Euclidean distance from the start
d = sqrt(pow((self.x0 - x_start), 2) +
pow((self.y0 - y_start), 2))
# print d
# Stop the robot before the rotation
move_cmd = Twist()
self.cmd_vel_pub.publish(move_cmd)
rospy.sleep(1)
def reverse_time(self, duration=5, speed=-1):
""" full reverse with a duration """
rate = rospy.Rate(10)
msg = Twist(Vector3(speed, 0.0, 0.0), Vector3(0.0, 0.0, 0.0))
rate.sleep()
start_time = rospy.get_time()
while not rospy.is_shutdown():
current_time = rospy.get_time()
if (current_time - start_time) > duration:
self.cmd_vel_pub.publish(Twist())
break
else:
self.cmd_vel_pub.publish(msg)
rate.sleep()
def init_markers(self):
# Set up our waypoint markers
marker_scale = 0.2
marker_lifetime = 0 # 0 is forever
marker_ns = 'waypoints'
marker_id = 0
marker_color = {'r': 1.0, 'g': 0.7, 'b': 1.0, 'a': 1.0}
# Define a marker publisher.
self.marker_pub = rospy.Publisher('waypoint_markers', Marker, queue_size=5)
# Initialize the marker points list.
self.markers = Marker()
self.markers.ns = marker_ns
self.markers.id = marker_id
# self.markers.type = Marker.ARROW
self.markers.type = Marker.CUBE_LIST
self.markers.action = Marker.ADD
self.markers.lifetime = rospy.Duration(marker_lifetime)
self.markers.scale.x = marker_scale
self.markers.scale.y = marker_scale
self.markers.scale.z = marker_scale
self.markers.color.r = marker_color['r']
self.markers.color.g = marker_color['g']
self.markers.color.b = marker_color['b']
self.markers.color.a = marker_color['a']
self.markers.header.frame_id = 'odom'
self.markers.header.stamp = rospy.Time.now()
self.markers.points = list()
def shutdown(self):
rospy.loginfo("Stopping the robot...")
# Cancel any active goals
self.move_base.cancel_goal()
rospy.sleep(2)
# Stop the robot
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
if __name__ == "__main__":
util = MoveBaseUtil()
util.reverse_tf()
|
phamngtuananh/Singaboat_RobotX2016
|
robotx_nav/nodes/move_base_util.py
|
Python
|
gpl-3.0
| 13,294
|
[
"xTB"
] |
1c788c88cff7eaea4fde4d0298c272d1c5515ae3d2db92b18d35b18011ddafe4
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: mimetypes.py
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=1) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=1) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib
try:
import _winreg
except ImportError:
_winreg = None
__all__ = [
'guess_type', 'guess_extension', 'guess_all_extensions',
'add_type', 'read_mime_types', 'init']
knownfiles = [
'/etc/mime.types',
'/etc/httpd/mime.types',
'/etc/httpd/conf/mime.types',
'/etc/apache/mime.types',
'/etc/apache2/mime.types',
'/usr/local/etc/httpd/conf/mime.types',
'/usr/local/lib/netscape/mime.types',
'/usr/local/etc/httpd/conf/mime.types',
'/usr/local/etc/mime.types']
inited = False
_db = None
class MimeTypes:
"""MIME-types datastore.
This datastore can handle information from mime.types-style files
and supports basic determination of MIME type from a filename or
URL, and can guess a reasonable extension given a MIME type.
"""
def __init__(self, filenames=(), strict=True):
global inited
if not inited:
init()
self.encodings_map = encodings_map.copy()
self.suffix_map = suffix_map.copy()
self.types_map = ({}, {})
self.types_map_inv = ({}, {})
for ext, type in types_map.items():
self.add_type(type, ext, True)
for ext, type in common_types.items():
self.add_type(type, ext, False)
for name in filenames:
self.read(name, strict)
def add_type(self, type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
self.types_map[strict][ext] = type
exts = self.types_map_inv[strict].setdefault(type, [])
if ext not in exts:
exts.append(ext)
def guess_type(self, url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if
the type can't be guessed (no or unknown suffix) or a string
of the form type/subtype, usable for a MIME Content-type
header; and encoding is None for no encoding or the name of
the program used to encode (e.g. compress or gzip). The
mappings are table driven. Encoding suffixes are case
sensitive; type suffixes are first tried case sensitive, then
case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all
mapped to '.tar.gz'. (This is table-driven too, using the
dictionary suffix_map.)
Optional `strict' argument when False adds a bunch of commonly found,
but non-standard types.
"""
scheme, url = urllib.splittype(url)
if scheme == 'data':
comma = url.find(',')
if comma < 0:
return (None, None)
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return (type, None)
else:
base, ext = posixpath.splitext(url)
while ext in self.suffix_map:
base, ext = posixpath.splitext(base + self.suffix_map[ext])
if ext in self.encodings_map:
encoding = self.encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
types_map = self.types_map[True]
if ext in types_map:
return (types_map[ext], encoding)
if ext.lower() in types_map:
return (types_map[ext.lower()], encoding)
if strict:
return (None, encoding)
types_map = self.types_map[False]
if ext in types_map:
return (types_map[ext], encoding)
if ext.lower() in types_map:
return (types_map[ext.lower()], encoding)
return (
None, encoding)
return
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
def guess_extension(self, type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension,
including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
extensions = self.guess_all_extensions(type, strict)
if not extensions:
return None
else:
return extensions[0]
def read(self, filename, strict=True):
"""
Read a single mime.types-format file, specified by pathname.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
with open(filename) as fp:
self.readfp(fp, strict)
def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict)
def read_windows_registry(self, strict=True):
"""
Load the MIME types database from Windows registry.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if not _winreg:
return
def enum_types(mimedb):
i = 0
while True:
try:
ctype = _winreg.EnumKey(mimedb, i)
except EnvironmentError:
break
try:
ctype = ctype.encode(default_encoding)
except UnicodeEncodeError:
pass
else:
yield ctype
i += 1
default_encoding = sys.getdefaultencoding()
with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, 'MIME\\Database\\Content Type') as mimedb:
for ctype in enum_types(mimedb):
try:
with _winreg.OpenKey(mimedb, ctype) as key:
suffix, datatype = _winreg.QueryValueEx(key, 'Extension')
except EnvironmentError:
continue
if datatype != _winreg.REG_SZ:
continue
try:
suffix = suffix.encode(default_encoding)
except UnicodeEncodeError:
continue
self.add_type(ctype, suffix, strict)
def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types.
"""
global _db
if _db is None:
init()
return _db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension, including the
leading dot ('.'). The extension is not guaranteed to have been
associated with any particular data stream, but would be mapped to the
MIME type `type' by guess_type(). If no extension can be guessed for
`type', None is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if _db is None:
init()
return _db.add_type(type, ext, strict)
def init(files=None):
global encodings_map
global suffix_map
global types_map
global _db
global inited
global common_types
inited = True
db = MimeTypes()
if files is None:
if _winreg:
db.read_windows_registry()
files = knownfiles
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
_db = db
return
def read_mime_types(file):
try:
f = open(file)
except IOError:
return None
db = MimeTypes()
db.readfp(f, True)
return db.types_map[True]
def _default_mime_types():
global types_map
global encodings_map
global common_types
global suffix_map
suffix_map = {'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2'
}
encodings_map = {'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2'
}
types_map = {'.a': 'application/octet-stream',
'.ai': 'application/postscript',
'.aif': 'audio/x-aiff',
'.aifc': 'audio/x-aiff',
'.aiff': 'audio/x-aiff',
'.au': 'audio/basic',
'.avi': 'video/x-msvideo',
'.bat': 'text/plain',
'.bcpio': 'application/x-bcpio',
'.bin': 'application/octet-stream',
'.bmp': 'image/x-ms-bmp',
'.c': 'text/plain',
'.cdf': 'application/x-cdf',
'.cdf': 'application/x-netcdf',
'.cpio': 'application/x-cpio',
'.csh': 'application/x-csh',
'.css': 'text/css',
'.dll': 'application/octet-stream',
'.doc': 'application/msword',
'.dot': 'application/msword',
'.dvi': 'application/x-dvi',
'.eml': 'message/rfc822',
'.eps': 'application/postscript',
'.etx': 'text/x-setext',
'.exe': 'application/octet-stream',
'.gif': 'image/gif',
'.gtar': 'application/x-gtar',
'.h': 'text/plain',
'.hdf': 'application/x-hdf',
'.htm': 'text/html',
'.html': 'text/html',
'.ief': 'image/ief',
'.jpe': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.jpg': 'image/jpeg',
'.js': 'application/x-javascript',
'.ksh': 'text/plain',
'.latex': 'application/x-latex',
'.m1v': 'video/mpeg',
'.man': 'application/x-troff-man',
'.me': 'application/x-troff-me',
'.mht': 'message/rfc822',
'.mhtml': 'message/rfc822',
'.mif': 'application/x-mif',
'.mov': 'video/quicktime',
'.movie': 'video/x-sgi-movie',
'.mp2': 'audio/mpeg',
'.mp3': 'audio/mpeg',
'.mp4': 'video/mp4',
'.mpa': 'video/mpeg',
'.mpe': 'video/mpeg',
'.mpeg': 'video/mpeg',
'.mpg': 'video/mpeg',
'.ms': 'application/x-troff-ms',
'.nc': 'application/x-netcdf',
'.nws': 'message/rfc822',
'.o': 'application/octet-stream',
'.obj': 'application/octet-stream',
'.oda': 'application/oda',
'.p12': 'application/x-pkcs12',
'.p7c': 'application/pkcs7-mime',
'.pbm': 'image/x-portable-bitmap',
'.pdf': 'application/pdf',
'.pfx': 'application/x-pkcs12',
'.pgm': 'image/x-portable-graymap',
'.pl': 'text/plain',
'.png': 'image/png',
'.pnm': 'image/x-portable-anymap',
'.pot': 'application/vnd.ms-powerpoint',
'.ppa': 'application/vnd.ms-powerpoint',
'.ppm': 'image/x-portable-pixmap',
'.pps': 'application/vnd.ms-powerpoint',
'.ppt': 'application/vnd.ms-powerpoint',
'.ps': 'application/postscript',
'.pwz': 'application/vnd.ms-powerpoint',
'.py': 'text/x-python',
'.pyc': 'application/x-python-code',
'.pyo': 'application/x-python-code',
'.qt': 'video/quicktime',
'.ra': 'audio/x-pn-realaudio',
'.ram': 'application/x-pn-realaudio',
'.ras': 'image/x-cmu-raster',
'.rdf': 'application/xml',
'.rgb': 'image/x-rgb',
'.roff': 'application/x-troff',
'.rtx': 'text/richtext',
'.sgm': 'text/x-sgml',
'.sgml': 'text/x-sgml',
'.sh': 'application/x-sh',
'.shar': 'application/x-shar',
'.snd': 'audio/basic',
'.so': 'application/octet-stream',
'.src': 'application/x-wais-source',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc': 'application/x-sv4crc',
'.swf': 'application/x-shockwave-flash',
'.t': 'application/x-troff',
'.tar': 'application/x-tar',
'.tcl': 'application/x-tcl',
'.tex': 'application/x-tex',
'.texi': 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.tr': 'application/x-troff',
'.tsv': 'text/tab-separated-values',
'.txt': 'text/plain',
'.ustar': 'application/x-ustar',
'.vcf': 'text/x-vcard',
'.wav': 'audio/x-wav',
'.wiz': 'application/msword',
'.wsdl': 'application/xml',
'.xbm': 'image/x-xbitmap',
'.xlb': 'application/vnd.ms-excel',
'.xls': 'application/excel',
'.xls': 'application/vnd.ms-excel',
'.xml': 'text/xml',
'.xpdl': 'application/xml',
'.xpm': 'image/x-xpixmap',
'.xsl': 'application/xml',
'.xwd': 'image/x-xwindowdump',
'.zip': 'application/zip'
}
common_types = {'.jpg': 'image/jpg',
'.mid': 'audio/midi',
'.midi': 'audio/midi',
'.pct': 'image/pict',
'.pic': 'image/pict',
'.pict': 'image/pict',
'.rtf': 'application/rtf',
'.xul': 'text/xul'
}
_default_mime_types()
if __name__ == '__main__':
import getopt
USAGE = 'Usage: mimetypes.py [options] type\n\nOptions:\n --help / -h -- print this message and exit\n --lenient / -l -- additionally search of some common, but non-standard\n types.\n --extension / -e -- guess extension instead of type\n\nMore than one type argument may be given.\n'
def usage(code, msg=''):
print USAGE
if msg:
print msg
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hle', ['help',
'lenient',
'extension'])
except getopt.error as msg:
usage(1, msg)
strict = 1
extension = 0
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-l', '--lenient'):
strict = 0
elif opt in ('-e', '--extension'):
extension = 1
for gtype in args:
if extension:
guess = guess_extension(gtype, strict)
if not guess:
print "I don't know anything about type",
print gtype
else:
print guess
else:
guess, encoding = guess_type(gtype, strict)
if not guess:
print "I don't know anything about type", gtype
else:
print 'type:',
print guess, 'encoding:', encoding
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/mimetypes.py
|
Python
|
unlicense
| 19,263
|
[
"NetCDF"
] |
88ada33f3f66f146885825b46d871eb22dc9fdc1f00c8da16e7ba0775ea7ffa4
|
'''
Created on 26/04/2011
@author: jose
'''
import unittest
import os.path, subprocess, sys
from tempfile import NamedTemporaryFile
import franklin
from franklin.utils.misc_utils import TEST_DATA_DIR
from franklin.seq.writers import create_temp_seq_file
from franklin.seq.readers import seqs_in_file
from franklin.seq.seqs import SeqWithQuality, Seq
from franklin.utils.test_utils import create_random_seqwithquality
CLEAN_READS = os.path.join(os.path.split(franklin.__path__[0])[0],
'scripts', 'clean_reads')
def _call(cmd):
'It runs the command and it returns stdout, stderr and retcode'
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
retcode = process.returncode
return stdout, stderr, retcode
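# Illustrative: on a POSIX system, _call(['echo', 'hi']) would return
# ('hi\n', '', 0).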
def _call_python(cmd):
'It runs a python executable'
cmd.insert(0, sys.executable)
return _call(cmd)
class CleanReadsTest(unittest.TestCase):
'It tests the clean_reads script'
def test_script_exists(self):
'clean_reads exists'
if not os.path.exists(CLEAN_READS):
self.fail('clean_reads does not exist')
def test_help(self):
'The help text is generated'
stdout = _call_python([CLEAN_READS])[0]
assert 'SEQ_IN' in stdout
stdout = _call_python([CLEAN_READS , '-h'])[0]
assert 'SEQ_IN' in stdout
stdout = _call_python([CLEAN_READS , '--version'])[0]
assert 'clean_reads' in stdout
def test_error(self):
'It tests that we can capture an unexpected error'
error_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, 'testerror', '--error_log', error_fhand.name]
stdout, stderr = _call_python(cmd)[:-1]
error_log = open(error_fhand.name).read()
assert 'An unexpected error happened' in stderr
assert 'function calls leading' in error_log
def test_tempdir(self):
'It tests that the tmpdir works fine'
seq1 = create_random_seqwithquality(500, qual_range=55)
seq2 = create_random_seqwithquality(50, qual_range=15)
seqs = [seq1 + seq2]
inseq_fhand = create_temp_seq_file(seqs, format='qual')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'sanger', '--tmpdir', '.']
retcode = _call_python(cmd)[-1]
assert retcode == 0
dir_without_perm = '/usr'
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'sanger', '--tmpdir', dir_without_perm]
stderr, retcode = _call_python(cmd)[1:]
assert retcode == 1
assert "Permission denied: '%s" % dir_without_perm in stderr
dir_without_perm = '/usr/remove_this_dir'
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'sanger', '--tmpdir', dir_without_perm]
stderr, retcode = _call_python(cmd)[1:]
assert retcode == 14
assert "Permission denied: '%s" % dir_without_perm in stderr
errorlog_path = 'clean_reads.error'
if os.path.exists(errorlog_path):
os.remove(errorlog_path)
def test_sanger(self):
'It tests the basic sanger cleaning'
seq1 = create_random_seqwithquality(500, qual_range=55)
seq2 = create_random_seqwithquality(50, qual_range=15)
seqs = [seq1 + seq2]
inseq_fhand, inqual_fhand = create_temp_seq_file(seqs, format='qual')
outseq_fhand = NamedTemporaryFile()
outqual_fhand = NamedTemporaryFile()
#platform is required
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name]
stderr = _call_python(cmd)[1]
assert 'required' in stderr
#a correct platform is required
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'hola']
stderr = _call_python(cmd)[1]
assert 'choice' in stderr
#disable quality trimming and lucy_splice are incompatible
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'sanger', '-x', '--lucy_splice', 'splice.fasta']
stderr = _call_python(cmd)[1]
assert 'incompatible' in stderr
#we can clean a sanger sequence with quality
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-q', inqual_fhand.name,
'-o', outseq_fhand.name, '-u', outqual_fhand.name,
'-p', 'sanger']
retcode = _call_python(cmd)[2]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
qual_fhand=open(outqual_fhand.name)))
assert out_seqs[0].qual[-1] == 55
#disable quality trimming
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-q', inqual_fhand.name,
'-o', outseq_fhand.name, '-u', outqual_fhand.name,
'-p', 'sanger', '-x']
retcode = _call_python(cmd)[2]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
qual_fhand=open(outqual_fhand.name)))
assert seqs[0].seq == out_seqs[0].seq
#we can clean a sanger sequence without quality
seq1 = create_random_seqwithquality(500, qual_range=55)
seqs = [SeqWithQuality(seq1.seq + Seq('NNNNNNNNNNNNNN'), name='Ns')]
inseq_fhand = create_temp_seq_file(seqs, format='fasta')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'sanger']
retcode = _call_python(cmd)[2]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name)))
assert not str(out_seqs[0].seq).lower().endswith('nnnnn')
def test_illumina(self):
'It tests the Illumina cleaning'
seq1 = create_random_seqwithquality(50, qual_range=35)
seq2 = create_random_seqwithquality(10, qual_range=15)
seqs = [seq1 + seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'illumina', '-f', 'fastq']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert out_seqs[0].qual[-2] == 35
#disable quality trimming
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'illumina', '-f', 'fastq', '-x']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert seqs[0].seq == out_seqs[0].seq
#illumina format
inseq_fhand = create_temp_seq_file(seqs, format='fastq-illumina')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'illumina', '-f', 'fastq-illumina']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq-illumina'))
assert out_seqs[0].qual[-2] == 35
def test_solid(self):
'It tests the solid cleaning'
#csfasta
cs_seq = '''# Thu Jul 15 13:25:41 2010 /share/apps/corona/bin/filter_
# Cwd: /home/pipeline
# Title: solid0065_20100630_FRAG
>2_21_490_F3
T3.23121101332.0133.2221.23.2.2103.330320302..32320
>2_22_386_F3
T3.00222003211.1011.2122.30.0.3210.013012201..20222
>2_22_431_F3
T0.03020122002.2022.2122.21.2.2122.222102322..12221
>8_25_1748_F3
T0..11031202101103031103110303212300122113032213202
'''
cs_qual = '''# Thu Jul 15 13:25:41 2010 /share/apps/corona/bin/filter_
# Cwd: /home/pipeline
# Title: solid0065_20100630_FRAG
>2_21_490_F3
31 -1 12 24 17 20 29 21 16 18 30 22 24 -1 24 10 26 22 -1 19 26 23 14 -1 27 26 -1 13 -1 20 6 10 11 -1 15 30 19 15 22 4 18 31 4 -1 -1 33 14 9 8 5
>2_22_386_F3
33 -1 23 27 30 24 31 30 32 30 33 14 33 -1 27 18 28 27 -1 31 27 27 30 -1 26 27 -1 17 -1 27 28 26 28 -1 4 17 21 33 14 28 14 17 26 -1 -1 30 30 30 15 7
>2_22_431_F3
29 -1 18 29 4 16 14 19 26 24 16 4 22 -1 21 26 4 30 -1 22 17 6 24 -1 24 32 -1 27 -1 23 17 21 27 -1 5 19 19 6 4 16 6 18 12 -1 -1 14 25 13 12 5
>8_25_1748_F3
31 -1 -1 29 30 30 28 27 28 24 29 24 24 31 20 32 31 18 28 15 32 28 31 29 31 29 32 27 30 29 27 24 31 32 23 27 28 14 30 17 31 20 7 30 29 23 30 8 29 29
'''
cs_seq_fhand = NamedTemporaryFile()
cs_qual_fhand = NamedTemporaryFile()
cs_seq_fhand.write(cs_seq)
cs_qual_fhand.write(cs_qual)
cs_seq_fhand.flush()
cs_qual_fhand.flush()
out_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', cs_seq_fhand.name, '-q', cs_qual_fhand.name,
'-o', out_fhand.name, '-p', 'solid', '-f', 'csfasta',
'-g', 'fastq']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(out_fhand.name),
format='fastq'))
assert not out_seqs
#we allow more than one missing call
cmd = [CLEAN_READS, '-i', cs_seq_fhand.name, '-q', cs_qual_fhand.name,
'-o', out_fhand.name, '-p', 'solid', '-f', 'csfasta',
'-g', 'fastq', '--solid_allow_missing_call']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(out_fhand.name),
format='fastq'))
assert out_seqs[0].seq.startswith('..1103120210110')
assert out_seqs[0].qual[2] == 29
#no quality trimming
cmd = [CLEAN_READS, '-i', cs_seq_fhand.name, '-q', cs_qual_fhand.name,
'-o', out_fhand.name, '-p', 'solid', '-f', 'csfasta',
'-g', 'fastq', '--solid_allow_missing_call', '-x']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(out_fhand.name),
format='fastq'))
assert len(out_seqs) == 4
assert len(out_seqs[0]) == 49
#double encoding
#we allow more than one missing call
cmd = [CLEAN_READS, '-i', cs_seq_fhand.name, '-q', cs_qual_fhand.name,
'-o', out_fhand.name, '-p', 'solid', '-f', 'csfasta',
'-g', 'fastq', '--solid_allow_missing_call', '--double_encoding']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(out_fhand.name),
format='fastq'))
assert out_seqs[0].seq.startswith('NNCCATCGAGCACC')
def test_adaptors(self):
'It removes adaptors'
seq1 = create_random_seqwithquality(5, qual_range=35)
adaptor = create_random_seqwithquality(15, qual_range=35)
seq2 = create_random_seqwithquality(50, qual_range=35)
seqs = [seq1 + adaptor + seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
adaptor_fhand = create_temp_seq_file([adaptor], format='fasta')[0]
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'illumina', '-f', 'fastq', '-a', adaptor_fhand.name]
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert seq2.seq == out_seqs[0].seq
seq1 = create_random_seqwithquality(5, qual_range=35)
adaptor = create_random_seqwithquality(15, qual_range=35)
seq2 = create_random_seqwithquality(50, qual_range=35)
seqs = [seq1 + adaptor + seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq', '-a']
stdout, stderr, retcode = _call_python(cmd)
print stderr
assert retcode == 0
assert "--adaptors_file: {'454': '" in stdout
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'illumina', '-f', 'fastq', '-a']
stdout, stderr, retcode = _call_python(cmd)
assert 'clean_reads does not have default adaptors file' in stderr
assert retcode == 14
def test_vector(self):
'It removes the vector'
seq1 = create_random_seqwithquality(5, qual_range=35)
vector = create_random_seqwithquality(3000, qual_range=35)
seq2 = create_random_seqwithquality(250, qual_range=35)
seqs = [seq1 + vector[30:60] + seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
vector_fhand = create_temp_seq_file([vector], format='fasta')[0]
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq', '-v', vector_fhand.name]
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert (len(seq2.seq) - len(out_seqs[0].seq)) < 5
def test_vectordb(self):
'It removes the vector from a vector database'
seq1 = create_random_seqwithquality(5, qual_range=35)
vector = 'CACTATCTCCGACGACGGCGATTTCACCGTTGACCTGATTTCCAGTTGCTACGTCAAGTTC'
vector = SeqWithQuality(Seq(vector), name='vect', qual=[30]*len(vector))
seq2 = create_random_seqwithquality(250, qual_range=35)
seqs = [seq1 + vector + seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
vector_db = os.path.join(TEST_DATA_DIR, 'blast', 'arabidopsis_genes+')
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq', '-d', vector_db]
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert (len(seq2.seq) - len(out_seqs[0].seq)) < 5
seq1 = create_random_seqwithquality(5, qual_range=35)
vector = 'GGTGCCTCCGGCGGGCCACTCAATGCTTGAGTATACTCACTAGACTTTGCTTCGCAAAG'
vector = SeqWithQuality(Seq(vector), name='vect', qual=[30]*len(vector))
seq2 = create_random_seqwithquality(250, qual_range=35)
seqs = [seq1 + vector + seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq', '-d']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert (len(seq2.seq) - len(out_seqs[0].seq)) < 5
def test_words(self):
'It trims re words'
vector = 'ACTG'
vector = SeqWithQuality(Seq(vector), name='vect', qual=[30]*len(vector))
seq2 = create_random_seqwithquality(250, qual_range=35)
seqs = [vector + seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq', '-r', '"^ACTG","TTTTTTTTTTTTTT"']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert seq2.seq == out_seqs[0].seq
def test_edge_trim(self):
'It trims the sequence edges'
seq2 = create_random_seqwithquality(250, qual_range=35)
seqs = [seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq', '-e', '10,10']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert len(seq2.seq) - len(out_seqs[0].seq) == 20
def test_trim_as_mask(self):
'It masks the regions to trim'
seq2 = create_random_seqwithquality(250, qual_range=35)
seqs = [seq2]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq', '-e', '10,10', '--mask_no_trim']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert len(seq2.seq) == len(out_seqs[0].seq)
seq = str(out_seqs[0].seq)
assert seq[0:9].islower()
assert seq[10:len(seq) - 10].isupper()
assert seq[-10:].islower()
def test_min_length(self):
'Filtering by length'
seq1 = create_random_seqwithquality(250, qual_range=35)
seq2 = create_random_seqwithquality(50, qual_range=35)
seq3 = create_random_seqwithquality(250, qual_range=35)
seqs = [seq1, seq2, seq3]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq', '-m', '51']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert len(out_seqs) == 2
assert len(out_seqs[0]) == 250
assert len(out_seqs[1]) == 250
def test_filter(self):
'Filtering by blast similarity'
seq1 = create_random_seqwithquality(150, qual_range=35)
seq2 = 'CACTATCTCCGACGACGGCGATTTCACCGTTGACCTGATTTCCAGTTGCTACGTCAAGTTCTC'
seq2 += 'TACGGCAAGAATATCGCCGGAAAACTCAGTTACGGATCTGTTAAAGACGTCCGTGGAATCCA'
seq2 += 'AGCTAAAGAAGCTTTCCTTTGGCTACCAATCACCGCCATGGAATCGGATCCAAGCTCTGCCA'
seq2 = SeqWithQuality(Seq(seq2), name='ara', qual=[30]*len(seq2))
seq3 = create_random_seqwithquality(150, qual_range=35)
seqs = [seq1, seq2, seq3]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
ara_db = os.path.join(TEST_DATA_DIR, 'blast', 'arabidopsis_genes+')
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', '454', '-f', 'fastq',
'--filter_dbs', ','.join((ara_db, ara_db))]
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert len(out_seqs) == 2
class ParallelTest(unittest.TestCase):
'It tests the clean_reads script parallel operation'
def test_fasta_qual(self):
'Cleaning fasta and qual seqs in parallel'
seq1 = create_random_seqwithquality(500, qual_range=55)
seq2 = create_random_seqwithquality(50, qual_range=15)
seq3 = create_random_seqwithquality(500, qual_range=55)
seq4 = create_random_seqwithquality(50, qual_range=15)
seq5 = create_random_seqwithquality(500, qual_range=55)
seq6 = create_random_seqwithquality(50, qual_range=15)
seqs = [seq1 + seq2, seq3 + seq4, seq5 + seq6]
inseq_fhand, inqual_fhand = create_temp_seq_file(seqs, format='qual')
outseq_fhand = NamedTemporaryFile()
outqual_fhand = NamedTemporaryFile()
#we can clean a sanger sequence with quality
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-q', inqual_fhand.name,
'-o', outseq_fhand.name, '-u', outqual_fhand.name,
'-p', 'sanger', '-t', '2']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
qual_fhand=open(outqual_fhand.name)))
assert out_seqs[0].qual[-1] == 55
def test_fastq(self):
'Cleaning fastq seqs in parallel'
seq1 = create_random_seqwithquality(500, qual_range=55)
seq2 = create_random_seqwithquality(50, qual_range=15)
seq3 = create_random_seqwithquality(500, qual_range=55)
seq4 = create_random_seqwithquality(50, qual_range=15)
seq5 = create_random_seqwithquality(500, qual_range=55)
seq6 = create_random_seqwithquality(50, qual_range=15)
seqs = [seq1 + seq2, seq3 + seq4, seq5 + seq6]
inseq_fhand = create_temp_seq_file(seqs, format='fastq')[0]
outseq_fhand = NamedTemporaryFile()
#we can clean a sanger sequence with quality
cmd = [CLEAN_READS, '-i', inseq_fhand.name, '-o', outseq_fhand.name,
'-p', 'sanger', '-t', '4', '-f', 'fastq']
retcode = _call_python(cmd)[-1]
assert retcode == 0
out_seqs = list(seqs_in_file(seq_fhand=open(outseq_fhand.name),
format='fastq'))
assert out_seqs[0].qual[-1] == 55
if __name__ == "__main__":
#import sys;sys.argv = ['', 'CleanReadsTest.test_tempdir']
unittest.main()
|
JoseBlanca/franklin
|
test/scripts/clean_reads_test.py
|
Python
|
agpl-3.0
| 22,214
|
[
"BLAST"
] |
a47e4dbb16bd81f5c914e7d9d81f10000cb1f14fd727c3ce0ea775582a42cd5e
|
# -*- coding: utf-8 -*-
#
# GCAC-doc documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 30 10:12:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Galaxy Modules for Compound Activity Classification (GCAC)'
copyright = u'2016, Anmol J. Hemrom'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Set the readthedocs theme.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
print 'using readthedocs theme...'
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GCAC-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GCAC.tex', u'GCAC Documentation',
u'Anmol J. Hemrom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'GCAC', u'GCAC Documentation',
[u'Anmol J. Hemrom'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GCAC', u'GCAC Documentation',
u'Anmol J. Hemrom', 'GCAC', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
LynnLab-JNU/GCAC
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 8,589
|
[
"Galaxy"
] |
10aa0893bf5e516e71a60a4d9323b95ca72f1bb3515820b401850bde1b8b0f2c
|
#!/usr/bin/env python
from sys import argv, stderr, exit
import subprocess
import numpy as np
from argparse import ArgumentParser
try:
from exactP import exactP_list
except:
stderr.write("Please build the exactP module by running 'make exactP'.\n")
exit(1)
try:
from PISMNC import PISMDataset
except:
subprocess.call("ln -sf ../../util/PISMNC.py", shell=True)
from PISMNC import PISMDataset
def parse_options():
stderr.write("reading options ...\n")
parser = ArgumentParser()
parser.description = "Test P (verification of '-hydrology distributed')."
parser.add_argument("--pism_path", dest="PISM_PATH", default=".")
parser.add_argument("--mpiexec", dest="MPIEXEC", default="")
parser.add_argument("--Mx", dest="Mx", help="Horizontal grid size. Default corresponds to a 1km grid.", type=int, default=51)
parser.add_argument("--keep", dest="keep", action="store_true", help="Keep the generated PISM input file.")
return parser.parse_args()
def generate_config():
"""Generates the config file with custom ice softness and hydraulic conductivity."""
stderr.write("generating testPconfig.nc ...\n")
nc = PISMDataset("testPconfig.nc", 'w')
pism_overrides = nc.createVariable("pism_overrides", 'b')
pism_overrides.ice_softness = 3.1689e-24
pism_overrides.ice_softness_doc = "Pa-3 s-1; ice softness; NOT DEFAULT"
pism_overrides.hydrology_hydraulic_conductivity = 1.0e-2 / (1000.0 * 9.81)
pism_overrides.hydrology_hydraulic_conductivity_doc = "= k; NOT DEFAULT"
pism_overrides.hydrology_regularizing_porosity = 0.01
pism_overrides.hydrology_regularizing_porosity_doc = "[pure]; phi_0 in notes"
pism_overrides.hydrology_tillwat_max = 0.0
pism_overrides.hydrology_tillwat_max_doc = "m; turn off till water mechanism"
pism_overrides.hydrology_thickness_power_in_flux = 1.0
pism_overrides.hydrology_thickness_power_in_flux_doc = "; = alpha in notes"
pism_overrides.hydrology_gradient_power_in_flux = 2.0
pism_overrides.hydrology_gradient_power_in_flux_doc = "; = beta in notes"
pism_overrides.hydrology_roughness_scale = 1.0
pism_overrides.hydrology_roughness_scale_doc = "m; W_r in notes; roughness scale"
pism_overrides.yield_stress_model = "constant"
pism_overrides.yield_stress_model_doc = "only the constant yield stress model works without till"
pism_overrides.default_tauc = 1e6
pism_overrides.default_tauc_doc = "set default to 'high tauc'"
nc.close()
def report_drift(name, file1, file2, xx, yy, doshow=False):
"Report on the difference between two files."
nc1 = PISMDataset(file1)
nc2 = PISMDataset(file2)
var1 = nc1.variables[name]
var2 = nc2.variables[name]
diff = np.abs(np.squeeze(var1[:]) - np.squeeze(var2[:]))
rr = np.sqrt(xx ** 2 + yy ** 2)
diff[rr >= 0.89 * 25000.0] = 0.0
if (doshow):
import matplotlib.pyplot as plt
plt.pcolormesh(xx, yy, diff)
plt.axis('equal')
plt.axis('tight')
plt.colorbar()
plt.show()
#stderr.write("Drift in %s: average = %f, max = %f [%s]" % (name, np.average(diff), np.max(diff), var1.units) + "\n")
return np.average(diff), np.max(diff)
def create_grid(Mx):
Lx = 25.0e3 # outside L = 22.5 km
x = np.linspace(-Lx, Lx, Mx)
xx, yy = np.meshgrid(x, x)
return x, x, xx, yy
def radially_outward(mag, x, y):
"""return components of a vector field V(x,y) which is radially-outward from
the origin and has magnitude mag"""
r = np.sqrt(x * x + y * y)
if r == 0.0:
return (0.0, 0.0)
vx = mag * x / r
vy = mag * y / r
return (vx, vy)
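# Illustrative: radially_outward(2.0, 3.0, 4.0) returns (1.2, 1.6), a vector of
# magnitude 2 pointing away from the origin through the point (3, 4).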
def compute_sorted_radii(xx, yy):
stderr.write("sorting radial variable ...\n")
Mx = xx.shape[0]
# create 1D array of tuples (r,j,k), sorted by r-value
dtype = [('r', float), ('j', int), ('k', int)]
rr = np.empty((Mx, Mx), dtype=dtype)
for j in range(Mx):
for k in range(Mx):
rr[j, k] = (np.sqrt(xx[j, k] ** 2 + yy[j, k] ** 2), j, k)
r = np.sort(rr.flatten(), order='r')
return np.flipud(r)
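# Note: the returned structured array is sorted by decreasing radius, so
# r[0]['r'] is the largest distance from the origin and r[0]['j'], r[0]['k']
# are its grid indices.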
def generate_pism_input(x, y, xx, yy):
stderr.write("calling exactP_list() ...\n")
EPS_ABS = 1.0e-12
EPS_REL = 1.0e-15
# Wrapping r[:]['r'] in np.array() forces NumPy to make a C-contiguous copy.
h_r, magvb_r, _, W_r, P_r = exactP_list(np.array(r[:]['r']), EPS_ABS, EPS_REL, 1)
stderr.write("creating gridded variables ...\n")
# put on grid
h = np.zeros_like(xx)
W = np.zeros_like(xx)
P = np.zeros_like(xx)
magvb = np.zeros_like(xx)
ussa = np.zeros_like(xx)
vssa = np.zeros_like(xx)
for n, pt in enumerate(r):
j = pt['j']
k = pt['k']
h[j, k] = h_r[n] # ice thickness in m
magvb[j, k] = magvb_r[n] # sliding speed in m s-1
ussa[j, k], vssa[j, k] = radially_outward(magvb[j, k], xx[j, k], yy[j, k])
W[j, k] = W_r[n] # water thickness in m
P[j, k] = P_r[n] # water pressure in Pa
stderr.write("creating inputforP.nc ...\n")
nc = PISMDataset("inputforP.nc", 'w')
nc.create_dimensions(x, y, time_dependent=True, use_time_bounds=True)
nc.define_2d_field("thk", time_dependent=False,
attrs={"long_name": "ice thickness",
"units": "m",
"valid_min": 0.0,
"standard_name": "land_ice_thickness"})
nc.define_2d_field("topg", time_dependent=False,
attrs={"long_name": "bedrock topography",
"units": "m",
"standard_name": "bedrock_altitude"})
nc.define_2d_field("climatic_mass_balance", time_dependent=False,
attrs={"long_name": "climatic mass balance for -surface given",
"units": "kg m-2 year-1",
"standard_name": "land_ice_surface_specific_mass_balance"})
nc.define_2d_field("ice_surface_temp", time_dependent=False,
attrs={"long_name": "ice surface temp (K) for -surface given",
"units": "Kelvin",
"valid_min": 0.0})
nc.define_2d_field("bmelt", time_dependent=False,
attrs={"long_name": "basal melt rate",
"units": "m year-1",
"standard_name": "land_ice_basal_melt_rate"})
nc.define_2d_field("bwat", time_dependent=False,
attrs={"long_name": "thickness of basal water layer",
"units": "m",
"valid_min": 0.0})
nc.define_2d_field("bwp", time_dependent=False,
attrs={"long_name": "water pressure in basal water layer",
"units": "Pa",
"valid_min": 0.0})
nc.define_2d_field("bc_mask", time_dependent=False,
attrs={"long_name": "if =1, apply u_ssa_bc and v_ssa_bc as sliding velocity"})
nc.define_2d_field("u_ssa_bc", time_dependent=False,
attrs={"long_name": "x-component of prescribed sliding velocity",
"units": "m s-1"})
nc.define_2d_field("v_ssa_bc", time_dependent=False,
attrs={"long_name": "y-component of prescribed sliding velocity",
"units": "m s-1"})
Phi0 = 0.20 # 20 cm/year basal melt rate
T_surface = 260 # ice surface temperature, K
variables = {"topg": np.zeros_like(xx),
"climatic_mass_balance": np.zeros_like(xx),
"ice_surface_temp": np.ones_like(xx) + T_surface,
"bmelt": np.zeros_like(xx) + Phi0,
"thk": h,
"bwat": W,
"bwp": P,
"bc_mask": np.ones_like(xx),
"u_ssa_bc": ussa,
"v_ssa_bc": vssa}
for name in variables.keys():
nc.write(name, variables[name])
nc.history = subprocess.list2cmdline(argv)
nc.close()
stderr.write("NetCDF file %s written\n" % "inputforP.nc")
def run_pism(opts):
stderr.write("Testing: Test P verification of '-hydrology distributed'.\n")
cmd = "%s %s/pismr -config_override testPconfig.nc -i inputforP.nc -bootstrap -Mx %d -My %d -Mz 11 -Lz 4000 -hydrology distributed -report_mass_accounting -y 0.08333333333333 -max_dt 0.01 -no_mass -energy none -stress_balance ssa+sia -ssa_dirichlet_bc -o end.nc" % (opts.MPIEXEC, opts.PISM_PATH, opts.Mx, opts.Mx)
stderr.write(cmd + "\n")
subprocess.call(cmd, shell=True)
# high-res and parallel example:
# ./runTestP.py --pism_path=../../build --mpiexec="mpiexec -n 4" --Mx=201
# example which should suffice for regression:
# ./runTestP.py --pism_path=../../build --Mx=21
if __name__ == "__main__":
opts = parse_options()
x, y, xx, yy = create_grid(opts.Mx)
r = compute_sorted_radii(xx, yy)
generate_config()
generate_pism_input(x, y, xx, yy)
run_pism(opts)
(bwatav, bwatmax) = report_drift("bwat", "inputforP.nc", "end.nc", xx, yy, doshow=False)
(bwpav, bwpmax) = report_drift("bwp", "inputforP.nc", "end.nc", xx, yy, doshow=False)
print "NUMERICAL ERRORS:"
print "%d %f %f %f %f\n" % (opts.Mx, bwatav, bwatmax, bwpav, bwpmax)
# cleanup:
if not opts.keep:
subprocess.call("rm testPconfig.nc inputforP.nc end.nc", shell=True)
|
talbrecht/pism_pik07
|
test/test_hydrology/runTestP.py
|
Python
|
gpl-3.0
| 9,613
|
[
"NetCDF"
] |
f9f2e410808936840a4d72cc87f8ee3e822d0423a75f44053b7e0ba172e91f2d
|
"""The Mayavi UI plugin
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import logging
# Enthought library imports.
from traits.api import List, on_trait_change
from envisage.api import Plugin
from pyface.workbench.api import Perspective, PerspectiveItem
from traits.etsconfig.api import ETSConfig
logger = logging.getLogger()
# View IDs.
ENGINE_VIEW = 'mayavi.core.ui.engine_view.EngineView'
CURRENT_SELECTION_VIEW = 'mayavi.core.engine.Engine.current_selection'
SHELL_VIEW = 'envisage.plugins.python_shell_view'
LOGGER_VIEW = 'apptools.logger.plugin.view.logger_view.LoggerView'
###############################################################################
# `MayaviPerspective` class.
###############################################################################
class MayaviPerspective(Perspective):
""" A default perspective for Mayavi. """
# The perspective's name.
name = 'Mayavi'
# Should this perspective be enabled or not?
enabled = True
# Should the editor area be shown in this perspective?
show_editor_area = True
# The contents of the perspective.
contents = List()
def _contents_default(self):
contents = [
PerspectiveItem(id=ENGINE_VIEW, position='left'),
PerspectiveItem(id=CURRENT_SELECTION_VIEW, position='bottom',
relative_to=ENGINE_VIEW),
PerspectiveItem(id=SHELL_VIEW, position='bottom'),
]
show_logger = True
if ETSConfig.toolkit == 'wx':
# XXX: Bugware: avoid a crash in Wx with the logger
import wx
if wx.__version__.split('.')[:2] == ['2', '6']:
show_logger = False
if show_logger:
contents.append(PerspectiveItem(id=LOGGER_VIEW, position='with',
relative_to=SHELL_VIEW))
return contents
###############################################################################
# `MayaviUIPlugin` class.
###############################################################################
class MayaviUIPlugin(Plugin):
# Extension point Ids.
VIEWS = 'envisage.ui.workbench.views'
PERSPECTIVES = 'envisage.ui.workbench.perspectives'
PREFERENCES_PAGES = 'envisage.ui.workbench.preferences_pages'
ACTION_SETS = 'envisage.ui.workbench.action_sets'
BANNER = 'envisage.plugins.ipython_shell.banner'
# The plugins name.
name = 'Mayavi UI plugin'
# Our ID.
id = 'mayavi_ui'
###### Contributions to extension points made by this plugin ######
# Views.
views = List(contributes_to=VIEWS)
# Perspectives.
perspectives = List(contributes_to=PERSPECTIVES)
# Preferences pages.
preferences_pages = List(contributes_to=PREFERENCES_PAGES)
# Our action sets.
action_sets = List(contributes_to=ACTION_SETS)
# IPython banner
banner = List(contributes_to=BANNER)
def _views_default(self):
""" Trait initializer. """
return [self._engine_view_factory,
self._current_selection_view_factory]
def _perspectives_default(self):
""" Trait initializer. """
return [MayaviPerspective]
def _preferences_pages_default(self):
""" Trait initializer. """
from mayavi.preferences.mayavi_preferences_page import (
MayaviRootPreferencesPage, MayaviMlabPreferencesPage)
return [MayaviRootPreferencesPage, MayaviMlabPreferencesPage]
def _action_sets_default(self):
""" Trait initializer. """
from mayavi.plugins.mayavi_ui_action_set import (
MayaviUIActionSet
)
return [MayaviUIActionSet]
def _banner_default(self):
"""Trait initializer """
return ["""Welcome to Mayavi, this is the interactive IPython shell.
If this is your first time using Mayavi, take a quick look at the tutorial examples section of the user guide, accessible via the help menu.
To use Mayavi, you need to load your data in "data sources" and apply "visualization modules" to it.
"""]
######################################################################
# Private methods.
def _engine_view_factory(self, window, **traits):
""" Factory method for engine views. """
from pyface.workbench.traits_ui_view import \
TraitsUIView
from mayavi.core.ui.engine_view import \
EngineView
engine_view = EngineView(engine=self._get_engine(window))
tui_engine_view = TraitsUIView(obj=engine_view,
id=ENGINE_VIEW,
name='Mayavi',
window=window,
position='left',
**traits
)
return tui_engine_view
def _current_selection_view_factory(self, window, **traits):
""" Factory method for the current selection of the engine. """
from pyface.workbench.traits_ui_view import \
TraitsUIView
engine = self._get_engine(window)
tui_engine_view = TraitsUIView(obj=engine,
view='current_selection_view',
id=CURRENT_SELECTION_VIEW,
name='Mayavi object editor',
window=window,
position='bottom',
relative_to=ENGINE_VIEW,
**traits
)
return tui_engine_view
def _get_engine(self, window):
"""Return the Mayavi engine of the particular window."""
from mayavi.core.engine import Engine
return window.get_service(Engine)
def _get_script(self, window):
"""Return the `mayavi.plugins.script.Script` instance
of the window."""
from mayavi.plugins.script import Script
return window.get_service(Script)
######################################################################
# Trait handlers.
@on_trait_change('application.gui:started')
def _on_application_gui_started(self, obj, trait_name, old, new):
"""This is called when the application's GUI is started. The
method binds the `Script` and `Engine` instance on the
interpreter.
"""
# This is called when the application trait is set but we don't
# want to do anything at that point.
if trait_name != 'started' or not new:
return
# Get the script service.
app = self.application
window = app.workbench.active_window
script = self._get_script(window)
# Get a hold of the Python shell view.
id = SHELL_VIEW
py = window.get_view_by_id(id)
if py is None:
logger.warn('*'*80)
logger.warn("Can't find the Python shell view to bind variables")
return
# Bind the script and engine instances to names on the
# interpreter.
try:
py.bind('mayavi', script)
py.bind('engine', script.engine)
try:
# The following will fail under Qt, as it needs the Pyface
# Tree that has not been ported from Wx yet.
from apptools.naming.ui.api import explore
py.bind('explore', explore)
except ImportError:
pass
except AttributeError as msg:
# This can happen when the shell is not visible.
# FIXME: fix this when the shell plugin is improved.
logger.warn(msg)
logger.warn("Can't find the Python shell to bind variables")
|
dmsurti/mayavi
|
mayavi/plugins/mayavi_ui_plugin.py
|
Python
|
bsd-3-clause
| 8,008
|
[
"Mayavi"
] |
77b40462e987935b5533d5f111d541bd5eee8addd3c1e03b8d8b6b472dc3753e
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
import chigger
camera = vtk.vtkCamera()
camera.SetViewUp(0.1865, 0.6455, 0.7407)
camera.SetPosition(3.7586, -11.8847, 9.5357)
camera.SetFocalPoint(0.0000, 0.0000, 0.1250)
reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
exodus0 = chigger.exodus.ExodusSource(reader, block=['1'])
exodus0.update()
exodus1 = chigger.exodus.ExodusSource(reader, block=['76'], edges=True, edge_color=[1,0,0], edge_width=1)
exodus1.update()
result = chigger.base.ChiggerResult(exodus0, exodus1, variable='diffused', camera=camera)
window = chigger.RenderWindow(result, size=[300, 300], test=True)
window.update(); window.resetCamera()
window.write('edge.png')
window.start()
|
liuwenf/moose
|
python/chigger/tests/edge/edge.py
|
Python
|
lgpl-2.1
| 1,599
|
[
"MOOSE",
"VTK"
] |
ec1b2c8733f0f0f626a2a472910da78939b44c4913781a989de2eff142017ad5
|
import numpy
from PyMca import PyMcaQt as qt
from PyMca import ScanWindow
from PyMca import specfilewrapper as sf
from PyMca import SimpleFitModule as SFM
from PyMca import SpecfitFunctions
from PyMca import Elements
from PyMca import ConfigDict
from PyMca import PyMcaDirs
from PyMca import QSpecFileWidget
from PyMca import SpecFileDataSource
from PyMca.SpecfitFuns import upstep, downstep
from PyMca.Gefit import LeastSquaresFit as LSF
from os.path import isdir as osPathIsDir
try:
from PyMca import Plugin1DBase
except ImportError:
print("WARNING:SumRulesPlugin import from somewhere else")
from . import Plugin1DBase
DEBUG = 1
NEWLINE = '\n'
class Mathematics(object):
def __init__(self):
self.simpleFit = SFM.SimpleFit()
self.simpleFit.importFunctions(SpecfitFunctions)
def ricker(self, points, a):
"""
SciPy implementation of the ricker wavelet
From https://github.com/scipy/scipy/blob/v0.13.0/scipy/signal/wavelets.py
"""
A = 2 / (numpy.sqrt(3 * a) * (numpy.pi**0.25))
wsq = a**2
vec = numpy.arange(0, points) - (points - 1.0) / 2
tsq = vec**2
mod = (1 - tsq / wsq)
gauss = numpy.exp(-tsq / (2 * wsq))
total = A * mod * gauss
return total
def continousWaveletTransform(self, data, widths):
"""
SciPy implementation of cwt
From https://github.com/scipy/scipy/blob/v0.13.0/scipy/signal/wavelets.py
"""
wavelet = self.ricker
nCols = len(data)
nRows = len(widths)
out = numpy.zeros([nRows, nCols], dtype=numpy.float)
for idx, width in enumerate(widths):
waveletData = wavelet(min(10 * width, len(data)), width)
out[idx, :] = numpy.convolve(data, waveletData, mode='same')
return out
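    # Usage sketch (illustrative only): with
    #     m = Mathematics()
    #     widths = numpy.arange(1, 11)
    #     cwt = m.continousWaveletTransform(signal, widths)
    # where `signal` is any 1D numpy array, `cwt` has shape
    # (len(widths), len(signal)); each row is the signal convolved with a
    # Ricker wavelet of the corresponding width.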
#def normalizeXAS(self, x):
def cumtrapz(self, y, x=None, dx=1.0):
y = y[:]
if x is None:
x = numpy.arange(len(y), dtype=y.dtype) * dx
else:
x = x[:]
if not numpy.all(numpy.diff(x) > 0.):
# assure monotonically increasing x
idx = numpy.argsort(x)
x = numpy.take(x, idx)
y = numpy.take(y, idx)
# Avoid duplicates
x.ravel()
idx = numpy.nonzero(numpy.diff(x) > 0)[0]
x = numpy.take(x, idx)
y = numpy.take(y, idx)
return numpy.cumsum(.5 * numpy.diff(x) * (y[1:] + y[:-1]))
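    # Worked example: cumtrapz(y=[0., 1., 2.], x=[0., 1., 2.]) returns [0.5, 2.0]
    # (trapezoid areas 0.5 and 1.5, cumulatively summed); the result is one
    # element shorter than the input arrays.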
def magneticMoment(self, p, q, r, n, econf = '3d'):
'''
Input
-----
p : Float
Integral over the L3 (first) edge of the XMCD
(difference) signal
q : Float
Integral over the L2 (second) edge of the XMCD
(difference) signal
r : Float
Integral over the complete XAS signal
n : Float
Electron occupation number of the sample material
econf : String
Determines if material is of 3d or 4f type and
thus the number of electronic states in the outer
shell
Returns the orbital resp. the spin part of the magnetic moment
(c.f. Chen et al., Phys. Rev. Lett., 75(1), 152)
'''
mOrbt, mSpin, mRatio = None, None, None
# Determine number of states in outer shell
if econf not in ['3d','4f']:
raise ValueError('Element must either be 3d or 4f type!')
elif econf == '3d':
nMax = 10.
else:
nMax = 14.
# Check if r is non-zero
if r == 0.:
raise ZeroDivisionError()
# Calculate Integrals
if q is not None:
mOrbt = -4./3. * q * (nMax - n) / r
if (q is not None) and (p is not None):
mSpin = -(6.*p - 4.*q) * (nMax - n) / r
mRatio = 2*q/(9*p-6*q)
return mOrbt, mSpin, mRatio
def rndDataQuad(self):
x = 5 + numpy.random.rand(500) + numpy.arange(500, dtype=float)
y = 50*numpy.exp(-0.005*(x-250)**2) + 5 + 0.00005*x**2
return x, y
def rndDataLin(self):
x = 5 + numpy.random.rand(500) + numpy.arange(500, dtype=float)
y = 50*numpy.exp(-0.005*(x-250)**2) + 5 + 0.03*x
return x, y
def detrend(self, x, y, order='linear'):
if order not in ['linear', 'quadratic', 'cubic']:
raise ValueError('Order must be linear, quadratic or cubic')
if order == 'linear':
ord = 1
elif order == 'quadratic':
ord = 2
elif order == 'cubic':
ord = 3
coeff = numpy.polyfit(x,y,ord)
poly = numpy.zeros(x.shape)
for a in coeff:
poly *= x
poly += a
return y-poly
def run(self):
from matplotlib import pyplot as plt
xLin, yLin = self.rndDataLin()
xQuad, yQuad = self.rndDataQuad()
yLinCorr = self.detrend(xLin, yLin, 'linear')
yQuadCorr = self.detrend(xQuad, yQuad, 'quadratic')
plt.plot(xLin, yLin, xLin, yLinCorr)
plt.plot(xQuad, yQuad, xQuad, yQuadCorr)
plt.show()
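# A minimal, uncalled sketch of how the sum-rules arithmetic above can be
# exercised on its own.  All numbers below are hypothetical and only serve
# to illustrate the call signature of Mathematics.magneticMoment().
def _sumRulesDemo():
    mathObj = Mathematics()
    p = -0.5   # hypothetical integral over the L3 edge of the XMCD signal
    q = -0.2   # hypothetical integral over the L2 edge of the XMCD signal
    r = 10.0   # hypothetical integral over the whole XAS signal
    n = 6.6    # hypothetical 3d electron occupation number
    return mathObj.magneticMoment(p, q, r, n, econf='3d')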
class MarkerSpinBox(qt.QDoubleSpinBox):
valueChangedSignal = qt.pyqtSignal(float)
#intersectionChangedSignal = qt.pyqtSignal(float)
intersectionsChangedSignal = qt.pyqtSignal(object)
#def __init__(self, window, graph, label='', parent=None):
def __init__(self, window, plotWindow, label='', parent=None):
qt.QDoubleSpinBox.__init__(self, parent)
# Attributes
self.label = label
self.window = window
self.plotWindow = plotWindow
#self.graph = graph
#self.markerID = self.graph.insertX1Marker(0., label=label)
self.markerID = self.plotWindow.graph.insertX1Marker(0., label=label)
# Initialize
self.setMinimum(0.)
self.setMaximum(10000.) # TODO: Change init value
self.setValue(0.)
# Connects
#self.connect(self.graph,
self.connect(self.plotWindow.graph,
qt.SIGNAL("QtBlissGraphSignal"),
self._markerMoved)
self.valueChanged['double'].connect(self._valueChanged)
self.valueChanged['QString'].connect(self._valueChanged)
def getIntersections(self):
dataList = self.plotWindow.getAllCurves()
#dataDict = self.graph.curves
resDict = {}
pos = self.value()
if not isinstance(pos, float):
print 'getIntersections -- pos is not of type float'
return
#for listIdx, (x, y, legend, info) in enumerate(dataDict.items()):
for x, y, legend, info in dataList:
res = float('NaN')
if numpy.all(pos < x) or numpy.all(x < pos):
print 'getIntersections -- Marker position outside of data range'
continue
#raise ValueError('Marker outside of data range')
if pos in x:
idx = numpy.where(x == pos)
res = y[idx]
else:
# Interpolation needed, assume well-behaved data (c.f. copy routine)
lesserIdx = numpy.nonzero(x < pos)[0][-1]
greaterIdx = numpy.nonzero(x > pos)[0][0]
dy = y[lesserIdx] - y[greaterIdx]
dx = x[lesserIdx] - x[greaterIdx]
res = dy/dx * (pos - x[lesserIdx]) + y[lesserIdx]
resDict[legend] = (pos, res)
#print 'getIntersections -- Result:', resDict
return resDict
def hideMarker(self):
graph = self.plotWindow.graph
if self.markerID in graph.markersdict.keys():
marker = graph.markersdict[self.markerID]['marker']
marker.hide()
def showMarker(self):
graph = self.plotWindow.graph
if self.markerID in graph.markersdict.keys():
marker = graph.markersdict[self.markerID]['marker']
marker.show()
def _setMarkerFollowMouse(self, windowTitle):
windowTitle = str(windowTitle)
graph = self.plotWindow.graph
if self.window == windowTitle:
#self.graph.setmarkercolor(self.markerID, 'blue')
#self.graph.setmarkerfollowmouse(self.markerID, True)
#self.graph.replot()
graph.setmarkercolor(self.markerID, 'blue')
graph.setmarkerfollowmouse(self.markerID, True)
graph.replot()
else:
#self.graph.setmarkercolor(self.markerID, 'black')
#self.graph.setmarkerfollowmouse(self.markerID, False)
#self.graph.replot()
graph.setmarkercolor(self.markerID, 'black')
graph.setmarkerfollowmouse(self.markerID, False)
graph.replot()
def _markerMoved(self, ddict):
if 'marker' not in ddict:
return
else:
if ddict['marker'] != self.markerID:
return
#if DEBUG:
# print "_markerMoved -- ddict:\n\t",ddict
if ddict['event'] == 'markerMoving':
self.setValue(ddict['x'])
def _valueChanged(self, val):
try:
val = float(val)
except ValueError:
print '_valueChanged -- could not convert value to float:', val
return
graph = self.plotWindow.graph
#self.graph.setMarkerXPos(self.markerID, val)
graph.setMarkerXPos(self.markerID, val)
#self.graph.replot()
graph.replot()
#self.valueChangedSignal.emit(val)
#ddict = self.getIntersections()
#self.intersectionsChangedSignal.emit(ddict)
class LineEditDisplay(qt.QLineEdit):
def __init__(self, controller, ddict={}, unit='', parent=None):
qt.QLineEdit.__init__(self, parent)
self.setReadOnly(True)
self.setAlignment(qt.Qt.AlignRight)
self.ddict = ddict
self.unit = unit
self.setMaximumWidth(120)
self.controller = controller
if isinstance(self.controller, qt.QComboBox):
self.controller.currentIndexChanged['QString'].connect(self.setText)
elif isinstance(self.controller, qt.QDoubleSpinBox):
# Update must be triggered otherwise
#self.controller.valueChanged['QString'].connect(self.setText)
pass
else:
raise ValueError('LineEditDisplay: Controller must be of type QComboBox or QDoubleSpinBox')
#self.controller.destroyed.connect(self.destroy)
def updateDict(self, ddict):
# Only relevant if type(controller) == QComboBox
self.ddict = ddict
def updateUnit(self, unit):
self.unit = unit
def checkController(self):
if isinstance(self.controller, qt.QComboBox):
tmp = self.controller.currentText()
elif isinstance(self.controller, qt.QDoubleSpinBox):
tmp = self.controller.value()
self.setText(tmp)
def setText(self, inp):
inp = str(inp)
if isinstance(self.controller, qt.QComboBox):
if inp == '':
text = ''
else:
tmp = self.ddict.get(inp,None)
if tmp is not None:
try:
text = '%.2f meV'%(1000. * float(tmp))
except ValueError:
text = 'NaN'
else:
text = '---'
elif isinstance(self.controller, qt.QDoubleSpinBox):
text = inp + ' ' + self.unit
qt.QLineEdit.setText(self, text)
#class LineEditDisplay(qt.QLineEdit):
# def __init__(self, combobox, ddict={}, parent=None):
# qt.QLineEdit.__init__(self, parent)
# self.setReadOnly(True)
# self.setAlignment(qt.Qt.AlignRight)
# self.ddict = ddict
# self.setMaximumWidth(120)
# self.combobox = combobox
# self.combobox.currentIndexChanged['QString'].connect(self.setText)
# #self.combobox.destroyed.connect(self.destroy)
#
# def updateDict(self, ddict):
# self.ddict = ddict
#
# def checkComboBox(self):
# tmp = self.combobox.currentText()
# self.setText(tmp)
#
# def setText(self, inp):
# inp = str(inp)
# if inp == '':
# text = ''
# else:
# tmp = self.ddict.get(inp,None)
# if tmp is not None:
# try:
# text = '%.2f meV'%(1000. * float(tmp))
# except ValueError:
# text = 'NaN'
# else:
# text = '---'
# qt.QLineEdit.setText(self, text)
class SumRulesWindow(qt.QMainWindow):
#class SumRulesWindow(qt.QWidget):
# Curve labeling
__xasBGmodel = 'xas BG model'
# Tab names
__tabElem = 'element'
__tabBG = 'background'
__tabInt = 'integration'
# Marker names
__preMin = 'Pre Min'
__preMax = 'Pre Max'
__postMin = 'Post Min'
__postMax = 'Post Max'
__intP = 'p'
__intQ = 'q'
__intR = 'r'
# Lists
tabList = [__tabElem,
__tabBG,
__tabInt]
xasMarkerList = [__preMin,
__preMax,
__postMin,
__postMax]
xmcdMarkerList = [__intP,
__intQ,
__intR]
edgeMarkerList = []
# Elements with 3d final state
transitionMetals = ['Sc', 'Ti', 'V', 'Cr', 'Mn',\
'Fe', 'Co', 'Ni', 'Cu']
# Elements with 4f final state
rareEarths = ['La', 'Ce', 'Pr', 'Nd', 'Pm',\
'Sm', 'Eu', 'Gd', 'Tb', 'Dy',\
'Ho', 'Er', 'Tm', 'Yb']
elementsDict = {
'' : [],
'3d': transitionMetals,
'4f': rareEarths
}
# Electron final states
electronConfs = ['3d','4f']
# Occurring transitions
occuringTransitions = ['L3M4', 'L3M5', 'L2M4', 'M5O3']
# Signals
tabChangedSignal = qt.pyqtSignal('QString')
modelWidthChangedSignal = qt.pyqtSignal('QString')
def __init__(self, parent=None):
qt.QWidget.__init__(self, parent)
self.setWindowTitle('Sum Rules')
self.plotWindow = ScanWindow.ScanWindow(self)
self.plotWindow.scanWindowInfoWidget.hide()
self.plotWindow.graph.enablemarkermode()
# Hide unnecessary buttons in the toolbar
#self.plotWindow.toolBar.hide()
#self.plotWindow.fitButton.hide()
toolbarChildren = self.plotWindow.toolBar
# QWidget.findChildren(<qt-type>) matches
# all child widgets with the specified type
toolbarButtons = toolbarChildren.findChildren(qt.QToolButton)
toolbarButtons[6].hide() # Simple Fit
toolbarButtons[7].hide() # Average Plotted Curves
toolbarButtons[8].hide() # Derivative
toolbarButtons[9].hide() # Smooth
toolbarButtons[12].hide() # Subtract active curve
toolbarButtons[13].hide() # Save active curve
toolbarButtons[14].hide() # Plugins
self.__savedConf = False
self.__savedData = False
# Marker Handling
# spinboxDict connects marker movement to spinbox
# keys() -> id(MarkerSpinBox)
# values() -> MarkerSpinBox
self.spinboxDict = {}
self.valuesDict = dict(
[(item, {}) for item in self.tabList])
# Tab Widget
self.tabWidget = qt.QTabWidget()
for window in self.tabList:
if window == self.__tabElem:
# BEGIN sampleGB
# electron shell combo box
sampleGB = qt.QGroupBox('Sample definition')
sampleLayout = qt.QVBoxLayout()
sampleGB.setLayout(sampleLayout)
self.elementEConfCB = qt.QComboBox()
self.elementEConfCB.setMinimumWidth(100)
self.elementEConfCB.addItems(['']+self.electronConfs)
self.elementEConfCB.currentIndexChanged['QString'].connect(self.setElectronConf)
elementEConfLayout = qt.QHBoxLayout()
elementEConfLayout.setContentsMargins(0,0,0,0)
elementEConfLayout.addWidget(qt.QLabel('Electron shell'))
elementEConfLayout.addWidget(qt.HorizontalSpacer())
elementEConfLayout.addWidget(self.elementEConfCB)
elementEConfWidget = qt.QWidget()
elementEConfWidget.setLayout(elementEConfLayout)
sampleLayout.addWidget(elementEConfWidget)
# Element selection combo box
self.elementCB = qt.QComboBox()
self.elementCB.setMinimumWidth(100)
self.elementCB.addItems([''])
self.elementCB.currentIndexChanged['QString'].connect(self.getElementInfo)
elementLayout = qt.QHBoxLayout()
elementLayout.setContentsMargins(0,0,0,0)
elementLayout.addWidget(qt.QLabel('Element'))
elementLayout.addWidget(qt.HorizontalSpacer())
elementLayout.addWidget(self.elementCB)
elementWidget = qt.QWidget()
elementWidget.setLayout(elementLayout)
sampleLayout.addWidget(elementWidget)
# electron occupation number
self.electronOccupation = qt.QLineEdit('e.g. 3.14')
self.electronOccupation.setMaximumWidth(120)
electronOccupationValidator = qt.QDoubleValidator()
electronOccupationValidator.setBottom(0.)
electronOccupationValidator.setTop(14.)
self.electronOccupation.setValidator(electronOccupationValidator)
electronOccupationLayout = qt.QHBoxLayout()
electronOccupationLayout.setContentsMargins(0,0,0,0)
electronOccupationLayout.addWidget(qt.QLabel('Electron Occupation Number'))
electronOccupationLayout.addWidget(qt.HorizontalSpacer())
electronOccupationLayout.addWidget(self.electronOccupation)
electronOccupationLayout.addWidget(qt.VerticalSpacer())
electronOccupationWidget = qt.QWidget()
electronOccupationWidget.setLayout(electronOccupationLayout)
sampleLayout.addWidget(electronOccupationWidget)
# END sampleGB
# BEGIN absorptionGB: X-ray absorption edge
# selection combo box by transition (L3M1, etc.)
absorptionGB = qt.QGroupBox('X-ray absorption edges')
absorptionLayout = qt.QVBoxLayout()
absorptionGB.setLayout(absorptionLayout)
self.edge1CB = qt.QComboBox()
self.edge1CB.setMinimumWidth(100)
self.edge1CB.addItems([''])
self.edge1Line = LineEditDisplay(self.edge1CB)
edge1Layout = qt.QHBoxLayout()
edge1Layout.setContentsMargins(0,0,0,0)
edge1Layout.addWidget(qt.QLabel('Edge 1'))
edge1Layout.addWidget(qt.HorizontalSpacer())
edge1Layout.addWidget(self.edge1CB)
edge1Layout.addWidget(self.edge1Line)
edge1Widget = qt.QWidget()
edge1Widget.setLayout(edge1Layout)
absorptionLayout.addWidget(edge1Widget)
self.edge2CB = qt.QComboBox()
self.edge2CB.setMinimumWidth(100)
self.edge2CB.addItems([''])
self.edge2Line = LineEditDisplay(self.edge2CB)
edge2Layout = qt.QHBoxLayout()
edge2Layout.setContentsMargins(0,0,0,0)
edge2Layout.addWidget(qt.QLabel('Edge 2'))
edge2Layout.addWidget(qt.HorizontalSpacer())
edge2Layout.addWidget(self.edge2CB)
edge2Layout.addWidget(self.edge2Line)
edge2Widget = qt.QWidget()
edge2Widget.setLayout(edge2Layout)
absorptionLayout.addWidget(edge2Widget)
absorptionLayout.addWidget(qt.VerticalSpacer())
# END absorptionGB
# Combine sampleGB & absorptionGB in one Line
topLineLayout = qt.QHBoxLayout()
topLineLayout.setContentsMargins(0,0,0,0)
topLineLayout.addWidget(sampleGB)
topLineLayout.addWidget(absorptionGB)
topLine = qt.QWidget()
topLine.setLayout(topLineLayout)
# BEGIN tab layouting
elementTabLayout = qt.QVBoxLayout()
#elementTabLayout.addWidget(elementEConfWidget)
#elementTabLayout.addWidget(elementWidget)
#elementTabLayout.addWidget(electronOccupationWidget)
#elementTabLayout.addWidget(qt.QLabel('X-ray absorption edges'))
#elementTabLayout.addWidget(edge1Widget)
#elementTabLayout.addWidget(edge2Widget)
#elementTabLayout.addWidget(sampleGB)
#elementTabLayout.addWidget(absorptionGB)
elementTabLayout.addWidget(topLine)
elementTabLayout.addWidget(qt.VerticalSpacer())
elementTabWidget = qt.QWidget()
elementTabWidget.setLayout(elementTabLayout)
self.tabWidget.addTab(
elementTabWidget,
window.upper())
# END tab layouting
self.valuesDict[self.__tabElem]\
['element'] = self.elementCB
self.valuesDict[self.__tabElem]\
['electron shell'] = self.elementEConfCB
self.valuesDict[self.__tabElem]\
['electron occupation'] = self.electronOccupation
self.valuesDict[self.__tabElem]\
['edge1Transition'] = self.edge1CB
self.valuesDict[self.__tabElem]\
['edge2Transition'] = self.edge2CB
self.valuesDict[self.__tabElem]\
['edge1Energy'] = self.edge1Line
self.valuesDict[self.__tabElem]\
['edge2Energy'] = self.edge2Line
self.valuesDict[self.__tabElem]['info'] = {}
elif window == self.__tabBG:
# BEGIN Pre/Post edge group box
prePostLayout = qt.QGridLayout()
prePostLayout.setContentsMargins(0,0,0,0)
for idx, markerLabel in enumerate(self.xasMarkerList):
# TODO: Fix initial xpos
markerWidget, spinbox = self.addMarker(window=window,
label=markerLabel,
xpos=0.,
unit='[eV]')
self.valuesDict[self.__tabBG][markerLabel] = spinbox
markerWidget.setContentsMargins(0,-8,0,-8)
if idx == 0: posx, posy = 0,0
if idx == 1: posx, posy = 1,0
if idx == 2: posx, posy = 0,1
if idx == 3: posx, posy = 1,1
prePostLayout.addWidget(markerWidget, posx, posy)
prePostGB = qt.QGroupBox('Pre/Post edge')
prePostGB.setLayout(prePostLayout)
# END Pre/Post edge group box
# BEGIN Edge group box
numberOfEdges = 2
#addDelLayout = qt.QHBoxLayout()
#addDelLayout.setContentsMargins(0,0,0,0)
#buttonAdd = qt.QPushButton('Add')
#buttonDel = qt.QPushButton('Del')
#buttonAdd.clicked.connect(self.addEdgeMarker)
#buttonDel.clicked.connect(self.delEdgeMarker)
#addDelLayout.addWidget(qt.HorizontalSpacer())
#addDelLayout.addWidget(buttonAdd)
#addDelLayout.addWidget(buttonDel)
#addDelWidget = qt.QWidget()
#addDelWidget.setLayout(addDelLayout)
edgeLayout = qt.QVBoxLayout()
edgeLayout.setContentsMargins(0,0,0,0)
#edgeLayout.addWidget(addDelWidget)
for idx in range(numberOfEdges):
markerLabel = 'Edge %d'%(idx+1)
self.edgeMarkerList += [markerLabel]
markerWidget, spinbox = self.addMarker(window=window,
label=markerLabel,
xpos=0.,
unit='[eV]')
self.valuesDict[self.__tabBG][markerLabel] = spinbox
markerWidget.setContentsMargins(0,-8,0,-8)
edgeLayout.addWidget(markerWidget)
markerWidget.setEnabled(False)
edgeGB = qt.QGroupBox('Edge positions')
edgeGB.setLayout(edgeLayout)
# END Edge group box
# BEGIN Fit control group box
#stepRatio = qt.QLineEdit('0.66')
stepRatio = qt.QDoubleSpinBox()
stepRatio.setMaximumWidth(100)
stepRatio.setAlignment(qt.Qt.AlignRight)
#stepRatioValidator = qt.QDoubleValidator()
#stepRatio.setValidator(stepRatioValidator)
#stepRatioValidator.setBottom(0.)
#stepRatioValidator.setTop(1.)
stepRatio.setMinimum(0.)
stepRatio.setMaximum(1.)
stepRatio.setSingleStep(.025)
stepRatio.setValue(.5)
stepRatioLayout = qt.QHBoxLayout()
stepRatioLayout.addWidget(qt.QLabel('Step ratio'))
stepRatioLayout.addWidget(qt.HorizontalSpacer())
stepRatioLayout.addWidget(stepRatio)
stepRatioWidget = qt.QWidget()
stepRatioWidget.setContentsMargins(0,-8,0,-8)
stepRatioWidget.setLayout(stepRatioLayout)
#stepWidth = qt.QLineEdit('5.0')
stepWidth = qt.QDoubleSpinBox()
stepWidth.setMaximumWidth(100)
stepWidth.setAlignment(qt.Qt.AlignRight)
#stepWidthValidator = qt.QDoubleValidator()
#stepWidth.setValidator(stepWidthValidator)
#stepWidthValidator.setBottom(0.)
#stepWidthValidator.setTop(1.)
stepWidth.setMinimum(0.)
stepWidth.setMaximum(1.)
stepWidth.setSingleStep(.01)
stepWidth.setValue(.5)
modelWidthLineEdit = LineEditDisplay(
controller=stepWidth,
unit='eV')
self.modelWidthChangedSignal.connect(modelWidthLineEdit.setText)
stepWidthLayout = qt.QHBoxLayout()
stepWidthLayout.addWidget(qt.QLabel('Step width'))
stepWidthLayout.addWidget(qt.HorizontalSpacer())
stepWidthLayout.addWidget(modelWidthLineEdit)
stepWidthLayout.addWidget(stepWidth)
stepWidthWidget = qt.QWidget()
stepWidthWidget.setContentsMargins(0,-8,0,-8)
stepWidthWidget.setLayout(stepWidthLayout)
fitControlLayout = qt.QVBoxLayout()
fitControlLayout.addWidget(stepRatioWidget)
fitControlLayout.addWidget(stepWidthWidget)
fitControlGB = qt.QGroupBox('Background model control')
fitControlGB.setLayout(fitControlLayout)
# END Fit control group box
# Combine edge position and fit control in single line
sndLine = qt.QWidget()
sndLineLayout = qt.QHBoxLayout()
sndLineLayout.setContentsMargins(0,0,0,0)
sndLine.setLayout(sndLineLayout)
sndLineLayout.addWidget(edgeGB)
sndLineLayout.addWidget(fitControlGB)
# Insert into tab
backgroundTabLayout = qt.QVBoxLayout()
backgroundTabLayout.setContentsMargins(0,0,0,0)
backgroundTabLayout.addWidget(prePostGB)
#backgroundTabLayout.addWidget(edgeGB)
#backgroundTabLayout.addWidget(fitControlGB)
backgroundTabLayout.addWidget(sndLine)
backgroundTabLayout.addWidget(qt.VerticalSpacer())
backgroundWidget = qt.QWidget()
backgroundWidget.setLayout(backgroundTabLayout)
self.tabWidget.addTab(
backgroundWidget,
window.upper())
stepRatio.valueChanged['double'].connect(self.estimateBG)
stepWidth.valueChanged['double'].connect(self.estimateBG)
self.valuesDict[self.__tabBG]\
['Step Ratio'] = stepRatio
self.valuesDict[self.__tabBG]\
['Step Width'] = stepWidth
self.valuesDict[self.__tabBG]\
['Model Width'] = modelWidthLineEdit
elif window == self.__tabInt:
# BEGIN Integral marker groupbox
pqLayout = qt.QVBoxLayout()
pqLayout.setContentsMargins(0,0,0,0)
for markerLabel in self.xmcdMarkerList:
# TODO: Fix initial xpos
markerWidget, spinbox = self.addMarker(window=window,
label=markerLabel,
xpos=0.,
unit='[eV]')
self.valuesDict[self.__tabInt][markerLabel] = spinbox
markerWidget.setContentsMargins(0,-8,0,-8)
integralVal = qt.QLineEdit()
integralVal.setReadOnly(True)
integralVal.setMaximumWidth(120)
#spinbox.valueChanged['QString'].connect(self.getIntegralValue)
valLabel = qt.QLabel('Integral Value:')
mwLayout = markerWidget.layout()
mwLayout.addWidget(valLabel)
mwLayout.addWidget(integralVal)
pqLayout.addWidget(markerWidget)
spinbox.valueChanged.connect(self.calcMagneticMoments)
key = 'Integral ' + markerLabel
self.valuesDict[self.__tabInt][key] = integralVal
pqGB = qt.QGroupBox('XAS/XMCD integrals')
pqGB.setLayout(pqLayout)
# END Integral marker groupbox
# BEGIN magnetic moments groupbox
mmLayout = qt.QVBoxLayout()
mmLayout.setContentsMargins(0,0,0,0)
text = 'Orbital Magnetic Moment'
mmLineLayout = qt.QHBoxLayout()
self.mmOrbt = qt.QLineEdit()
self.mmOrbt.setReadOnly(True)
self.mmOrbt.setMaximumWidth(120)
mmLineLayout.addWidget(qt.QLabel(text))
mmLineLayout.addWidget(qt.HorizontalSpacer())
mmLineLayout.addWidget(qt.QLabel('mO = '))
mmLineLayout.addWidget(self.mmOrbt)
#self.triggerCalcmmLineLayout.setText
mmLineWidget = qt.QWidget()
mmLineWidget.setLayout(mmLineLayout)
mmLineWidget.setContentsMargins(0,-8,0,-8)
mmLayout.addWidget(mmLineWidget)
text = 'Spin Magnetic Moment'
mmLineLayout = qt.QHBoxLayout()
self.mmSpin = qt.QLineEdit()
self.mmSpin.setReadOnly(True)
self.mmSpin.setMaximumWidth(120)
mmLineLayout.addWidget(qt.QLabel(text))
mmLineLayout.addWidget(qt.HorizontalSpacer())
mmLineLayout.addWidget(qt.QLabel('mS = '))
mmLineLayout.addWidget(self.mmSpin)
#self.triggerCalcmmLineLayout.setText
mmLineWidget = qt.QWidget()
mmLineWidget.setLayout(mmLineLayout)
mmLineWidget.setContentsMargins(0,-8,0,-8)
mmLayout.addWidget(mmLineWidget)
text = 'Ratio Magnetic Moments'
mmLineLayout = qt.QHBoxLayout()
self.mmRatio = qt.QLineEdit()
self.mmRatio.setReadOnly(True)
self.mmRatio.setMaximumWidth(120)
mmLineLayout.addWidget(qt.QLabel(text))
mmLineLayout.addWidget(qt.HorizontalSpacer())
mmLineLayout.addWidget(qt.QLabel('mO/mS = '))
mmLineLayout.addWidget(self.mmRatio)
#self.triggerCalcmmLineLayout.setText
mmLineWidget = qt.QWidget()
mmLineWidget.setLayout(mmLineLayout)
mmLineWidget.setContentsMargins(0,-8,0,-8)
mmLayout.addWidget(mmLineWidget)
mmGB = qt.QGroupBox('Magnetic moments')
mmGB.setLayout(mmLayout)
# END magnetic moments groupbox
# BEGIN XMCD correction
self.xmcdDetrend = qt.QCheckBox()
self.xmcdDetrend.stateChanged['int'].connect(self.triggerDetrend)
xmcdDetrendLayout = qt.QVBoxLayout()
xmcdDetrendLayout.setContentsMargins(0,0,0,0)
xmcdDetrendLayout.addWidget(qt.QLabel(
'Detrend XMCD signal (subtracts a linear fit of the pre-edge region from the signal)'))
#xmcdDetrendLayout.addWidget(qt.HorizontalSpacer())
xmcdDetrendLayout.addWidget(self.xmcdDetrend)
xmcdDetrendWidget = qt.QWidget()
xmcdDetrendWidget.setLayout(xmcdDetrendLayout)
xmcdDetrendGB = qt.QGroupBox('XMCD Data Preprocessing')
xmcdDetrendGB.setLayout(xmcdDetrendLayout)
xmcdDetrendLayout.addWidget(xmcdDetrendWidget)
# END XMCD correction
xmcdTabLayout = qt.QVBoxLayout()
xmcdTabLayout.addWidget(pqGB)
xmcdTabLayout.addWidget(mmGB)
xmcdTabLayout.addWidget(xmcdDetrendGB)
xmcdTabLayout.addWidget(qt.VerticalSpacer())
xmcdWidget = qt.QWidget()
xmcdWidget.setLayout(xmcdTabLayout)
self.tabWidget.addTab(
xmcdWidget,
window.upper())
self.valuesDict[self.__tabInt]\
['Orbital Magnetic Moment'] = self.mmOrbt
self.valuesDict[self.__tabInt]\
['Spin Magnetic Moment'] = self.mmSpin
self.valuesDict[self.__tabInt]\
['Ratio Magnetic Moments'] = self.mmRatio
self.valuesDict[self.__tabInt]\
['XMCD Detrend'] = self.xmcdDetrend
#self.tabWidget.addTab(markerWidget, window.upper())
# Add to self.valuesDict
self.tabWidget.currentChanged['int'].connect(
self._handleTabChangedSignal)
# Add/Remove marker Buttons
#buttonAddMarker = qt.QPushButton('Add')
#buttonDelMarker = qt.QPushButton('Del')
buttonEstimate = qt.QPushButton('Estimate')
#buttonEstimate.clicked.connect(self.estimatePrePostEdgePositions)
buttonEstimate.clicked.connect(self.estimate)
#buttonEstimate.clicked.connect(self.estimateBG)
self.plotWindow.graphBottomLayout.addWidget(qt.HorizontalSpacer())
#self.plotWindow.graphBottomLayout.addWidget(buttonAddMarker)
#self.plotWindow.graphBottomLayout.addWidget(buttonDelMarker)
self.plotWindow.graphBottomLayout.addWidget(buttonEstimate)
self.connect(self.plotWindow.graph,
qt.SIGNAL("QtBlissGraphSignal"),
self._handleGraphSignal)
#self.modelWidthChangedSignal['float'].connect(self._handleModelWidthChangedSignal)
# Layout
mainWidget = qt.QWidget()
mainLayout = qt.QVBoxLayout()
mainLayout.addWidget(self.plotWindow)
mainLayout.addWidget(self.tabWidget)
mainLayout.setContentsMargins(1,1,1,1)
mainWidget.setLayout(mainLayout)
#self.setLayout(mainLayout)
self.setCentralWidget(mainWidget)
#
# Data handling:
#
# Each is Tuple (x,y)
# type(x),type(y) == ndarray
self.xmcdData = None # XMCD Spectrum
self.xasData = None # XAS Spectrum
self.xasDataCorr = None # XAS minus Background model
self.xasDataBG = None # XAS background
self.xmcdCorrData = None
# Integrated spectra: note that their length is
# one less than that of the input arrays
self.xmcdInt = None
self.xasInt = None
#
# File (name) handling
#
self.dataInputFilename = None
self.confFilename = None
self.baseFilename = None
tmpDict = {
# 'background' : {
# 'Pre Min': 658.02,
# 'Pre Max': 703.75,
# 'Post Min': 730.5,
# 'Post Max': 808.7,
# 'Edge 1': 721.44,
# 'Edge 2': 708.7,
# 'Step Ratio': 0.25,
# 'Step Width': 0.25
# },
# 'integration': {
# 'p': 717.3,
# 'q': 740.,
# 'r': 732.
# },
'element': {
'electron shell': '3d',
'electron occupation': '6.6',
'element': 'Fe',
'edge1Transition': 'L3M4',
'edge2Transition': 'L2M4'
}
}
self.setValuesDict(tmpDict)
self._createMenuBar()
def triggerDetrend(self, state):
if (state == qt.Qt.Unchecked) or\
(state == qt.Qt.PartiallyChecked):
print 'triggerDetrend -- Detrend unchecked'
# Replot original data
self.xmcdCorrData = None
else:
print 'triggerDetrend -- Trying to detrend XMCD Signal'
ddict = self.getValuesDict()
if self.xmcdData is None:
print 'triggerDetrend -- No xmcdData present!'
return
x, y = self.xmcdData
preMin = ddict[self.__tabBG][self.__preMin]
preMax = ddict[self.__tabBG][self.__preMax]
mask = numpy.nonzero((preMin <= x) & (x <= preMax))[0]
xFit = x.take(mask)
yFit = y.take(mask)
if (len(xFit) == 0) or (len(yFit) == 0):
return
# Fit linear model y = a*x + b
a, b = numpy.polyfit(xFit, yFit, 1)
print 'a, b =',a,',',b
trend = a*x + b
self.xmcdCorrData = (x, y-trend)
if self.getCurrentTab() == self.__tabInt:
self.plotOnDemand(self.__tabInt)
self.calcMagneticMoments()
def calcMagneticMoments(self):
print 'calcMM -- current tab:', self.tabWidget.currentIndex()
# 0. Get Marker intersections
ddict = self.valuesDict
pqr = []
mathObj = Mathematics()
for marker in self.xmcdMarkerList:
# TODO: Find better way to determine curves..
if marker in [self.__intP, self.__intQ]:
if self.xmcdCorrData is not None:
curve = 'xmcd corr Int Y'
else:
curve = 'xmcd Int Y'
else:
curve = 'xas Int Y'
spinbox = ddict[self.__tabInt][marker]
integralVals = spinbox.getIntersections()
x, y = integralVals.get(curve, (float('NaN'),float('NaN')))
key = 'Integral ' + marker
lineEdit = ddict[self.__tabInt][key]
lineEdit.setText(str(y))
pqr += [y]
# 1. Display integral values
#def magneticMoment(self, p, q, r, n, econf = '3d'):
# 2. Calculate the moments
p, q, r = pqr
electronOccupation = ddict[self.__tabElem]['electron occupation']
try:
n = float(electronOccupation.text())
except ValueError:
print('calcMM -- Could not convert electron occupation')
return
mmO, mmS, mmR = mathObj.magneticMoment(p,q,r,n)
# 3. Display moments
self.mmOrbt.setText(str(mmO))
self.mmSpin.setText(str(mmS))
self.mmRatio.setText(str(mmR))
def getIntegralValues(self, pos):
dataList = [self.xmcdInt, self.xasInt]
res = float('NaN')
resList = [res] * len(dataList)
if not self.xmcdInt:
print 'getIntegralValues -- self.xmcdInt not present'
return
if not self.xasInt:
print 'getIntegralValues -- self.xasInt not present'
return
for listIdx, data in enumerate(dataList):
x, y = data
if numpy.all(pos < x) or numpy.all(x < pos):
print('getIntegralValues -- Marker position outside of data range')
continue
#raise ValueError('Marker outside of data range')
if pos in x:
idx = numpy.where(x == pos)
res = y[idx]
else:
# Interpolation needed, assume well-behaved data (c.f. copy routine)
lesserIdx = numpy.nonzero(x < pos)[0][-1]
greaterIdx = numpy.nonzero(x > pos)[0][0]
dy = y[lesserIdx] - y[greaterIdx]
dx = x[lesserIdx] - x[greaterIdx]
res = dy/dx * (pos - x[lesserIdx]) + y[lesserIdx]
resList[listIdx] = res
#return res
print 'getIntegralValues -- Result:', resList
def _createMenuBar(self):
# Creates empty menu bar, if none existed before
menu = self.menuBar()
menu.clear()
#
# 'File' Menu
#
file = menu.addMenu('&File')
openAction = qt.QAction('&Open Spec File', self)
openAction.setShortcut(qt.Qt.CTRL+qt.Qt.Key_O)
openAction.setStatusTip('Opened file')
openAction.setToolTip('Opens a data file (*.spec)')
openAction.triggered.connect(self.loadData)
loadAction = qt.QAction('&Load Configuration', self)
loadAction.setShortcut(qt.Qt.CTRL+qt.Qt.Key_L)
loadAction.setStatusTip('Loaded analysis file')
loadAction.setToolTip('Loads an existing analysis file (*.sra)')
loadAction.triggered.connect(self.loadConfiguration)
saveConfAction = qt.QAction('&Save Configuration', self)
saveConfAction.setShortcut(qt.Qt.CTRL+qt.Qt.Key_S)
saveConfAction.setStatusTip('Saved analysis file')
saveConfAction.setToolTip('Save analysis in file (*.sra)')
saveConfAction.triggered.connect(self.saveConfiguration)
saveConfAsAction = qt.QAction('Save &Configuration as', self)
saveConfAsAction.setShortcut(qt.Qt.SHIFT+qt.Qt.CTRL+qt.Qt.Key_S)
saveConfAsAction.setStatusTip('Saved analysis file')
saveConfAsAction.setToolTip('Save analysis in file (*.sra)')
saveConfAsAction.triggered.connect(self.saveConfigurationAs)
saveDataAction = qt.QAction('Save &Data', self)
saveDataAction.setShortcut(qt.Qt.CTRL+qt.Qt.Key_D)
saveDataAction.setStatusTip('Saved analysis file')
saveDataAction.setToolTip('Save analysis in file (*.sra)')
saveDataAction.triggered.connect(self.saveData)
saveDataAsAction = qt.QAction('Save D&ata as', self)
saveDataAsAction.setShortcut(qt.Qt.SHIFT+qt.Qt.CTRL+qt.Qt.Key_D)
saveDataAsAction.setStatusTip('Saved analysis file')
saveDataAsAction.setToolTip('Save analysis in file (*.sra)')
saveDataAsAction.triggered.connect(self.saveDataAs)
# Populate the 'File' menu
for action in [openAction,
loadAction,
saveConfAction,
saveConfAsAction,
saveDataAction,
saveDataAsAction]:
file.addAction(action)
file.addAction('E&xit', self.close)
def loadData(self):
# Next steps:
# 1. Load an arbitrary spec file and let the user choose
#    which columns to use.
dial = LoadDichorismDataDialog()
dial.setDirectory(PyMcaDirs.outputDir)
if dial.exec_():
print 'Open clicked'
dataDict = dial.dataDict
else:
print 'Cancel clicked'
return
# Reset calculated data
self.xasDataCorr = None
self.xasDataBG = None
self.xmcdCorrData = None
self.xmcdInt = None
self.xasInt = None
# def setRawData(self, x, y, identifier):
x = dataDict['x']
xas = dataDict['xas']
xmcd = dataDict['xmcd']
self.dataInputFilename = dataDict['fn']
self.setRawData(x, xas, 'xas')
self.setRawData(x, xmcd, 'xmcd')
def saveDataAs(self):
self.baseFilename = None
self.__savedData = False
self.saveData()
def saveData(self):
# Saves spectral data that is calculated during
# the evaluation process:
# 1. BG Modell
# 2. XAS-BG
# 3./4. XAS/XMCD integrals
# Integral data goes into separate file
dataList = [self.xasData,
self.xasDataCorr,
self.xasDataBG,
self.xmcdInt,
self.xasInt]
if None in dataList:
msg = qt.QMessageBox()
msg.setWindowTitle('Sum Rules Analysis Error')
msg.setIcon(qt.QMessageBox.Warning)
msg.setText('Analysis incomplete!\nCannot save generated data')
msg.exec_()
return False
if self.__savedData and self.baseFilename:
pass
else:
ddict = self.getValuesDict()
saveDir = PyMcaDirs.outputDir
filter = 'spec File (*.spec);;All files (*.*)'
selectedFilter = 'Sum Rules Analysis files (*.spec)'
baseFilename = qt.QFileDialog.getSaveFileName(self,
'Save Sum Rule Analysis Data',
saveDir,
filter,
selectedFilter)
if len(baseFilename) == 0:
# Leave self.baseFilename as it is..
#self.baseFilename = None
return False
else:
self.baseFilename = str(baseFilename)
if self.baseFilename.endswith('.spec'):
# Append extension later
self.baseFilename = self.baseFilename.replace('.spec', '')
# Create filenames
specFilename = self.baseFilename + '_specData.spec'
intFilename = self.baseFilename + '_intData.spec'
self.__savedData = False
# Acquire filehandles
try:
specFilehandle = open(specFilename, 'wb')
except IOError:
msg = qt.QMessageBox()
msg.setWindowTitle('Sum Rules Analysis Error')
msg.setIcon(qt.QMessageBox.Warning)
msg.setText('Unable to open file \'%s\''%specFilename)
msg.exec_()
return False
try:
intFilehandle = open(intFilename, 'wb')
except IOError:
msg = qt.QMessageBox()
msg.setWindowTitle('Sum Rules Analysis Error')
msg.setIcon(qt.QMessageBox.Warning)
msg.setText('Unable to open file \'%s\''%intFilename)
msg.exec_()
return False
title = 'Sum Rules Analysis on \'%s\''%self.dataInputFilename
header = '#S %d %s'%(1,title) + NEWLINE
delim = ' '
# 1. Background Modell, XAS, XAS-Background
# All share the same x-range
xSpec, yXas = self.xasData
xSpec, yBG = self.xasDataBG
xSpec, yXasBG = self.xasDataCorr
dataSpec = numpy.vstack((xSpec, yXas, yBG, yXasBG)).T
# 2. Integrals
# Also share the same x-range
xInt, yXasInt = self.xasInt
xInt, yXmcdInt = self.xmcdInt
xSpec, xasBG = self.xasDataCorr
dataInt = numpy.vstack((xInt, yXasInt, yXmcdInt)).T
outSpec = header
outSpec += '#N %d'%4 + NEWLINE
outSpec += '#L x XAS BG XAScorr' + NEWLINE
for line in dataSpec:
tmp = delim.join(['%f'%num for num in line])
outSpec += (tmp + NEWLINE)
outInt = header
outInt += '#N %d'%3 + NEWLINE
outInt += '#L x XAS Int XMCD Int' + NEWLINE
for line in dataInt:
tmp = delim.join(['%f'%num for num in line])
outInt += (tmp + NEWLINE)
for (fh, output) in zip([specFilehandle, intFilehandle],
[outSpec, outInt]):
fh.write(NEWLINE)
fh.write(output)
fh.write(NEWLINE)
fh.close()
self.__savedData = True
return True
def saveConfigurationAs(self, shortcut=False):
self.confFilename = None
self.__savedConf = False
self.saveConfiguration()
def saveConfiguration(self):
ddict = self.getValuesDict()
if self.__savedConf and self.confFilename:
filename = self.confFilename
else:
saveDir = PyMcaDirs.outputDir
filter = 'Sum Rules Analysis files (*.sra);;All files (*.*)'
selectedFilter = 'Sum Rules Analysis files (*.sra)'
filename = qt.QFileDialog.getSaveFileName(self,
'Save Sum Rule Analysis Configuration',
saveDir,
filter,
selectedFilter)
if len(filename) == 0:
return False
else:
filename = str(filename)
if not filename.endswith('.sra'):
filename += '.sra'
self.confFilename = filename
self.__savedConf = False
confDict = ConfigDict.ConfigDict(self.getValuesDict())
try:
confDict.write(filename)
except IOError:
msg = qt.QMessageBox()
msg.setWindowTitle('Sum Rules Analysis Error')
msg.setIcon(qt.QMessageBox.Warning)
msg.setText('Unable to write configuration to \'%s\''%filename)
msg.exec_()
return False
self.__savedConf = True
return True
def loadConfiguration(self):
confDict = ConfigDict.ConfigDict()
ddict = self.getValuesDict()
loadDir = PyMcaDirs.outputDir
filter = 'Sum Rules Analysis files (*.sra);;All files (*.*)'
selectedFilter = 'Sum Rules Analysis files (*.sra)'
filename = qt.QFileDialog.getOpenFileName(self,
'Load Sum Rule Analysis Configuration',
loadDir,
filter,
selectedFilter)
if len(filename) == 0:
return
else:
filename = str(filename)
try:
confDict.read(filename)
except IOError:
msg = qt.QMessageBox()
msg.setWindowTitle('Sum Rules Analysis Error')
msg.setIcon(qt.QMessageBox.Warning)
msg.setText('Unable to read configuration file \'%s\''%filename)
msg.exec_()
return
try:
self.setValuesDict(confDict)
#keysLoaded = confDict.keys()
#keysValues = self.valuesDict.keys()
except KeyError as e:
if DEBUG:
print('loadConfiguration -- Key Error in \'%s\''%filename)
print('\tMessage:', e)
else:
msg = qt.QMessageBox()
msg.setWindowTitle('Sum Rules Analysis Error')
msg.setIcon(qt.QMessageBox.Warning)
msg.setText('Malformed configuration file \'%s\''%filename)
msg.exec_()
return
self.__savedConf = True
def close(self):
if not self.__savedConf:
msg = qt.QMessageBox()
msg.setWindowTitle('Sum Rules Tool')
msg.setIcon(qt.QMessageBox.Warning)
msg.setText('The configuration has changed!\nAre you sure you want to close the window?')
msg.setStandardButtons(qt.QMessageBox.Cancel | qt.QMessageBox.Discard)
if msg.exec_() == qt.QMessageBox.Cancel:
return
qt.QMainWindow.close(self)
def _createStatusBar(self):
pass
def setElectronConf(self, eConf):
# updates the element combo box
eConf = str(eConf)
if len(eConf) == 0:
self.electronOccupation.setDisabled(True)
else:
self.electronOccupation.setDisabled(False)
self.elementCB.clear()
elementsList = self.elementsDict[eConf]
self.elementCB.addItems(['']+elementsList)
def getElementInfo(self, symbol):
ddict = {}
symbol = str(symbol)
if len(symbol) == 0:
self.valuesDict[self.__tabElem]['info'] = {}
return
try:
ddict = Elements.Element[symbol]
except KeyError:
msg = ('setElement -- \'%s\' not found in '%symbol)
msg += 'Elements.Element dictionary'
print(msg)
# Update valuesDict
self.valuesDict[self.__tabElem]['info'] = ddict
# Update the EdgeCBs
# Lookup all keys ending in 'xrays'
keys = [item for item in ddict.keys() if item.endswith('xrays')]
keys.sort()
# keys is list of list, flatten it..
transitions = sum([ddict[key] for key in keys],[])
# Only take transitions that occur in the experiment
transitions = [t for t in transitions if t in self.occuringTransitions]
tmpDict = dict( [(transition, ddict[transition]['energy']) for transition in transitions])
for cb, ed in [(self.edge1CB, self.edge1Line),
(self.edge2CB, self.edge2Line)]:
curr = cb.currentText()
cb.clear()
ed.clear()
ed.updateDict(tmpDict)
cb.addItems(['']+transitions)
# Try to set to old entry
idx = cb.findText(qt.QString(curr))
if idx < 0: idx = 0
cb.setCurrentIndex(idx)
def getCurrentTab(self):
idx = self.tabWidget.currentIndex()
return self.tabList[idx]
def getValuesDict(self):
ddict = {}
for tab, tabDict in self.valuesDict.items():
if tab not in ddict.keys():
ddict[tab] = {}
for key, obj in tabDict.items():
value = None
if isinstance(obj, MarkerSpinBox):
value = obj.value()
elif isinstance(obj, qt.QCheckBox):
state = obj.checkState()
if state == qt.Qt.Checked:
value = True
else:
# Also covers state == qt.Qt.PartiallyChecked
value = False
elif isinstance(obj, qt.QComboBox):
tmp = obj.currentText()
value = str(tmp)
elif isinstance(obj, LineEditDisplay) or\
isinstance(obj, qt.QLineEdit):
tmp = str(obj.text())
try:
value = float(tmp)
except ValueError:
value = tmp
elif isinstance(obj, qt.QDoubleSpinBox):
value = obj.value()
elif isinstance(obj, dict):
value = obj
ddict[tab][key] = value
return ddict
def setValuesDict(self, ddict):
markerList = (self.xasMarkerList + self.xmcdMarkerList)
elementList = (self.transitionMetals
+ self.rareEarths
+ self.electronConfs)
# Check as early as possible if element symbol is present
try:
symbol = ddict[self.__tabElem]['element']
self.getElementInfo(symbol)
except KeyError:
pass
for tab, tabDict in ddict.items():
if tab not in self.valuesDict.keys():
raise KeyError('setValuesDict -- Tab not found')
for key, value in tabDict.items():
print key
if not isinstance(key, str):
raise KeyError('setValuesDict -- Key is not str instance')
obj = self.valuesDict[tab][key]
if isinstance(obj, MarkerSpinBox):
try:
tmp = float(value)
obj.setValue(tmp)
except ValueError:
xmin, xmax = self.plotWindow.graph.getX1AxisLimits()
tmp = xmin + (xmax-xmin)/10.
if DEBUG:
msg = 'setValuesDict -- Float conversion failed'
msg += ' while setting marker positions. Value: %s' % value
print(msg)
elif isinstance(obj, qt.QCheckBox):
if value == True:
state = qt.Qt.Checked
else:
state = qt.Qt.Unchecked
obj.setCheckState(state)
elif isinstance(obj, qt.QDoubleSpinBox):
try:
tmp = float(value)
obj.setValue(tmp)
except ValueError:
if DEBUG:
msg = 'setValuesDict -- Float conversion failed'
msg += ' while setting QDoubleSpinBox value. Value: %s' % value
print(msg)
elif isinstance(obj, qt.QComboBox):
idx = obj.findText(qt.QString(value))
obj.setCurrentIndex(idx)
elif isinstance(obj, LineEditDisplay):
# Must be before isinstance(obj, qt.QLineEdit)
# since LineEditDisplay inherits from QLineEdit
#obj.checkComboBox()
obj.checkController()
elif isinstance(obj, qt.QLineEdit):
if value:
tmp = str(value)
obj.setText(tmp)
else:
obj.setText('???')
elif isinstance(obj, dict):
# Rebinding the local name would be a no-op; store the value back instead
self.valuesDict[tab][key] = value
else:
raise KeyError('setValuesDict -- \'%s\' not found'%key)
def addEdgeMarker(self):
print 'addEdgeMarker clicked'
def delEdgeMarker(self):
print 'delEdgeMarker clicked'
def setRawData(self, x, y, identifier):
if identifier not in ['xmcd', 'xas']:
msg = 'Identifier must either be \'xmcd\' or \'xas\''
raise ValueError(msg)
# Sort energy range
sortedIdx = x.argsort()
xSorted = x.take(sortedIdx)[:]
ySorted = y.take(sortedIdx)[:]
# Ensure strictly monotonically increasing energy range
dx = numpy.diff(x)
if not numpy.all(dx > 0.):
mask = numpy.nonzero(dx)
xSorted = numpy.take(xSorted, mask)
ySorted = numpy.take(ySorted, mask)
# Add spectrum to plotWindow using the
if identifier == 'xmcd':
self.xmcdData = (xSorted, ySorted)
#self.plotWindow.graph.mapToY2(intLegend)
elif identifier == 'xas':
self.xasData = (xSorted, ySorted)
# Trigger replot when data is added
currIdx = self.tabWidget.currentIndex()
self._handleTabChangedSignal(currIdx)
def estimate(self):
tab = self.getCurrentTab()
if tab == self.__tabBG:
self.estimatePrePostEdgePositions()
elif tab == self.__tabInt:
self.estimateInt()
else:
# Do nothing
pass
return
def estimatePrePostEdgePositions(self):
if self.xasData is None:
return
ddict = self.getValuesDict()
edgeList = [ddict[self.__tabElem]['edge1Energy'],
ddict[self.__tabElem]['edge2Energy']]
filterEdgeList = lambda x:\
float(x.replace('meV',''))\
if (len(x)>0 and x!='---')\
else 0.0
# Use list comprehension instead of map(filterEdgeList, edgeList)
edgeList = [filterEdgeList(edge) for edge in edgeList]
x, y = self.xasData
xLimMin, xLimMax = self.plotWindow.getGraphXLimits()
xMin = x[0]
xMax = x[-1]
xLen = xMax - xMin
xMiddle = .5 *(xMax + xMin)
# Average step length (Watch out for uneven data!)
xStep = (xMax + xMin) / float(len(x))
# Look for the index closest to the physical middle
mask = numpy.nonzero(x <= xMiddle)[0]
idxMid = mask[-1]
factor = 10./100.
edge1, edge2 = edgeList
maxEdge = max(edgeList)
minEdge = min(edgeList)
if minEdge == 0. and maxEdge == 0.:
# No edge set
preMax = xMiddle - factor*xLen
postMin = xMiddle + factor*xLen
edge1 = xMiddle
edge2 = 0.
elif minEdge == 0.:
# Single edge set
preMax = maxEdge - factor*xLen
postMin = maxEdge + factor*xLen
else:
# Two edges set
preMax = minEdge - factor*xLen
postMin = maxEdge + factor*xLen
ddict[self.__tabBG][self.__preMin] = max(xMin,xLimMin+xStep)
ddict[self.__tabBG][self.__preMax] = preMax
ddict[self.__tabBG][self.__postMin] = postMin
ddict[self.__tabBG][self.__postMax] = min(xMax,xLimMax-xStep)
ddict[self.__tabBG]['Edge 1'] = edge1
ddict[self.__tabBG]['Edge 2'] = edge2
self.setValuesDict(ddict)
self.estimateBG()
def estimateInt(self):
if self.xasDataCorr is None or\
self.xasInt is None or\
self.xmcdInt is None:
# Nothing to do...
return
ddict = self.getValuesDict()
x, y = self.xasData
xLimMin, xLimMax = self.plotWindow.getGraphXLimits()
xMin = x[0]
xMax = x[-1]
xLen = xMax - xMin
xMiddle = .5 *(xMax + xMin)
factor = 10./100.
postMin = ddict[self.__tabBG][self.__postMin]
postMax = ddict[self.__tabBG][self.__postMax]
edge1 = ddict[self.__tabBG]['Edge 1']
edge2 = ddict[self.__tabBG]['Edge 2']
# Estimate intP
if edge1 == 0.:
intP = edge2 + factor * xLen
elif edge2 == 0.:
intP = edge1 + factor * xLen
else:
intP = min(edge1, edge2) + factor * xLen
# Estimate intQ
intQ = postMin + factor * xLen
# Estimate intR
intR = postMax - factor * xLen
# Also estimate the p, q, r Markers:
ddict[self.__tabInt][self.__intP] = intP
ddict[self.__tabInt][self.__intQ] = intQ
ddict[self.__tabInt][self.__intR] = intR
self.setValuesDict(ddict)
def estimateBG(self, val=None):
if self.xasData is None:
return
if self.tabWidget.currentIndex() != 1:
# Only call from tab 1
return
# TODO: Remove all background curves
x, y = self.xasData
#self.estimatePrePostEdgePositions()
ddict = self.getValuesDict()
x01 = ddict[self.__tabBG]['Edge 1']
x02 = ddict[self.__tabBG]['Edge 2']
preMin = ddict[self.__tabBG][self.__preMin]
preMax = ddict[self.__tabBG][self.__preMax]
postMin = ddict[self.__tabBG][self.__postMin]
postMax = ddict[self.__tabBG][self.__postMax]
width = ddict[self.__tabBG]['Step Width']
ratio = ddict[self.__tabBG]['Step Ratio']
if preMin > preMax:
tmp = preMin
preMin = preMax
preMax = tmp
if postMin > postMax:
tmp = postMin
postMin = postMax
postMax = tmp
idxPre = numpy.nonzero((preMin <= x) & (x <= preMax))[0]
idxPost = numpy.nonzero((postMin <= x) & (x <= postMax))[0]
if (len(idxPre) == 0) or (len(idxPost) == 0):
if DEBUG:
print('estimateBG -- Something is wrong with pre/post edge markers')
return
xPreMax = x[idxPre.max()]
xPostMin = x[idxPost.min()]
gap = abs(xPreMax - xPostMin)
avgPre = numpy.average(y[idxPre])
avgPost = numpy.average(y[idxPost])
bottom = min(avgPre,avgPost)
top = max(avgPre,avgPost)
if avgPost >= avgPre:
sign = 1.
erf = upstep
else:
sign = -1.
erf = downstep
diff = abs(avgPost - avgPre)
ymin = y.min()
ymax = y.max()
if x01 == 0.:
par1 = (ratio, x02, width*gap)
if DEBUG:
print('estimateBG -- x01 == 0, using par1: %s'%str(par1))
model = bottom + sign * diff * erf(par1, x)
elif x02 == 0.:
par1 = (ratio, x01, width*gap)
if DEBUG:
print('estimateBG -- x02 == 0, using par1: %s'%str(par1))
model = bottom + sign * diff * erf(par1, x)
elif x02 < x01:
par1 = (ratio, x02, width*gap)
par2 = ((1.-ratio), x01, width*gap)
if DEBUG:
print('estimateBG -- x02 < x01, using par1: %s and par2: %s'\
%(str(par1),str(par2)))
model = bottom + sign * diff * (erf(par1, x) + erf(par2, x))
else:
par1 = (ratio, x01, width*gap)
par2 = ((1.-ratio), x02, width*gap)
if DEBUG:
print('estimateBG -- x01 < x02, using par1: %s and par2: %s'\
%(str(par1),str(par2)))
model = bottom + sign * diff * (erf(par1, x) + erf(par2, x))
preModel = numpy.asarray(len(x)*[avgPre])
postModel = numpy.asarray(len(x)*[avgPost])
self.xasDataBG = x, model
self.plotWindow.addCurve(x,
model,
self.__xasBGmodel,
{},
replot=False)
self.plotWindow.addCurve(x,
preModel,
'Pre BG model',
{},
replot=False)
self.plotWindow.addCurve(x,
postModel,
'Post BG model',
{},
replot=False)
self.modelWidthChangedSignal.emit('%.3f'%(width*gap))
self.plotWindow.graph.replot()
def plotOnDemand(self, window, xlabel='ene_st', ylabel='zratio'):
# Remove all curves
legends = self.plotWindow.getAllCurves(just_legend=True)
self.plotWindow.removeCurves(legends, replot=False)
if (self.xmcdData is None) or (self.xasData is None):
# Nothing to do
return
xyList = []
mapToY2 = False
window = window.lower()
if window == self.__tabElem:
if self.xmcdCorrData is not None:
if DEBUG:
print('plotOnDemand -- __tabElem: Using self.xmcdCorrData')
xmcdX, xmcdY = self.xmcdCorrData
xmcdLabel = 'xmcd corr'
else:
if DEBUG:
print('plotOnDemand -- __tabElem: Using self.xmcdData')
xmcdX, xmcdY = self.xmcdData
xmcdLabel = 'xmcd'
xasX, xasY = self.xasData
xyList = [(xmcdX, xmcdY, xmcdLabel),
(xasX, xasY, 'xas')]
# At least one of the curves is going
# to get plotted on secondary y axis
mapToY2 = True
elif window == self.__tabBG:
xasX, xasY= self.xasData
xyList = [(xasX, xasY, 'xas')]
if self.xasDataBG is not None:
xasBGX, xasBGY = self.xasDataBG
xyList += [(xasBGX, xasBGY, self.__xasBGmodel)]
elif window == self.__tabInt:
if (self.xasDataBG is None):
self.xmcdInt = None
self.xasInt = None
return
# Calculate xasDataCorr
xBG, yBG = self.xasDataBG
x, y = self.xasData
self.xasDataCorr = x, y-yBG
if self.xmcdCorrData is not None:
if DEBUG:
print('plotOnDemand -- __tabInt: Using self.xmcdCorrData')
xmcdX, xmcdY = self.xmcdCorrData
xmcdIntLabel = 'xmcd corr Int'
else:
if DEBUG:
print('plotOnDemand -- __tabInt: Using self.xmcdData')
xmcdX, xmcdY = self.xmcdData
xmcdIntLabel = 'xmcd Int'
mathObj = Mathematics()
xasX, xasY = self.xasDataCorr
xmcdIntY = mathObj.cumtrapz(y=xmcdY, x=xmcdX)
xmcdIntX = .5 * (xmcdX[1:] + xmcdX[:-1])
xasIntY = mathObj.cumtrapz(y=xasY, x=xasX)
xasIntX = .5 * (xasX[1:] + xasX[:-1])
ylabel += ' integral'
xyList = [(xmcdIntX, xmcdIntY, xmcdIntLabel),
(xasX, xasY, 'xas corr'),
(xasIntX, xasIntY, 'xas Int')]
self.xmcdInt = xmcdIntX, xmcdIntY
self.xasInt = xasIntX, xasIntY
for x,y,legend in xyList:
self.plotWindow.newCurve(
x=x,
y=y,
legend=legend,
xlabel=xlabel,
#xlabel=xlabel,
ylabel=None,
#ylabel=ylabel,
info={},
replot=False,
replace=False)
if mapToY2:
specLegend = self.plotWindow.dataObjectsList[-1]
self.plotWindow.graph.mapToY2(specLegend)
mapToY2 = False
self.plotWindow.graph.replot()
def addMarker(self, window, label='X MARKER', xpos=None, unit=''):
# Add spinbox controlling the marker
spinbox = MarkerSpinBox(window, self.plotWindow, label)
# Connects
self.tabChangedSignal.connect(spinbox._setMarkerFollowMouse)
if len(unit) > 0:
text = label + ' ' + unit
else:
text = label
# Widget & Layout
spinboxWidget = qt.QWidget()
spinboxLayout = qt.QHBoxLayout()
spinboxLayout.addWidget(qt.QLabel(text))
spinboxLayout.addWidget(qt.HorizontalSpacer())
spinboxLayout.addWidget(spinbox)
spinboxWidget.setLayout(spinboxLayout)
return spinboxWidget, spinbox
def _handleGraphSignal(self, ddict):
#if 'marker' not in ddict:
if ddict['event'] == 'markerMoved':
if self.tabWidget.currentIndex() == 1: # 1 -> BG tab
self.estimateBG()
def _handleTabChangedSignal(self, idx):
if idx >= len(self.tabList):
print('Tab changed -- Index out of range')
return
tab = self.tabList[idx]
self.plotOnDemand(window=tab)
# Hide/Show markers depending on the selected tab
# Problem: MarkerLabels are stored in markerList,
# however the MarkerSpinBoxes are stored in
# self.valuesDict ...
# edgeMarkers & xasMarkers -> BACKGROUND tab
# xmcdMarker -> INTEGRATION tab
markerList = self.xasMarkerList\
+ self.edgeMarkerList\
+ self.xmcdMarkerList
if tab == self.__tabBG:
for marker in markerList:
if (marker in self.xasMarkerList) or\
(marker in self.edgeMarkerList):
sb = self.valuesDict[self.__tabBG][marker]
sb.showMarker()
else:
sb = self.valuesDict[self.__tabInt][marker]
sb.hideMarker()
keys = [key for key in self.valuesDict[self.__tabElem].keys()\
if key.endswith('Transition')]
ratioSB = self.valuesDict[self.__tabBG]['Step Ratio']
for idx, keyElem in enumerate(keys):
keyBG = 'Edge %d'%(idx+1)
sb = self.valuesDict[self.__tabBG][keyBG]
parentWidget = sb.parent()
# If edge combobox does not show empty string ''
transition = str(self.valuesDict[self.__tabElem]\
[keyElem].currentText())
if len(transition) > 0:
parentWidget.setEnabled(True)
else:
parentWidget.setEnabled(False)
sb.hideMarker()
ratioSB.setEnabled(False)
ratioSB.setValue(1.0)
elif tab == self.__tabInt:
for marker in markerList:
if marker in self.xmcdMarkerList:
sb = self.valuesDict[self.__tabInt][marker]
sb.showMarker()
else:
sb = self.valuesDict[self.__tabBG][marker]
#sb.setValue(0.0) # Should be consistent with estimateBG
sb.hideMarker()
else: # tab == self.__tabElem:
for marker in markerList:
if marker in self.xmcdMarkerList:
sb = self.valuesDict[self.__tabInt][marker]
else:
sb = self.valuesDict[self.__tabBG][marker]
sb.showMarker()
self.tabChangedSignal.emit(tab)
def keyPressEvent(self, event):
if event.key() == qt.Qt.Key_F2:
# Switch to tab Element
idx = self.tabList.index(self.__tabElem)
self.tabWidget.setCurrentIndex(idx)
elif event.key() == qt.Qt.Key_F3:
# Switch to tab Background
idx = self.tabList.index(self.__tabBG)
self.tabWidget.setCurrentIndex(idx)
elif event.key() == qt.Qt.Key_F4:
# Switch to tab Integration
idx = self.tabList.index(self.__tabInt)
self.tabWidget.setCurrentIndex(idx)
elif event.key() == qt.Qt.Key_F5:
# Trigger estimation
self.estimate()
else:
qt.QWidget.keyPressEvent(self, event)
def getData(fn='/home/truter/lab/datasets/sum_rules/Fe_L23/xld_analysis.spec'):
analysis = sf.specfile.Specfile(fn)
xmcdArr = []
xasArr = []
avgA, avgB = [], []
spec = analysis[0]
x = spec[0][:]
avgA = spec[1][:]
avgB = spec[2][:]
xmcdArr = spec[3][:]
xasArr = spec[4][:]
return x, avgA, avgB, xmcdArr, xasArr
class LoadDichorismDataDialog(qt.QFileDialog):
dataInputSignal = qt.pyqtSignal(object)
def __init__(self, parent=None):
#qt.QDialog.__init__(self, parent)
qt.QFileDialog.__init__(self, parent)
self.dataDict = {}
self.validated = False
self.setWindowTitle('Load Dichroism Data')
self.setFilter('Spec Files (*.spec);;'
+'All Files (*.*)')
# Take the QSpecFileWidget class as used
# in the main window to select data and
# insert it into a QFileDialog. Emit the
# selected data at acceptance
self.specFileWidget = QSpecFileWidget.QSpecFileWidget(
parent=parent,
autoreplace=False)
# Hide the widget containing the Auto Add/Replace
# checkboxes
self.specFileWidget.autoAddBox.parent().hide()
# Remove the tab widget, only the counter widget
# is needed. Remember: close() only hides a widget
# however the widget persists in memory.
#self.specFileWidget.mainTab.removeTab(1)
self.specFileWidget.mainTab.hide()
#self.counterTab = self.specFileWidget.mainTab.widget(0)
self.specFileWidget.mainLayout.addWidget(self.specFileWidget.cntTable)
self.specFileWidget.cntTable.show()
# Change the table headers in cntTable
# Note: By coincidence, the original SpecFileCntTable
# has just as many columns as we need. Here, we rename
# the last two:
# 'y' -> 'XAS'
# 'mon' -> 'XMCD'
labels = ['Counter', 'X', 'XAS', 'XMCD']
table = self.specFileWidget.cntTable
for idx in range(len(labels)):
item = table.horizontalHeaderItem(idx)
if item is None:
item = qt.QTableWidgetItem(labels[idx],
qt.QTableWidgetItem.Type)
item.setText(labels[idx])
table.setHorizontalHeaderItem(idx,item)
# Hide the widget containing the Add, Replace, ...
# PushButtons
self.specFileWidget.buttonBox.hide()
# Change selection behavior/mode in the scan list so
# that only a single scan can be selected at a time
self.specFileWidget.list.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
self.specFileWidget.list.setSelectionMode(qt.QAbstractItemView.SingleSelection)
# Tinker with the native layout of QFileDialog
mainLayout = self.layout()
mainLayout.addWidget(self.specFileWidget, 0, 4, 4, 1)
#
# Signals
#
self.currentChanged.connect(self.setDataSource)
def setDataSource(self, filename):
# Opens a spec file and lets the user browse its
# contents in the top right widget
filename = str(filename)
if osPathIsDir(filename) or (not filename.endswith('.spec')):
return
src = SpecFileDataSource.SpecFileDataSource(filename)
self.specFileWidget.setDataSource(src)
def accept(self):
llist = self.selectedFiles()
if len(llist) == 1:
filename = str(llist[0])
else:
return
self.processSelectedFile(filename)
if self.validated:
qt.QDialog.accept(self)
def processSelectedFile(self, filename):
self.dataDict = {}
filename = str(filename)
if (not filename.endswith('.spec')):
return
scanList = self.specFileWidget.list.selectedItems()
if len(scanList) == 0:
self.errorMessageBox('No scan selected!')
return
else:
scan = scanList[0]
scanNo = str(scan.text(1))
table = self.specFileWidget.cntTable
# ddict['x'] -> 'X'
# ddict['y'] -> 'XAS'
# ddict['m'] -> 'XMCD'
ddict = table.getCounterSelection()
colX = ddict['x']
colXas = ddict['y']
colXmcd = ddict['m']
# Check if only one is selected
if len(colX) != 1:
self.errorMessageBox('Single counter must be set as X')
return
else:
colX = colX[0]
if len(colXas) != 1:
self.errorMessageBox('Single counter must be set as XAS')
return
else:
colXas = colXas[0]
if len(colXmcd) != 1:
self.errorMessageBox('Single counter must be set as XMCD')
return
else:
colXmcd = colXmcd[0]
if colXas == colX:
self.errorMessageBox('X and XAS use the same counter')
return
elif colX == colXmcd:
self.errorMessageBox('X and XMCD use the same counter')
return
elif colXmcd == colXas:
self.errorMessageBox('XAS and XMCD use the same counter')
return
# Extract data
dataObj = self.specFileWidget.data.getDataObject(scanNo)
# data has format (rows, cols) -> (steps, counters)
self.dataDict['fn'] = filename
self.dataDict['x'] = dataObj.data[:, colX]
self.dataDict['xas'] = dataObj.data[:, colXas]
self.dataDict['xmcd'] = dataObj.data[:, colXmcd]
self.validated = True
self.dataInputSignal.emit(self.dataDict)
#self.destroy() ?
#self.close()
def errorMessageBox(self, msg):
box = qt.QMessageBox()
box.setWindowTitle('Sum Rules Load Data Error')
box.setIcon(qt.QMessageBox.Warning)
box.setText(msg)
box.exec_()
if __name__ == '__main__':
app = qt.QApplication([])
win = SumRulesWindow()
x, avgA, avgB, xmcd, xas = getData()
#win.plotWindow.newCurve(x,xmcd, legend='xmcd', xlabel='ene_st', ylabel='zratio', info={}, replot=False, replace=False)
win.setRawData(x,xmcd, identifier='xmcd')
#win.plotWindow.newCurve(x,xas, legend='xas', xlabel='ene_st', ylabel='zratio', info={}, replot=False, replace=False)
win.setRawData(x,xas, identifier='xas')
#win = LoadDichorismDataDialog()
win.show()
app.exec_()
|
tonnrueter/pymca_devel
|
PyMca/PyMcaPlugins/SumRulesPlugin.py
|
Python
|
gpl-2.0
| 83,508
|
[
"TINKER"
] |
986d3851f1d23f17489095fbfffcc1354ed7941cde2e5fa681c270b4e050488d
|
from __future__ import print_function, division
import os
import tempfile
import mdtraj as md
import pandas as pd
import numpy as np
from numpy.testing import assert_approx_equal
from mdtraj.testing import eq
from sklearn.externals.joblib import load, dump
import sklearn.pipeline
from six import PY3
from msmbuilder.utils import map_drawn_samples
from msmbuilder import cluster
from msmbuilder.msm.core import _transition_counts
from msmbuilder.msm import MarkovStateModel
def test_counts_1():
# test counts matrix without trimming
model = MarkovStateModel(reversible_type=None, ergodic_cutoff=0)
model.fit([[1, 1, 1, 1, 1, 1, 1, 1, 1]])
eq(model.countsmat_, np.array([[8.0]]))
eq(model.mapping_, {1: 0})
def test_counts_2():
# test counts matrix with trimming
model = MarkovStateModel(reversible_type=None, ergodic_cutoff=1)
model.fit([[1, 1, 1, 1, 1, 1, 1, 1, 1, 2]])
eq(model.mapping_, {1: 0})
eq(model.countsmat_, np.array([[8]]))
def test_counts_3():
# test counts matrix scaling
seq = [1] * 4 + [2] * 4 + [1] * 4
model1 = MarkovStateModel(reversible_type=None, lag_time=2,
sliding_window=True).fit([seq])
model2 = MarkovStateModel(reversible_type=None, lag_time=2,
sliding_window=False).fit([seq])
model3 = MarkovStateModel(reversible_type=None, lag_time=2,
ergodic_cutoff='off').fit([seq])
eq(model1.countsmat_, model2.countsmat_)
eq(model1.countsmat_, model3.countsmat_)
eq(model2.countsmat_, model3.countsmat_)
def test_3():
model = MarkovStateModel(reversible_type='mle')
model.fit([[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0]])
counts = np.array([[8, 1, 1], [1, 3, 0], [1, 0, 3]])
eq(model.countsmat_, counts)
assert np.sum(model.populations_) == 1.0
model.timescales_
# test pickleable
try:
dir = tempfile.mkdtemp()
fn = os.path.join(dir, 'test-msm-temp.npy')
dump(model, fn, compress=1)
model2 = load(fn)
eq(model2.timescales_, model.timescales_)
finally:
os.unlink(fn)
os.rmdir(dir)
def test_4():
data = [np.random.randn(10, 1), np.random.randn(100, 1)]
print(cluster.KMeans(n_clusters=3).fit_predict(data))
print(cluster.MiniBatchKMeans(n_clusters=3).fit_predict(data))
print(cluster.AffinityPropagation().fit_predict(data))
print(cluster.MeanShift().fit_predict(data))
print(cluster.SpectralClustering(n_clusters=2).fit_predict(data))
print(cluster.Ward(n_clusters=2).fit_predict(data))
def test_5():
# test score_ll
model = MarkovStateModel(reversible_type='mle')
sequence = ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b']
model.fit([sequence])
assert model.mapping_ == {'a': 0, 'b': 1}
score_aa = model.score_ll([['a', 'a']])
assert score_aa == np.log(model.transmat_[0, 0])
score_bb = model.score_ll([['b', 'b']])
assert score_bb == np.log(model.transmat_[1, 1])
score_ab = model.score_ll([['a', 'b']])
assert score_ab == np.log(model.transmat_[0, 1])
score_abb = model.score_ll([['a', 'b', 'b']])
assert score_abb == (np.log(model.transmat_[0, 1]) +
np.log(model.transmat_[1, 1]))
assert model.state_labels_ == ['a', 'b']
assert np.sum(model.populations_) == 1.0
def test_51():
# test score_ll
model = MarkovStateModel(reversible_type='mle')
sequence = ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'a', 'a']
model.fit([sequence])
assert model.mapping_ == {'a': 0, 'b': 1, 'c': 2}
score_ac = model.score_ll([['a', 'c']])
assert score_ac == np.log(model.transmat_[0, 2])
def test_6():
# test score_ll with novel entries
model = MarkovStateModel(reversible_type='mle')
sequence = ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b']
model.fit([sequence])
assert not np.isfinite(model.score_ll([['c']]))
assert not np.isfinite(model.score_ll([['c', 'c']]))
assert not np.isfinite(model.score_ll([['a', 'c']]))
def test_7():
# test timescales
model = MarkovStateModel()
model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1]])
assert np.all(np.isfinite(model.timescales_))
assert len(model.timescales_) == 1
model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
assert np.all(np.isfinite(model.timescales_))
assert len(model.timescales_) == 2
assert model.n_states_ == 3
model = MarkovStateModel(n_timescales=1)
model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
assert len(model.timescales_) == 1
model = MarkovStateModel(n_timescales=100)
model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
assert len(model.timescales_) == 2
assert np.sum(model.populations_) == 1.0
def test_8():
# test transform
model = MarkovStateModel()
model.fit([['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']])
assert model.mapping_ == {'a': 0, 'b': 1, 'c': 2}
v = model.transform([['a', 'b', 'c']])
assert isinstance(v, list)
assert len(v) == 1
assert v[0].dtype == np.int
np.testing.assert_array_equal(v[0], [0, 1, 2])
v = model.transform([['a', 'b', 'c', 'd']], 'clip')
assert isinstance(v, list)
assert len(v) == 1
assert v[0].dtype == np.int
np.testing.assert_array_equal(v[0], [0, 1, 2])
v = model.transform([['a', 'b', 'c', 'd']], 'clip')
assert isinstance(v, list)
assert len(v) == 1
assert v[0].dtype == np.int
np.testing.assert_array_equal(v[0], [0, 1, 2])
v = model.transform([['a', 'b', 'c', 'd']], 'fill')
assert isinstance(v, list)
assert len(v) == 1
assert v[0].dtype == np.float
np.testing.assert_array_equal(v[0], [0, 1, 2, np.nan])
v = model.transform([['a', 'a', 'SPLIT', 'b', 'b', 'b']], 'clip')
assert isinstance(v, list)
assert len(v) == 2
assert v[0].dtype == np.int
assert v[1].dtype == np.int
np.testing.assert_array_equal(v[0], [0, 0])
np.testing.assert_array_equal(v[1], [1, 1, 1])
def test_9():
# what if the input data contains NaN? They should be ignored
model = MarkovStateModel(ergodic_cutoff=0)
seq = [0, 1, 0, 1, np.nan]
model.fit(seq)
assert model.n_states_ == 2
assert model.mapping_ == {0: 0, 1: 1}
if not PY3:
model = MarkovStateModel()
seq = [0, 1, 0, None, 0, 1]
model.fit(seq)
assert model.n_states_ == 2
assert model.mapping_ == {0: 0, 1: 1}
def test_10():
# test inverse transform
model = MarkovStateModel(reversible_type=None, ergodic_cutoff=0)
model.fit([['a', 'b', 'c', 'a', 'a', 'b']])
v = model.inverse_transform([[0, 1, 2]])
assert len(v) == 1
np.testing.assert_array_equal(v[0], ['a', 'b', 'c'])
def test_11():
# test sample
model = MarkovStateModel()
model.fit([[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
sample = model.sample_discrete(n_steps=1000, random_state=0)
assert isinstance(sample, np.ndarray)
assert len(sample) == 1000
bc = np.bincount(sample)
diff = model.populations_ - (bc / np.sum(bc))
assert np.sum(np.abs(diff)) < 0.1
def test_12():
# test eigtransform
model = MarkovStateModel(n_timescales=1)
model.fit([[4, 3, 0, 0, 0, 1, 2, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]])
assert model.mapping_ == {0: 0, 1: 1, 2: 2}
assert len(model.eigenvalues_) == 2
t = model.eigtransform([[0, 1]], right=True)
assert t[0][0] == model.right_eigenvectors_[0, 1]
assert t[0][1] == model.right_eigenvectors_[1, 1]
s = model.eigtransform([[0, 1]], right=False)
assert s[0][0] == model.left_eigenvectors_[0, 1]
assert s[0][1] == model.left_eigenvectors_[1, 1]
def test_eigtransform_2():
model = MarkovStateModel(n_timescales=2)
traj = [4, 3, 0, 0, 0, 1, 2, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 0, 0]
model.fit([traj])
transformed_0 = model.eigtransform([traj], mode='clip')
# clip off the first two states (not ergodic)
assert transformed_0[0].shape == (len(traj) - 2, model.n_timescales)
transformed_1 = model.eigtransform([traj], mode='fill')
assert transformed_1[0].shape == (len(traj), model.n_timescales)
assert np.all(np.isnan(transformed_1[0][:2, :]))
assert not np.any(np.isnan(transformed_1[0][2:]))
def test_13():
model = MarkovStateModel(n_timescales=2)
model.fit([[0, 0, 0, 1, 2, 1, 0, 0, 0, 1, 3, 3, 3, 1, 1, 2, 2, 0, 0]])
left_right = np.dot(model.left_eigenvectors_.T, model.right_eigenvectors_)
# check biorthonormal
np.testing.assert_array_almost_equal(
left_right,
np.eye(3))
# check that the stationary left eigenvector is normalized to be 1
np.testing.assert_almost_equal(model.left_eigenvectors_[:, 0].sum(), 1)
# the left eigenvectors satisfy <\phi_i, \phi_i>_{\mu^{-1}} = 1
for i in range(3):
np.testing.assert_almost_equal(
np.dot(model.left_eigenvectors_[:, i],
model.left_eigenvectors_[:, i] / model.populations_), 1)
# and that the right eigenvectors satisfy <\psi_i, \psi_i>_{\mu} = 1
for i in range(3):
np.testing.assert_almost_equal(
np.dot(model.right_eigenvectors_[:, i],
model.right_eigenvectors_[:, i] *
model.populations_), 1)
def test_14():
from msmbuilder.example_datasets import load_doublewell
from msmbuilder.cluster import NDGrid
from sklearn.pipeline import Pipeline
ds = load_doublewell(random_state=0)
p = Pipeline([
('ndgrid', NDGrid(n_bins_per_feature=100)),
('msm', MarkovStateModel(lag_time=100))
])
p.fit(ds.trajectories)
p.named_steps['msm'].summarize()
def test_sample_1():
# Test that the code actually runs and gives something non-crazy
# Make an ergodic dataset with two gaussian centers offset by 25 units.
chunk = np.random.normal(size=(20000, 3))
data = [np.vstack((chunk, chunk + 25)), np.vstack((chunk + 25, chunk))]
clusterer = cluster.KMeans(n_clusters=2)
msm = MarkovStateModel()
pipeline = sklearn.pipeline.Pipeline(
[("clusterer", clusterer), ("msm", msm)]
)
pipeline.fit(data)
trimmed_assignments = pipeline.transform(data)
# Now let's make the output assignments start with
# zero at the first position.
i0 = trimmed_assignments[0][0]
if i0 == 1:
for m in trimmed_assignments:
m *= -1
m += 1
pairs = msm.draw_samples(trimmed_assignments, 2000)
samples = map_drawn_samples(pairs, data)
mu = np.mean(samples, axis=1)
eq(mu, np.array([[0., 0., 0.0], [25., 25., 25.]]), decimal=1)
# We should make sure we can sample from Trajectory objects too...
# Create a fake topology with 1 atom to match our input dataset
top = md.Topology.from_dataframe(
pd.DataFrame({
"serial": [0], "name": ["HN"], "element": ["H"], "resSeq": [1],
"resName": "RES", "chainID": [0]
}), bonds=np.zeros(shape=(0, 2), dtype='int')
)
# np.newaxis reshapes the data to have a 40000 frames, 1 atom, 3 xyz
trajectories = [md.Trajectory(x[:, np.newaxis], top)
for x in data]
trj_samples = map_drawn_samples(pairs, trajectories)
mu = np.array([t.xyz.mean(0)[0] for t in trj_samples])
eq(mu, np.array([[0., 0., 0.0], [25., 25., 25.]]), decimal=1)
def test_score_1():
# test that GMRQ is equal to the sum of the first n eigenvalues,
# when testing and training on the same dataset.
sequence = [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1,
0, 0, 0, 1, 2, 2, 2, 1, 1, 1, 0, 0]
for n in [0, 1, 2]:
model = MarkovStateModel(verbose=False, n_timescales=n)
model.fit([sequence])
assert_approx_equal(model.score([sequence]), model.eigenvalues_.sum())
assert_approx_equal(model.score([sequence]), model.score_)
|
rmcgibbo/msmbuilder
|
msmbuilder/tests/test_msm.py
|
Python
|
lgpl-2.1
| 11,980
|
[
"Gaussian",
"MDTraj"
] |
aa7d110b13ead67ca9f5db4b393083448cf35503b50165822f22c145c087e886
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-get-queue-cpu-time.py
# Author : Federico Stagni
########################################################################
""" Report CPU length of queue, in seconds
This script is used by the dirac-pilot script to set the CPUTime left, which is a limit for the matching
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.registerSwitch("C:", "CPUNormalizationFactor=", "CPUNormalizationFactor, in case it is known")
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [option|cfgfile]' % Script.scriptName]))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
CPUNormalizationFactor = 0.0
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ("C", "CPUNormalizationFactor"):
CPUNormalizationFactor = float(unprocSw[1])
if __name__ == "__main__":
from DIRAC.WorkloadManagementSystem.Client.CPUNormalization import getCPUTime
cpuTime = getCPUTime(CPUNormalizationFactor)
# I hate this kind of output... PhC
print "CPU time left determined as", cpuTime
DIRAC.exit(0)
|
andresailer/DIRAC
|
WorkloadManagementSystem/scripts/dirac-wms-get-queue-cpu-time.py
|
Python
|
gpl-3.0
| 1,263
|
[
"DIRAC"
] |
b435e719c4a20e42eb6be8b1c62d9f50128fec80186d10eeea963c56cb8190f2
|
"""
.. _statsrefmanual:
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. currentmodule:: scipy.stats
This module contains a large number of probability distributions,
summary and frequency statistics, correlation functions and statistical
tests, masked statistics, kernel density estimation, quasi-Monte Carlo
functionality, and more.
Statistics is a very large area, and there are topics that are out of scope
for SciPy and are covered by other packages. Some of the most important ones
are:
- `statsmodels <https://www.statsmodels.org/stable/index.html>`__:
regression, linear models, time series analysis, extensions to topics
also covered by ``scipy.stats``.
- `Pandas <https://pandas.pydata.org/>`__: tabular data, time series
functionality, interfaces to other statistical languages.
- `PyMC3 <https://docs.pymc.io/>`__: Bayesian statistical
modeling, probabilistic machine learning.
- `scikit-learn <https://scikit-learn.org/>`__: classification, regression,
model selection.
- `Seaborn <https://seaborn.pydata.org/>`__: statistical data visualization.
- `rpy2 <https://rpy2.github.io/>`__: Python to R bridge.
Probability distributions
=========================
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
rv_histogram
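As a minimal illustration (``norm`` is one such `rv_continuous` instance,
listed under "Continuous distributions" below):
>>> from scipy import stats
>>> stats.norm.cdf(0.0)
0.5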
Continuous distributions
------------------------
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
crystalball -- Crystalball
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
genhyperbolic -- Generalized Hyperbolic
geninvgauss -- Generalized Inverse Gaussian
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic
kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic
kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
laplace -- Laplace
laplace_asymmetric -- Asymmetric Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
loguniform -- Log-Uniform
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
moyal -- Moyal
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
norminvgauss -- Normal Inverse Gaussian
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewcauchy -- Skew Cauchy
skewnorm -- Skew normal
studentized_range -- Studentized Range
t -- Student's T
trapezoid -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
--------------------------
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
unitary_group -- U(N) group
random_correlation -- random correlation matrices
multivariate_t -- Multivariate t-distribution
multivariate_hypergeom -- Multivariate hypergeometric distribution
Discrete distributions
----------------------
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
betabinom -- Beta-Binomial
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
nchypergeom_fisher -- Fisher's Noncentral Hypergeometric
nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric
nhypergeom -- Negative Hypergeometric
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
yulesimon -- Yule-Simon
zipf -- Zipf (Zeta)
zipfian -- Zipfian
An overview of statistical functions is given below. Many of these functions
have a similar version in `scipy.stats.mstats` which work for masked arrays.
Summary statistics
==================
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
mode -- Modal value
moment -- Central moment
skew -- Skewness
kstat --
kstatvar --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
variation -- Coefficient of variation
find_repeats
trim_mean
gstd -- Geometric Standard Deviation
iqr
sem
bayes_mvs
mvsdist
entropy
differential_entropy
median_absolute_deviation
median_abs_deviation
bootstrap
Frequency statistics
====================
.. autosummary::
:toctree: generated/
cumfreq
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
Correlation functions
=====================
.. autosummary::
:toctree: generated/
f_oneway
alexandergovern
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
somersd
linregress
siegelslopes
theilslopes
multiscale_graphcorr
Statistical tests
=================
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
cramervonmises
cramervonmises_2samp
power_divergence
kstest
ks_1samp
ks_2samp
epps_singleton_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
brunnermunzel
combine_pvalues
jarque_bera
page_trend_test
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
binomtest
fligner
median_test
mood
skewtest
kurtosistest
normaltest
Quasi-Monte Carlo
=================
.. toctree::
:maxdepth: 4
stats.qmc
Masked statistics functions
===========================
.. toctree::
stats.mstats
Other statistical functionality
===============================
Transformations
---------------
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
yeojohnson
yeojohnson_normmax
yeojohnson_llf
obrientransform
sigmaclip
trimboth
trim1
zmap
zscore
Statistical distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
Random variate generation / CDF Inversion
=========================================
.. autosummary::
:toctree: generated/
rvs_ratio_uniforms
NumericalInverseHermite
Circular statistical functions
------------------------------
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
---------------------------
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.crosstab
contingency.expected_freq
contingency.margins
contingency.relative_risk
contingency.association
fisher_exact
barnard_exact
boschloo_exact
Plot-tests
----------
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
yeojohnson_normplot
Univariate and multivariate kernel density estimation
-----------------------------------------------------
.. autosummary::
:toctree: generated/
gaussian_kde
Warnings used in :mod:`scipy.stats`
-----------------------------------
.. autosummary::
:toctree: generated/
F_onewayConstantInputWarning
F_onewayBadInputSizesWarning
PearsonRConstantInputWarning
PearsonRNearConstantInputWarning
SpearmanRConstantInputWarning
"""
from .stats import *
from .distributions import *
from .morestats import *
from ._binomtest import binomtest
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from . import qmc
from ._multivariate import *
from . import contingency
from .contingency import chi2_contingency
from ._bootstrap import bootstrap
from ._entropy import *
from ._hypotests import *
from ._rvs_sampling import rvs_ratio_uniforms, NumericalInverseHermite
from ._page_trend_test import page_trend_test
from ._mannwhitneyu import mannwhitneyu
__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
e-q/scipy
|
scipy/stats/__init__.py
|
Python
|
bsd-3-clause
| 12,444
|
[
"Gaussian"
] |
97097865763060ca772ab2f1630ea864184b7f7bf8a27da3d8b84bd41a357f24
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module has methods for parsing names and versions of packages from URLs.
The idea is to allow package creators to supply nothing more than the
download location of the package, and figure out version and name information
from there.
**Example:** when spack is given the following URL:
https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz
It can figure out that the package name is ``hdf``, and that it is at version
``4.2.12``. This is useful for making the creation of packages simple: a user
just supplies a URL and skeleton code is generated automatically.
Spack can also figure out that it can most likely download 4.2.6 at this URL:
https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.6/src/hdf-4.2.6.tar.gz
This is useful if a user asks for a package at a particular version number;
spack doesn't need anyone to tell it where to get the tarball even though
it's never been told about that version before.
"""
import os
import re
from six import StringIO
from six.moves.urllib.parse import urlsplit, urlunsplit
import llnl.util.tty as tty
from llnl.util.tty.color import colorize
import spack.error
import spack.util.compression as comp
from spack.version import Version
#
# Note: We call the input to most of these functions a "path" but the functions
# work on paths and URLs. There's not a good word for both of these, but
# "path" seemed like the most generic term.
#
def find_list_urls(url):
r"""Find good list URLs for the supplied URL.
By default, returns the dirname of the archive path.
Provides special treatment for the following websites, which have a
unique list URL different from the dirname of the download URL:
========= =======================================================
GitHub https://github.com/<repo>/<name>/releases
GitLab https://gitlab.\*/<repo>/<name>/tags
BitBucket https://bitbucket.org/<repo>/<name>/downloads/?tab=tags
CRAN https://\*.r-project.org/src/contrib/Archive/<name>
========= =======================================================
Parameters:
url (str): The download URL for the package
Returns:
set: One or more list URLs for the package
"""
url_types = [
# GitHub
# e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz
(r'(.*github\.com/[^/]+/[^/]+)',
lambda m: m.group(1) + '/releases'),
# GitLab API endpoint
# e.g. https://gitlab.dkrz.de/api/v4/projects/k202009%2Flibaec/repository/archive.tar.gz?sha=v1.0.2
(r'(.*gitlab[^/]+)/api/v4/projects/([^/]+)%2F([^/]+)',
lambda m: m.group(1) + '/' + m.group(2) + '/' + m.group(3) + '/tags'),
# GitLab non-API endpoint
# e.g. https://gitlab.dkrz.de/k202009/libaec/uploads/631e85bcf877c2dcaca9b2e6d6526339/libaec-1.0.0.tar.gz
(r'(.*gitlab[^/]+/(?!api/v4/projects)[^/]+/[^/]+)',
lambda m: m.group(1) + '/tags'),
# BitBucket
# e.g. https://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2
(r'(.*bitbucket.org/[^/]+/[^/]+)',
lambda m: m.group(1) + '/downloads/?tab=tags'),
# CRAN
# e.g. https://cran.r-project.org/src/contrib/Rcpp_0.12.9.tar.gz
# e.g. https://cloud.r-project.org/src/contrib/rgl_0.98.1.tar.gz
(r'(.*\.r-project\.org/src/contrib)/([^_]+)',
lambda m: m.group(1) + '/Archive/' + m.group(2)),
]
list_urls = set([os.path.dirname(url)])
for pattern, fun in url_types:
match = re.search(pattern, url)
if match:
list_urls.add(fun(match))
return list_urls
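# Illustrative sketch, reusing the GitHub example from the comments above: the
# result should contain both the dirname of the download URL and the repo's
# releases page (set ordering is arbitrary):
#
#   find_list_urls('https://github.com/llnl/callpath/archive/v1.0.1.tar.gz')
#   # -> {'https://github.com/llnl/callpath/archive',
#   #     'https://github.com/llnl/callpath/releases'}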
def strip_query_and_fragment(path):
try:
components = urlsplit(path)
stripped = components[:3] + (None, None)
query, frag = components[3:5]
suffix = ''
if query:
suffix += '?' + query
if frag:
suffix += '#' + frag
return (urlunsplit(stripped), suffix)
except ValueError:
tty.debug("Got error parsing path %s" % path)
return (path, '') # Ignore URL parse errors here
def strip_version_suffixes(path):
"""Some tarballs contain extraneous information after the version:
* ``bowtie2-2.2.5-source``
* ``libevent-2.0.21-stable``
* ``cuda_8.0.44_linux.run``
These strings are not part of the version number and should be ignored.
This function strips those suffixes off and returns the remaining string.
The goal is that the version is always the last thing in ``path``:
* ``bowtie2-2.2.5``
* ``libevent-2.0.21``
* ``cuda_8.0.44``
Args:
path (str): The filename or URL for the package
Returns:
str: The ``path`` with any extraneous suffixes removed
"""
# NOTE: This could be done with complicated regexes in parse_version_offset
# NOTE: The problem is that we would have to add these regexes to the end
# NOTE: of every single version regex. Easier to just strip them off
# NOTE: permanently
suffix_regexes = [
# Download type
r'[Ii]nstall',
r'all',
r'code',
r'[Ss]ources?',
r'file',
r'full',
r'single',
r'with[a-zA-Z_-]+',
r'rock',
r'src(_0)?',
r'public',
r'bin',
r'binary',
r'run',
r'[Uu]niversal',
r'jar',
r'complete',
r'dynamic',
r'oss',
r'gem',
r'tar',
r'sh',
# Download version
r'release',
r'bin',
r'stable',
r'[Ff]inal',
r'rel',
r'orig',
r'dist',
r'\+',
# License
r'gpl',
# Arch
# Needs to come before and after OS, appears in both orders
r'ia32',
r'intel',
r'amd64',
r'linux64',
r'x64',
r'64bit',
r'x86[_-]64',
r'i586_64',
r'x86',
r'i[36]86',
r'ppc64(le)?',
r'armv?(7l|6l|64)',
# Other
r'cpp',
r'gtk',
r'incubating',
# OS
r'[Ll]inux(_64)?',
r'LINUX',
r'[Uu]ni?x',
r'[Ss]un[Oo][Ss]',
r'[Mm]ac[Oo][Ss][Xx]?',
r'[Oo][Ss][Xx]',
r'[Dd]arwin(64)?',
r'[Aa]pple',
r'[Ww]indows',
r'[Ww]in(64|32)?',
r'[Cc]ygwin(64|32)?',
r'[Mm]ingw',
r'centos',
# Arch
# Needs to come before and after OS, appears in both orders
r'ia32',
r'intel',
r'amd64',
r'linux64',
r'x64',
r'64bit',
r'x86[_-]64',
r'i586_64',
r'x86',
r'i[36]86',
r'ppc64(le)?',
r'armv?(7l|6l|64)?',
# PyPI
r'[._-]py[23].*\.whl',
r'[._-]cp[23].*\.whl',
r'[._-]win.*\.exe',
]
for regex in suffix_regexes:
# Remove the suffix from the end of the path
# This may be done multiple times
path = re.sub(r'[._-]?' + regex + '$', '', path)
return path
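# Doctest-style sketch (expected values taken from the docstring above):
#
#   >>> strip_version_suffixes('bowtie2-2.2.5-source')
#   'bowtie2-2.2.5'
#   >>> strip_version_suffixes('cuda_8.0.44_linux.run')
#   'cuda_8.0.44'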
def strip_name_suffixes(path, version):
"""Most tarballs contain a package name followed by a version number.
However, some also contain extraneous information in-between the name
and version:
* ``rgb-1.0.6``
* ``converge_install_2.3.16``
* ``jpegsrc.v9b``
These strings are not part of the package name and should be ignored.
This function strips the version number and any extraneous suffixes
off and returns the remaining string. The goal is that the name is
always the last thing in ``path``:
* ``rgb``
* ``converge``
* ``jpeg``
Args:
path (str): The filename or URL for the package
version (str): The version detected for this URL
Returns:
str: The ``path`` with any extraneous suffixes removed
"""
# NOTE: This could be done with complicated regexes in parse_name_offset
# NOTE: The problem is that we would have to add these regexes to every
# NOTE: single name regex. Easier to just strip them off permanently
suffix_regexes = [
# Strip off the version and anything after it
# name-ver
# name_ver
# name.ver
r'[._-][rvV]?' + str(version) + '.*',
# namever
r'V?' + str(version) + '.*',
# Download type
r'install',
r'[Ss]rc',
r'(open)?[Ss]ources?',
r'[._-]open',
r'[._-]archive',
r'[._-]std',
r'[._-]bin',
r'Software',
# Download version
r'release',
r'snapshot',
r'distrib',
r'everywhere',
r'latest',
# Arch
r'Linux(64)?',
r'x86_64',
# VCS
r'0\+bzr',
# License
r'gpl',
# Needs to come before and after gpl, appears in both orders
r'[._-]x11',
r'gpl',
]
for regex in suffix_regexes:
# Remove the suffix from the end of the path
# This may be done multiple times
path = re.sub('[._-]?' + regex + '$', '', path)
return path
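# Doctest-style sketch (expected values follow the docstring above; the second
# argument is the version already detected for the path):
#
#   >>> strip_name_suffixes('rgb-1.0.6', '1.0.6')
#   'rgb'
#   >>> strip_name_suffixes('jpegsrc.v9b', '9b')
#   'jpeg'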
def split_url_extension(path):
"""Some URLs have a query string, e.g.:
1. https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true
2. http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz
3. https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0
In (1), the query string needs to be stripped to get at the
extension, but in (2) & (3), the filename is IN a single final query
argument.
This strips the URL into three pieces: ``prefix``, ``ext``, and ``suffix``.
The suffix contains anything that was stripped off the URL to
get at the file extension. In (1), it will be ``'?raw=true'``, but
in (2), it will be empty. In (3) the suffix is a parameter that follows
after the file extension, e.g.:
1. ``('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7', '.tgz', '?raw=true')``
2. ``('http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin', '.tar.gz', None)``
3. ``('https://gitlab.kitware.com/vtk/vtk/repository/archive', '.tar.bz2', '?ref=v7.0.0')``
"""
prefix, ext, suffix = path, '', ''
# Strip off sourceforge download suffix.
# e.g. https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download
match = re.search(r'(.*(?:sourceforge\.net|sf\.net)/.*)(/download)$', path)
if match:
prefix, suffix = match.groups()
ext = comp.extension(prefix)
if ext is not None:
prefix = comp.strip_extension(prefix)
else:
prefix, suf = strip_query_and_fragment(prefix)
ext = comp.extension(prefix)
prefix = comp.strip_extension(prefix)
suffix = suf + suffix
if ext is None:
ext = ''
return prefix, ext, suffix
def determine_url_file_extension(path):
"""This returns the type of archive a URL refers to. This is
sometimes confusing because of URLs like:
(1) https://github.com/petdance/ack/tarball/1.93_02
Where the URL doesn't actually contain the filename. We need
to know what type it is so that we can appropriately name files
in mirrors.
"""
match = re.search(r'github.com/.+/(zip|tar)ball/', path)
if match:
if match.group(1) == 'zip':
return 'zip'
elif match.group(1) == 'tar':
return 'tar.gz'
prefix, ext, suffix = split_url_extension(path)
return ext
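# Illustrative sketch, mirroring example (1) in the docstring: the GitHub tarball
# URL carries no filename, so the archive type is inferred from the URL form:
#
#   >>> determine_url_file_extension('https://github.com/petdance/ack/tarball/1.93_02')
#   'tar.gz'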
def parse_version_offset(path):
"""Try to extract a version string from a filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
tuple of (Version, int, int, int, str): A tuple containing:
version of the package,
first index of version,
length of version string,
the index of the matching regex
the matching regex
Raises:
UndetectableVersionError: If the URL does not match any regexes
"""
original_path = path
# path: The prefix of the URL, everything before the ext and suffix
# ext: The file extension
# suffix: Any kind of query string that begins with a '?'
path, ext, suffix = split_url_extension(path)
# stem: Everything from path after the final '/'
original_stem = os.path.basename(path)
# Try to strip off anything after the version number
stem = strip_version_suffixes(original_stem)
# Assumptions:
#
# 1. version always comes after the name
# 2. separators include '-', '_', and '.'
# 3. names can contain A-Z, a-z, 0-9, '+', separators
# 4. versions can contain A-Z, a-z, 0-9, separators
# 5. versions always start with a digit
# 6. versions are often prefixed by a 'v' or 'r' character
# 7. separators are most reliable to determine name/version boundaries
# List of the following format:
#
# [
# (regex, string),
# ...
# ]
#
# The first regex that matches string will be used to determine
# the version of the package. Therefore, hyperspecific regexes should
# come first while generic, catch-all regexes should come last.
# With that said, regular expressions are slow, so if possible, put
# ones that only catch one or two URLs at the bottom.
version_regexes = [
# 1st Pass: Simplest case
# Assume name contains no digits and version contains no letters
# e.g. libpng-1.6.27
(r'^[a-zA-Z+._-]+[._-]v?(\d[\d._-]*)$', stem),
# 2nd Pass: Version only
# Assume version contains no letters
# ver
# e.g. 3.2.7, 7.0.2-7, v3.3.0, v1_6_3
(r'^v?(\d[\d._-]*)$', stem),
# 3rd Pass: No separator characters are used
# Assume name contains no digits
# namever
# e.g. turbolinux702, nauty26r7
(r'^[a-zA-Z+]*(\d[\da-zA-Z]*)$', stem),
# 4th Pass: A single separator character is used
# Assume name contains no digits
# name-name-ver-ver
# e.g. panda-2016-03-07, gts-snapshot-121130, cdd-061a
(r'^[a-zA-Z+-]*(\d[\da-zA-Z-]*)$', stem),
# name_name_ver_ver
# e.g. tinyxml_2_6_2, boost_1_55_0, tbb2017_20161128
(r'^[a-zA-Z+_]*(\d[\da-zA-Z_]*)$', stem),
# name.name.ver.ver
# e.g. prank.source.150803, jpegsrc.v9b, atlas3.11.34, geant4.10.01.p03
(r'^[a-zA-Z+.]*(\d[\da-zA-Z.]*)$', stem),
# 5th Pass: Two separator characters are used
# Name may contain digits, version may contain letters
# name-name-ver.ver
# e.g. m4-1.4.17, gmp-6.0.0a, launchmon-v1.0.2
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),
# name-name-ver_ver
# e.g. icu4c-57_1
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z_]*)$', stem),
# name_name_ver.ver
# e.g. superlu_dist_4.1, pexsi_v0.9.0
(r'^[a-zA-Z\d+_]+_v?(\d[\da-zA-Z.]*)$', stem),
# name_name.ver.ver
# e.g. fer_source.v696
(r'^[a-zA-Z\d+_]+\.v?(\d[\da-zA-Z.]*)$', stem),
# name_ver-ver
# e.g. Bridger_r2014-12-01
(r'^[a-zA-Z\d+]+_r?(\d[\da-zA-Z-]*)$', stem),
# name-name-ver.ver-ver.ver
# e.g. sowing-1.1.23-p1, bib2xhtml-v3.0-15-gf506, 4.6.3-alpha04
(r'^(?:[a-zA-Z\d+-]+-)?v?(\d[\da-zA-Z.-]*)$', stem),
# namever.ver-ver.ver
# e.g. go1.4-bootstrap-20161024
(r'^[a-zA-Z+]+v?(\d[\da-zA-Z.-]*)$', stem),
# 6th Pass: All three separator characters are used
# Name may contain digits, version may contain letters
# name_name-ver.ver
# e.g. the_silver_searcher-0.32.0, sphinx_rtd_theme-0.1.10a0
(r'^[a-zA-Z\d+_]+-v?(\d[\da-zA-Z.]*)$', stem),
# name.name_ver.ver-ver.ver
# e.g. TH.data_1.0-8, XML_3.98-1.4
(r'^[a-zA-Z\d+.]+_v?(\d[\da-zA-Z.-]*)$', stem),
# name-name-ver.ver_ver.ver
# e.g. pypar-2.1.5_108
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z._]*)$', stem),
# name.name_name-ver.ver
# e.g. tap.py-1.6, backports.ssl_match_hostname-3.5.0.1
(r'^[a-zA-Z\d+._]+-v?(\d[\da-zA-Z.]*)$', stem),
# name-namever.ver_ver.ver
# e.g. STAR-CCM+11.06.010_02
(r'^[a-zA-Z+-]+(\d[\da-zA-Z._]*)$', stem),
# name-name_name-ver.ver
# e.g. PerlIO-utf8_strict-0.002
(r'^[a-zA-Z\d+_-]+-v?(\d[\da-zA-Z.]*)$', stem),
# 7th Pass: Specific VCS
# bazaar
# e.g. libvterm-0+bzr681
(r'bzr(\d[\da-zA-Z._-]*)$', stem),
# 8th Pass: Query strings
# e.g. https://gitlab.cosma.dur.ac.uk/api/v4/projects/swift%2Fswiftsim/repository/archive.tar.gz?sha=v0.3.0
# e.g. https://gitlab.kitware.com/api/v4/projects/icet%2Ficet/repository/archive.tar.bz2?sha=IceT-2.1.1
# e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
# e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
# e.g. https://software.broadinstitute.org/gatk/download/auth?package=GATK-archive&version=3.8-1-0-gf15c1c3ef
(r'[?&](?:sha|ref|version)=[a-zA-Z\d+-]*[_-]?v?(\d[\da-zA-Z._-]*)$', suffix), # noqa: E501
# e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
# e.g. http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz
# e.g. https://evtgen.hepforge.org/downloads?f=EvtGen-01.07.00.tar.gz
# e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
(r'[?&](?:filename|f|get)=[a-zA-Z\d+-]+[_-]v?(\d[\da-zA-Z.]*)', stem),
# 9th Pass: Version in path
# github.com/repo/name/releases/download/vver/name
# e.g. https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow
(r'github\.com/[^/]+/[^/]+/releases/download/[a-zA-Z+._-]*v?(\d[\da-zA-Z._-]*)/', path), # noqa: E501
# e.g. ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.26/ncbi.tar.gz
(r'(\d[\da-zA-Z._-]*)/[^/]+$', path),
]
for i, version_regex in enumerate(version_regexes):
regex, match_string = version_regex
match = re.search(regex, match_string)
if match and match.group(1) is not None:
version = match.group(1)
start = match.start(1)
# If we matched from the stem or suffix, we need to add offset
offset = 0
if match_string is stem:
offset = len(path) - len(original_stem)
elif match_string is suffix:
offset = len(path)
if ext:
offset += len(ext) + 1 # .tar.gz is converted to tar.gz
start += offset
return version, start, len(version), i, regex
raise UndetectableVersionError(original_path)
def parse_version(path):
"""Try to extract a version string from a filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
spack.version.Version: The version of the package
Raises:
UndetectableVersionError: If the URL does not match any regexes
"""
version, start, length, i, regex = parse_version_offset(path)
return Version(version)
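# Minimal sketch (the libpng filename is taken from the regex comments in
# parse_version_offset; the repr of the result is shown informally):
#
#   >>> parse_version('libpng-1.6.27.tar.gz')
#   Version('1.6.27')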
def parse_name_offset(path, v=None):
"""Try to determine the name of a package from its filename or URL.
Args:
path (str): The filename or URL for the package
v (str): The version of the package
Returns:
tuple of (str, int, int, int, str): A tuple containing:
name of the package,
first index of name,
length of name,
the index of the matching regex
the matching regex
Raises:
UndetectableNameError: If the URL does not match any regexes
"""
original_path = path
# We really need to know the version of the package
# This helps us prevent collisions between the name and version
if v is None:
try:
v = parse_version(path)
except UndetectableVersionError:
# Not all URLs contain a version. We still want to be able
# to determine a name if possible.
v = 'unknown'
# path: The prefix of the URL, everything before the ext and suffix
# ext: The file extension
# suffix: Any kind of query string that begins with a '?'
path, ext, suffix = split_url_extension(path)
# stem: Everything from path after the final '/'
original_stem = os.path.basename(path)
# Try to strip off anything after the package name
stem = strip_name_suffixes(original_stem, v)
# List of the following format:
#
# [
# (regex, string),
# ...
# ]
#
# The first regex that matches string will be used to determine
# the name of the package. Therefore, hyperspecific regexes should
# come first while generic, catch-all regexes should come last.
# With that said, regular expressions are slow, so if possible, put
# ones that only catch one or two URLs at the bottom.
name_regexes = [
# 1st Pass: Common repositories
# GitHub: github.com/repo/name/
# e.g. https://github.com/nco/nco/archive/4.6.2.tar.gz
(r'github\.com/[^/]+/([^/]+)', path),
# GitLab API endpoint: gitlab.*/api/v4/projects/NAMESPACE%2Fname/
# e.g. https://gitlab.cosma.dur.ac.uk/api/v4/projects/swift%2Fswiftsim/repository/archive.tar.gz?sha=v0.3.0
(r'gitlab[^/]+/api/v4/projects/[^/]+%2F([^/]+)', path),
# GitLab non-API endpoint: gitlab.*/repo/name/
# e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
(r'gitlab[^/]+/(?!api/v4/projects)[^/]+/([^/]+)', path),
# Bitbucket: bitbucket.org/repo/name/
# e.g. https://bitbucket.org/glotzer/hoomd-blue/get/v1.3.3.tar.bz2
(r'bitbucket\.org/[^/]+/([^/]+)', path),
# PyPI: pypi.(python.org|io)/packages/source/first-letter/name/
# e.g. https://pypi.python.org/packages/source/m/mpmath/mpmath-all-0.19.tar.gz
# e.g. https://pypi.io/packages/source/b/backports.ssl_match_hostname/backports.ssl_match_hostname-3.5.0.1.tar.gz
(r'pypi\.(?:python\.org|io)/packages/source/[A-Za-z\d]/([^/]+)', path),
# 2nd Pass: Query strings
# ?filename=name-ver.ver
# e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
(r'\?filename=([A-Za-z\d+-]+)$', stem),
# ?f=name-ver.ver
# e.g. https://evtgen.hepforge.org/downloads?f=EvtGen-01.07.00.tar.gz
(r'\?f=([A-Za-z\d+-]+)$', stem),
# ?package=name
# e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
(r'\?package=([A-Za-z\d+-]+)', stem),
# ?package=name-version
(r'\?package=([A-Za-z\d]+)', suffix),
# download.php
# e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
(r'([^/]+)/download.php$', path),
# 3rd Pass: Name followed by version in archive
(r'^([A-Za-z\d+\._-]+)$', stem),
]
for i, name_regex in enumerate(name_regexes):
regex, match_string = name_regex
match = re.search(regex, match_string)
if match:
name = match.group(1)
start = match.start(1)
# If we matched from the stem or suffix, we need to add offset
offset = 0
if match_string is stem:
offset = len(path) - len(original_stem)
elif match_string is suffix:
offset = len(path)
if ext:
offset += len(ext) + 1 # .tar.gz is converted to tar.gz
start += offset
return name, start, len(name), i, regex
raise UndetectableNameError(original_path)
def parse_name(path, ver=None):
"""Try to determine the name of a package from its filename or URL.
Args:
path (str): The filename or URL for the package
ver (str): The version of the package
Returns:
str: The name of the package
Raises:
UndetectableNameError: If the URL does not match any regexes
"""
name, start, length, i, regex = parse_name_offset(path, ver)
return name
def parse_name_and_version(path):
"""Try to determine the name of a package and extract its version
from its filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
tuple of (str, Version): A tuple containing:
The name of the package
The version of the package
Raises:
UndetectableVersionError: If the URL does not match any regexes
UndetectableNameError: If the URL does not match any regexes
"""
ver = parse_version(path)
name = parse_name(path, ver)
return (name, ver)
def insensitize(string):
"""Change upper and lowercase letters to be case insensitive in
the provided string. e.g., 'a' becomes '[Aa]', 'B' becomes
'[bB]', etc. Use for building regexes."""
def to_ins(match):
char = match.group(1)
return '[%s%s]' % (char.lower(), char.upper())
return re.sub(r'([a-zA-Z])', to_ins, string)
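# Small sketch of the substitution (note the code emits the lowercase letter
# first, e.g. 'B' -> '[bB]'):
#
#   >>> insensitize('aB')
#   '[aA][bB]'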
def cumsum(elts, init=0, fn=lambda x: x):
"""Return cumulative sum of result of fn on each element in elts."""
sums = []
s = init
for i, e in enumerate(elts):
sums.append(s)
s += fn(e)
return sums
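# Illustrative example (added for clarity): cumsum returns the running total
# *before* each element, i.e. an exclusive prefix sum:
#
#   cumsum([1, 2, 3])   # -> [0, 1, 3]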
def find_all(substring, string):
"""Returns a list containing the indices of
every occurrence of substring in string."""
occurrences = []
index = 0
while index < len(string):
index = string.find(substring, index)
if index == -1:
break
occurrences.append(index)
index += len(substring)
return occurrences
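# Illustrative example (added for clarity):
#
#   find_all('2.3', 'hdf-2.3/hdf-2.3.tar.gz')   # -> [4, 12]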
def substitution_offsets(path):
"""This returns offsets for substituting versions and names in the
provided path. It is a helper for :func:`substitute_version`.
"""
# Get name and version offsets
try:
ver, vs, vl, vi, vregex = parse_version_offset(path)
name, ns, nl, ni, nregex = parse_name_offset(path, ver)
except UndetectableNameError:
return (None, -1, -1, (), ver, vs, vl, (vs,))
except UndetectableVersionError:
try:
name, ns, nl, ni, nregex = parse_name_offset(path)
return (name, ns, nl, (ns,), None, -1, -1, ())
except UndetectableNameError:
return (None, -1, -1, (), None, -1, -1, ())
# Find the index of every occurrence of name and ver in path
name_offsets = find_all(name, path)
ver_offsets = find_all(ver, path)
return (name, ns, nl, name_offsets,
ver, vs, vl, ver_offsets)
def wildcard_version(path):
"""Find the version in the supplied path, and return a regular expression
that will match this path with any version in its place.
"""
# Get version so we can replace it with a wildcard
version = parse_version(path)
# Split path by versions
vparts = path.split(str(version))
# Replace each version with a generic capture group to find versions
# and escape everything else so it's not interpreted as a regex
result = r'(\d.*)'.join(re.escape(vp) for vp in vparts)
return result
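# Illustrative example (added for clarity; the exact escaping of the literal
# parts depends on the Python version's re.escape behaviour):
#
#   wildcard_version('http://www.mr511.de/software/libelf-0.8.13.tar.gz')
#   # -> a pattern roughly like r'http\://www\.mr511\.de/software/libelf\-(\d.*)\.tar\.gz'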
def substitute_version(path, new_version):
"""Given a URL or archive name, find the version in the path and
substitute the new version for it. Replace all occurrences of
the version *if* they don't overlap with the package name.
Simple example:
.. code-block:: python
substitute_version('http://www.mr511.de/software/libelf-0.8.13.tar.gz', '2.9.3')
>>> 'http://www.mr511.de/software/libelf-2.9.3.tar.gz'
Complex example:
.. code-block:: python
substitute_version('https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz', '2.3')
>>> 'https://www.hdfgroup.org/ftp/HDF/releases/HDF2.3/src/hdf-2.3.tar.gz'
"""
(name, ns, nl, noffs,
ver, vs, vl, voffs) = substitution_offsets(path)
new_path = ''
last = 0
for vo in voffs:
new_path += path[last:vo]
new_path += str(new_version)
last = vo + vl
new_path += path[last:]
return new_path
def color_url(path, **kwargs):
"""Color the parts of the url according to Spack's parsing.
Colors are:
| Cyan: The version found by :func:`parse_version_offset`.
| Red: The name found by :func:`parse_name_offset`.
| Green: Instances of version string from :func:`substitute_version`.
| Magenta: Instances of the name (protected from substitution).
Args:
path (str): The filename or URL for the package
errors (bool): Append parse errors at end of string.
subs (bool): Color substitutions as well as parsed name/version.
"""
errors = kwargs.get('errors', False)
subs = kwargs.get('subs', False)
(name, ns, nl, noffs,
ver, vs, vl, voffs) = substitution_offsets(path)
nends = [no + nl - 1 for no in noffs]
vends = [vo + vl - 1 for vo in voffs]
nerr = verr = 0
out = StringIO()
for i in range(len(path)):
if i == vs:
out.write('@c')
verr += 1
elif i == ns:
out.write('@r')
nerr += 1
elif subs:
if i in voffs:
out.write('@g')
elif i in noffs:
out.write('@m')
out.write(path[i])
if i == vs + vl - 1:
out.write('@.')
verr += 1
elif i == ns + nl - 1:
out.write('@.')
nerr += 1
elif subs:
if i in vends or i in nends:
out.write('@.')
if errors:
if nerr == 0:
out.write(" @r{[no name]}")
if verr == 0:
out.write(" @r{[no version]}")
if nerr == 1:
out.write(" @r{[incomplete name]}")
if verr == 1:
out.write(" @r{[incomplete version]}")
return colorize(out.getvalue())
class UrlParseError(spack.error.SpackError):
"""Raised when the URL module can't parse something correctly."""
def __init__(self, msg, path):
super(UrlParseError, self).__init__(msg)
self.path = path
class UndetectableVersionError(UrlParseError):
"""Raised when we can't parse a version from a string."""
def __init__(self, path):
super(UndetectableVersionError, self).__init__(
"Couldn't detect version in: " + path, path)
class UndetectableNameError(UrlParseError):
"""Raised when we can't parse a package name from a string."""
def __init__(self, path):
super(UndetectableNameError, self).__init__(
"Couldn't parse package name in: " + path, path)
|
rspavel/spack
|
lib/spack/spack/url.py
|
Python
|
lgpl-2.1
| 31,271
|
[
"BLAST",
"HOOMD-blue",
"VTK"
] |
b4954db5173718405ad7051c7f7275a43acea2db13b43bed09edc1cf48bc0aac
|
#!/usr/bin/env python
# This shows how to probe a dataset with a plane. The probed data is
# then contoured.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Read data.
pl3d = vtk.vtkPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
# We create three planes and position them in the correct position
# using transform filters. They are then appended together and used as
# a probe.
plane = vtk.vtkPlaneSource()
plane.SetResolution(50, 50)
transP1 = vtk.vtkTransform()
transP1.Translate(3.7, 0.0, 28.37)
transP1.Scale(5, 5, 5)
transP1.RotateY(90)
tpd1 = vtk.vtkTransformPolyDataFilter()
tpd1.SetInputConnection(plane.GetOutputPort())
tpd1.SetTransform(transP1)
outTpd1 = vtk.vtkOutlineFilter()
outTpd1.SetInputConnection(tpd1.GetOutputPort())
mapTpd1 = vtk.vtkPolyDataMapper()
mapTpd1.SetInputConnection(outTpd1.GetOutputPort())
tpd1Actor = vtk.vtkActor()
tpd1Actor.SetMapper(mapTpd1)
tpd1Actor.GetProperty().SetColor(0, 0, 0)
transP2 = vtk.vtkTransform()
transP2.Translate(9.2, 0.0, 31.20)
transP2.Scale(5, 5, 5)
transP2.RotateY(90)
tpd2 = vtk.vtkTransformPolyDataFilter()
tpd2.SetInputConnection(plane.GetOutputPort())
tpd2.SetTransform(transP2)
outTpd2 = vtk.vtkOutlineFilter()
outTpd2.SetInputConnection(tpd2.GetOutputPort())
mapTpd2 = vtk.vtkPolyDataMapper()
mapTpd2.SetInputConnection(outTpd2.GetOutputPort())
tpd2Actor = vtk.vtkActor()
tpd2Actor.SetMapper(mapTpd2)
tpd2Actor.GetProperty().SetColor(0, 0, 0)
transP3 = vtk.vtkTransform()
transP3.Translate(13.27, 0.0, 33.30)
transP3.Scale(5, 5, 5)
transP3.RotateY(90)
tpd3 = vtk.vtkTransformPolyDataFilter()
tpd3.SetInputConnection(plane.GetOutputPort())
tpd3.SetTransform(transP3)
outTpd3 = vtk.vtkOutlineFilter()
outTpd3.SetInputConnection(tpd3.GetOutputPort())
mapTpd3 = vtk.vtkPolyDataMapper()
mapTpd3.SetInputConnection(outTpd3.GetOutputPort())
tpd3Actor = vtk.vtkActor()
tpd3Actor.SetMapper(mapTpd3)
tpd3Actor.GetProperty().SetColor(0, 0, 0)
appendF = vtk.vtkAppendPolyData()
appendF.AddInput(tpd1.GetOutput())
appendF.AddInput(tpd2.GetOutput())
appendF.AddInput(tpd3.GetOutput())
# The vtkProbeFilter takes two inputs. One is a dataset to use as the
# probe geometry (SetInput); the other is the data to probe
# (SetSource). The output dataset structure (geometry and topology) of
# the probe is the same as the structure of the input. The probing
# process generates new data values resampled from the source.
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(appendF.GetOutputPort())
probe.SetSource(pl3d.GetOutput())
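# Note (added): AddInput/SetSource are VTK 5-era API calls; with VTK 6+ pipelines
# these would typically be AddInputData/AddInputConnection and
# SetSourceData/SetSourceConnection instead.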
contour = vtk.vtkContourFilter()
contour.SetInputConnection(probe.GetOutputPort())
contour.GenerateValues(50, pl3d.GetOutput().GetScalarRange())
contourMapper = vtk.vtkPolyDataMapper()
contourMapper.SetInputConnection(contour.GetOutputPort())
contourMapper.SetScalarRange(pl3d.GetOutput().GetScalarRange())
planeActor = vtk.vtkActor()
planeActor.SetMapper(contourMapper)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(pl3d.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(outlineActor)
ren.AddActor(planeActor)
ren.AddActor(tpd1Actor)
ren.AddActor(tpd2Actor)
ren.AddActor(tpd3Actor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.SetClippingRange(3.95297, 50)
cam1.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam1.SetPosition(-12.3332, 31.7479, 41.2387)
cam1.SetViewUp(0.060772, -0.319905, 0.945498)
iren.Initialize()
renWin.Render()
iren.Start()
|
CMUSV-VisTrails/WorkflowRecommendation
|
examples/vtk_examples/VisualizationAlgorithms/probeComb.py
|
Python
|
bsd-3-clause
| 4,014
|
[
"VTK"
] |
b0a73daffe9f74c229e89ee6097679d114af8a0571a01af2bd4a6d2e26d8c430
|
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import datetime
import time
import random
import xbmc
import xbmcgui
from helpers import htmlentityhelper
from helpers import prefixhelper
from helpers import encodinghelper
import addonsettings
from logger import Logger
class MediaItem:
"""Main class that represent items that are retrieved in XOT. They are used
to fill the lists and have MediaItemParts which have MediaStreams in this
hierarchy:
MediaItem
+- MediaItemPart
| +- MediaStream
| +- MediaStream
| +- MediaStream
+- MediaItemPart
| +- MediaStream
| +- MediaStream
| +- MediaStream
"""
#noinspection PyShadowingBuiltins
def __init__(self, title, url, type="folder", parent=None): # @ReservedAssignment
"""Creates a new MediaItem
Arguments:
title : string - the title of the item, used for appearance in lists.
url : string - url that is used for further information retrieval.
Keyword Arguments:
type : [opt] string - type of MediaItem (folder, video, audio).
Defaults to 'folder'.
parent : [opt] MediaItem - the parent of the current item. None is
the default.
The <url> can contain a url to a site from which more info about the item can be
retrieved, for instance to retrieve the media url for a video item, or the
child items in case of a folder.
The title will be de-prefixed using the prefixhelper.PrefixHelper class.
It is essential that no encoding (like UTF8) is specified in the title of
the item; this is all taken care of when creating XBMC items in the
different methods.
"""
dePrefixer = prefixhelper.PrefixHelper()
name = title.strip()
if addonsettings.AddonSettings.GetPluginMode():
self.name = name
else:
self.name = dePrefixer.GetDePrefixedName(name)
self.url = url
self.MediaItemParts = []
self.description = ""
self.thumb = "" # : Local image for the thumbnail of episode
self.thumbUrl = "" # : The url of the thumb
self.fanart = "" # : The fanart url
self.icon = "" # : low quality icon for list
self.__date = "" # : value show in interface
self.__timestamp = datetime.datetime.min # : value for sorting, this one is set to minimum
# so if none is set, it's shown at the bottom
self.type = type # : video, audio, folder, append, page, playlist
self.parent = parent
self.complete = False
self.error = False
self.downloaded = False
self.downloadable = False
self.items = []
self.httpHeaders = dict()
self.rating = None
# GUID used for identification of the object. Do not set from script, MD5 needed
# to prevent UTF8 issues
try:
self.guid = "%s%s" % (encodinghelper.EncodingHelper.EncodeMD5(title), encodinghelper.EncodingHelper.EncodeMD5(url or ""))
# self.guid = ("%s-%s" % (encodinghelper.EncodingHelper.EncodeMD5(title), url)).replace(" ", "")
except:
Logger.Error("Error setting GUID for title:'%s' and url:'%s'. Falling back to UUID", title, url, exc_info=True)
self.guid = self.__GetUUID()
self.guidValue = int("0x%s" % (self.guid,), 0)
self.channels = [] # only needed for Kanalenkiezer
def AppendSingleStream(self, url, bitrate=0, subtitle=None):
"""Appends a single stream to a new MediaPart of this MediaItem
Arguments:
url : string - url of the stream.
Keyword Arguments:
bitrate : [opt] integer - bitrate of the stream (default = 0)
subtitle : [opt] string - url of the subtitle of the mediapart
Returns a reference to the created MediaPart
This method creates a new MediaPart item and adds the provided
stream to its MediaStreams collection. The newly created MediaPart
is then added to the MediaItem's MediaParts collection.
"""
newPart = MediaItemPart(self.name, url, bitrate, subtitle)
self.MediaItemParts.append(newPart)
return newPart
def CreateNewEmptyMediaPart(self):
"""Adds an empty MediaPart to the MediaItem
Returns:
The new MediaPart object (as a reference) that was appended.
This method is used to create an empty MediaPart that can be used to
add new streams to. The newly created MediaPart is appended to the
MediaItem.MediaParts list.
"""
newPart = MediaItemPart(self.name)
self.MediaItemParts.append(newPart)
return newPart
def HasMediaItemParts(self):
"""Return True if there are any MediaItemParts present with streams for
this MediaItem
"""
for part in self.MediaItemParts:
if len(part.MediaStreams) > 0:
return True
return False
def IsPlayable(self):
"""Returns True if the item can be played in a Media Player.
At this moment it returns True for:
* type = 'video'
* type = 'audio'
* type = 'playlist'
"""
return self.type.lower() in ('video', 'audio', 'playlist')
def IsResolvable(self):
"""Returns True if the item can be played directly stream (using setResolveUrl).
At this moment it returns True for:
* type = 'video'
* type = 'audio'
"""
return self.type.lower() in ('video', 'audio')
def HasDate(self):
"""Returns if a date was set """
return self.__timestamp > datetime.datetime.min
def SetDate(self, year, month, day, hour=None, minutes=None, seconds=None, onlyIfNewer=False, text=None):
"""Sets the datetime of the MediaItem
Arguments:
year : integer - the year of the datetime
month : integer - the month of the datetime
day : integer - the day of the datetime
Keyword Arguments:
hour : [opt] integer - the hour of the datetime
minutes : [opt] integer - the minutes of the datetime
seconds : [opt] integer - the seconds of the datetime
onlyIfNewer: [opt] boolean - update only if the new date is more
recent than the currently set one
text : [opt] string - if set it will overwrite the text in the
date label the datetime is also set.
Sets the datetime of the MediaItem in the self.__date and the
corresponding text representation of that datetime.
<hour>, <minutes> and <seconds> can be optional and will be set to 0 in
that case. They must all be set or none of them. Not just one or two of
them.
If <onlyIfNewer> is set to True, the update will only occur if the set
datetime is newer than the currently set datetime.
The text representation can be overwritten by setting the <text> keyword
to a specific value. In that case the timestamp is set to the given time
values but the text representation will be overwritten.
If the values form an invalid datetime, the timestamp and date text will be
reset to their default values.
"""
# dateFormat = xbmc.getRegion('dateshort')
# correct a small bug in XBMC
# dateFormat = dateFormat[1:].replace("D-M-", "%D-%M")
# dateFormatLong = xbmc.getRegion('datelong')
# timeFormat = xbmc.getRegion('time')
# dateTimeFormat = "%s %s" % (dateFormat, timeFormat)
try:
dateFormat = "%Y-%m-%d" # "%x"
dateTimeFormat = dateFormat + " %H:%M"
if hour is None and minutes is None and seconds is None:
timeStamp = datetime.datetime(int(year), int(month), int(day))
date = timeStamp.strftime(dateFormat)
else:
timeStamp = datetime.datetime(int(year), int(month), int(day), int(hour), int(minutes), int(seconds))
date = timeStamp.strftime(dateTimeFormat)
if onlyIfNewer and self.__timestamp > timeStamp:
return
self.__timestamp = timeStamp
if text is None:
self.__date = date
else:
self.__date = text
except ValueError:
Logger.Error("Error setting date: Year=%s, Month=%s, Day=%s, Hour=%s, Minutes=%s, Seconds=%s", year, month, day, hour, minutes, seconds, exc_info=True)
self.__timestamp = datetime.datetime.min
self.date = ""
#noinspection PyUnusedLocal
def SetErrorState(self, errorMessage=None, error=True, complete=False): # @UnusedVariable
"""Sets the item in error
Keyword Arguments:
errorMessage : [opt] string - error message
error : [opt] bool - error is set. If set to false, error is reset
complete : [opt] bool - sets the complete bit to false
"""
self.error = error
self.complete = complete
return
def GetXBMCItem(self, pluginMode=False, name=None):
"""Creates an XBMC item with the same data is the MediaItem.
Keyword Arguments:
pluginMode : [opt] boolean - Indication if it's called from a plugin.
name : [opt] string - Overwrites the name of the XBMC item.
Returns:
A complete XBMC ListItem
This item is used for displaying purposes only and changes to it will
not be passed on to the MediaItem.
If pluginMode = True date labels will be slightly different because
for folder items the second label cannot be used in XBMC. The date
will therefore be shown in the title. If the MediaItem is of type 'page'
the prefix "Page " will be added.
Eventually the self.UpdateXBMCItem is called to set all the parameters.
For the mapping and encoding of MediaItem properties to XBMC ListItem
properties, see the __doc__ of UpdateXBMCItem.
"""
# Logger.Debug("Creating XBMC ListItem: ListItem(%s, %s, %s, %s)",self.name, self.__date, self.icon, self.thumb)
if not name:
itemName = self.name
else:
itemName = name
# name = self.__FullDecodeText(name) This is done in the update. Saves CPU
if pluginMode and self.type == 'page':
# in plugin mode we need to add the Page prefix to the item
itemName = "Page %s" % (itemName,)
Logger.Debug("GetXBMCItem :: Adding Page Prefix")
elif pluginMode and self.__date != '' and not self.IsPlayable():
# not playable items should always show date
itemName = "%s (%s)" % (itemName, self.__date)
folderPrefix = addonsettings.AddonSettings().GetFolderPrefix()
if pluginMode and self.type == "folder" and not folderPrefix == "":
itemName = "%s %s" % (folderPrefix, itemName)
# if there was a thumbUrl and we are in pluginMode, just pass it to XBMC
if pluginMode and not self.thumbUrl == "":
self.thumb = self.thumbUrl
item = xbmcgui.ListItem(itemName, self.__date, self.icon, self.thumb)
# set a flag to indicate it is a item that can be used with setResolveUrl.
if self.IsResolvable() and pluginMode:
Logger.Trace("Setting IsPlayable to True")
item.setProperty("IsPlayable", "true")
# now just call the update XBMCItem
self.UpdateXBMCItem(item, name=itemName)
return item
def UpdateXBMCItem(self, item, name=None):
"""Updates an existing XBMC ListItem with properties and InfoLabels
Arguments:
item : ListItem - The XBMC ListItem to update.
Keyword Arguments:
name : [opt] string - Can be used to overwrite the name of the item.
Returns:
Nothing! The update of the XBMC ListItem is done by reference!
See for the InfoLabels: http://wiki.xbmc.org/index.php?title=InfoLabels
Mapping:
* ListItem.Type -> self.type
* ListItem.Label -> self.name
* ListItem.Title -> self.name
* ListItem.Date -> self.__timestamp the format "%d.%m.%Y"
* ListItem.PlotOutline -> self.description
* ListItem.Plot -> self.description
* ListItem.Label2 -> self.__date
* ListItem.ThumbnailImage -> self.thumb
Besides these mappings, the following XOT mappings are set, which are
used by the XOT skin only:
* XOT_Description -> self.description
* XOT_Complete -> self.complete
* XOT_Type -> self.type
* XOT_Rating -> self.rating (-1 if self.rating is None)
* XOT_Error -> self.error
Encoding:
All string values are set in UTF8 encoding and with the HTML characters
converted to UTF8 characters. This is done by the self.__FullDecodeText
method.
"""
if not name:
name = self.name
# the likelihood of getting a name with both HTML entities and Unicode is very low. So do both
# conversions, one will be unnecessary
name = self.__FullDecodeText(name)
description = self.__FullDecodeText(self.description)
if description is None:
description = ""
# the XBMC ListItem date
# date : string (%d.%m.%Y / 01.01.2009) - file date
if self.__timestamp > datetime.datetime.min:
xbmcDate = self.__timestamp.strftime("%d.%m.%Y")
xbmcYear = self.__timestamp.year
else:
xbmcDate = ""
xbmcYear = 0
# specific items
infoLabels = dict()
infoLabels["Label"] = name
if xbmcDate:
infoLabels["Date"] = xbmcDate
infoLabels["Year"] = xbmcYear
if self.type != "Audio":
infoLabels["PlotOutline"] = description
infoLabels["Outline"] = description
if self.type == "audio":
item.setInfo(type="Audio", infoLabels=infoLabels)
else:
item.setInfo(type="Video", infoLabels=infoLabels)
# all items
item.setLabel(name)
item.setLabel2(self.__date)
item.setProperty("XOT_Description", description)
item.setProperty("XOT_Complete", str(self.complete))
item.setProperty("XOT_Type", str(self.type))
item.setProperty("XOT_Error", str(self.error))
if self.fanart:
item.setProperty('fanart_image', self.fanart)
if not self.rating:
item.setProperty("XOT_Rating", str(-1))
else:
item.setProperty("XOT_Rating", "xot_rating%s.png" % (self.rating,))
item.setThumbnailImage("") # this one forces the update of the complete item, so always do this
item.setThumbnailImage(self.thumb) # this one forces the update of the complete item, so always do this
def GetXBMCPlayList(self, bitrate=None, updateItemUrls=False, proxy=None):
""" Creates a XBMC Playlist containing the MediaItemParts in this MediaItem
Keyword Arguments:
bitrate : [opt] integer - The bitrate of the streams that should be in
the playlist. Given in kbps
updateItemUrls : [opt] boolean - If specified, the Playlist items will
have a path pointing to the actual stream
proxy : [opt] ProxyInfo - The proxy to set
Returns:
a XBMC Playlist for this MediaItem
If the bitrate keyword is omitted, the bitrate is retrieved using the
default bitrate settings.
"""
playList = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
srt = None
playListItems = []
if not updateItemUrls:
# if we are not using the resolveUrl method, we need to clear the playlist and set the index
playList.clear()
currentIndex = 0
else:
# copy into a list so we can add stuff in between (we can't do that in an
# XBMC PlayList) and then create a new playlist item
currentIndex = playList.getposition() # this is the location at which we are now.
if currentIndex < 0:
# no items were there, so we can just start at position 0
currentIndex = 0
Logger.Info("Updating the playlist for item at position %s and trying to preserve other playlist items", currentIndex)
for i in range(0, len(playList)):
Logger.Trace("Copying playList item %s out of %s", i + 1, len(playList))
playListItems.append((playList[i].getfilename(), playList[i]))
startList = reduce(lambda x, y: "%s\n%s" % (x, y[0]), playListItems, "Starting with Playlist Items (%s)" % (len(playListItems),))
Logger.Debug(startList)
playList.clear()
logText = "Creating playlist for Bitrate: %s kbps\n%s\nSelected Streams:\n" % (bitrate, self)
# for each MediaItemPart get the URL, starting at the current index
index = currentIndex
for part in self.MediaItemParts:
if len(part.MediaStreams) == 0:
Logger.Warning("Ignoring empty MediaPart: %s", part)
continue
# get the playlist item
(stream, xbmcItem) = part.GetXBMCPlayListItem(self, bitrate=bitrate, updateItemUrls=updateItemUrls)
logText = "%s\n + %s" % (logText, stream)
streamUrl = stream.Url
if proxy:
if stream.Downloaded:
logText = "%s\n + Not adding proxy as the stream is already downloaded" % (logText, )
elif proxy.Scheme == "http" and not stream.Url.startswith("http"):
logText = "%s\n + Not adding proxy due to scheme mismatch" % (logText, )
elif not proxy.UseProxyForUrl(streamUrl):
logText = "%s\n + Not adding proxy due to filter mismatch" % (logText, )
else:
streamUrl = "%s|HttpProxy=%s" % (stream.Url, proxy.GetProxyAddress())
logText = "%s\n + Adding %s" % (logText, proxy)
if index == currentIndex and index < len(playListItems):
# We need to replace the current item.
Logger.Trace("Replacing current XBMC ListItem at Playlist index %s (of %s)", index, len(playListItems))
playListItems[index] = (streamUrl, xbmcItem)
else:
# We need to add at the current index
Logger.Trace("Inserting XBMC ListItem at Playlist index %s", index)
playListItems.insert(index, (streamUrl, xbmcItem))
index += 1
# for now we just add the last subtitle, this will not work if each
# part has its own subtitles.
srt = part.Subtitle
Logger.Info(logText)
endList = reduce(lambda x, y: "%s\n%s" % (x, y[0]), playListItems, "Ended with Playlist Items (%s)" % (len(playListItems),))
Logger.Debug(endList)
for playListItem in playListItems:
playList.add(playListItem[0], playListItem[1])
return playList, srt
def __GetUUID(self):
"""Generates a Unique Identifier based on Time and Random Integers"""
t = long(time.time() * 1000)
r = long(random.random() * 100000000000000000L)
a = random.random() * 100000000000000000L
data = str(t) + ' ' + str(r) + ' ' + str(a)
data = encodinghelper.EncodingHelper.EncodeMD5(data)
return data
def __FullDecodeText(self, stringValue):
""" Decodes a byte encoded string with HTML content into Unicode String
Arguments:
stringValue : string - The byte encoded string to decode
Returns:
A Unicode string with all HTML entities replaced by their UTF8 characters
The decoding is done by first decoding the string to UTF8 and then replacing
the HTML entities with their UTF8 characters.
"""
if stringValue is None:
return None
if stringValue == "":
return ""
# then get rid of the HTML entities
stringValue = htmlentityhelper.HtmlEntityHelper.ConvertHTMLEntities(stringValue)
return stringValue
def __str__(self):
""" String representation """
value = self.name
if self.IsPlayable():
if len(self.MediaItemParts) > 0:
value = "MediaItem: %s [Type=%s, Complete=%s, Error=%s, Date=%s, Downloadable=%s]" % (value, self.type, self.complete, self.error, self.__date, self.downloadable)
for mediaPart in self.MediaItemParts:
value = "%s\n%s" % (value, mediaPart)
value = "%s" % (value,)
else:
value = "%s [Type=%s, Complete=%s, unknown urls, Error=%s, Date=%s, Downloadable=%s]" % (value, self.type, self.complete, self.error, self.__date, self.downloadable)
else:
value = "%s [Type=%s, Url=%s, Date=%s]" % (value, self.type, self.url, self.__date)
return value
def __eq__(self, item):
""" checks 2 items for Equality
Arguments:
item : MediaItem - The item to check for equality.
Returns:
the output of self.Equals(item).
"""
return self.Equals(item)
def __ne__(self, item):
""" returns NOT Equal
Arguments:
item : MediaItem - The item to check for equality.
Returns:
the output of not self.Equals(item).
"""
return not self.Equals(item)
def __cmp__(self, other):
""" Compares 2 items based on their appearance order
Arguments:
other : MediaItem - The item to compare to
Returns:
* -1 : If the item is lower than the current one
* 0 : If the item order is equal
* 1 : If the item is higher than the current one
The comparison is done based on:
* the type of the item. Non-playable items appear first.
* the defined sorting algorithm. This is a Add-on setting retrieved
using the AddonSettings() method. Options are: Name or Timestamp.
"""
if other is None:
return -1
if self.type == other.type:
# Logger.Debug("Comparing :: same types")
sortMethod = addonsettings.AddonSettings().GetSortAlgorithm()
# date sorting
if sortMethod == "date":
# Logger.Debug("Comparing :: Settings: Sorting by date")
# at this point both have timestamps or dates, so we can compare
if self.__timestamp == other.__timestamp:
# same timestamps, compare names
return cmp(self.name, other.name)
else:
# compare timestamps
return cmp(other.__timestamp, self.__timestamp)
# name sorting
elif sortMethod == "name":
# Logger.Debug("Comparing :: Settings: Sorting by name")
return cmp(self.name, other.name)
else:
return 0
else:
# one is folder other one is playable. Folders are always sorted first
# Logger.Debug("Comparing :: different types, none playable first")
if self.IsPlayable():
return -1
else:
return 1
def __hash__(self):
""" returns the hash value """
return hash(self.guidValue)
def Equals(self, item):
""" Compares two items
Arguments:
item : MediaItem - The item to compare to
Returns:
True if the item's GUID's match.
"""
if not item:
return False
# if self.name == item.name and self.guid != item.guid:
# Logger.Debug("Duplicate names, but different guid: %s (%s), %s (%s)", self.name, self.url, item.name, item.url)
return self.guidValue == item.guidValue
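# Illustrative usage (added; a sketch that assumes a working XBMC/XOT environment
# and a reachable example.com URL, so it is not runnable standalone):
#
#   item = MediaItem("Some show", "http://example.com/show")
#   item.type = "video"
#   part = item.AppendSingleStream("http://example.com/stream.m3u8", bitrate=1500)
#   item.SetDate(2013, 1, 31)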
class MediaItemPart:
"""Class that represents a MediaItemPart"""
def __init__(self, name, url="", bitrate=0, subtitle=None, *args):
""" Creates a MediaItemPart with <name> with at least one MediaStream
instantiated with the values <url> and <bitrate>.
The MediaPart could also have a <subtitle> or Properties in the <*args>
Arguments:
name : string - the name of the MediaItemPart
url : string - the URL of the stream of the MediaItemPart
args : list[string] - a list of arguments that will be set as properties
when getting an XBMC Playlist Item
Keyword Arguments:
bitrate : [opt] integer - The bitrate of the stream of the MediaItemPart
subtitle : [opt] string - The url of the subtitle of this MediaItemPart
If a subtitle was provided, the subtitle will be downloaded and stored
in the XOT cache. When played, the subtitle is shown. Due to an XBMC
limitation only one subtitle can be set on a playlist; this will be
the subtitle of the first MediaItemPart.
"""
Logger.Trace("Creating MediaItemPart '%s' for '%s'", name, url)
self.Name = name
self.MediaStreams = []
self.Subtitle = ""
self.CanStream = True
self.UserAgent = None  # : User-Agent to use when downloading or playing a stream, if needed
# set a subtitle
if not subtitle is None:
self.Subtitle = subtitle
if not url == "":
# set the stream that was passed
self.AppendMediaStream(url, bitrate)
# set properties
self.Properties = []
for prop in args:
self.Properties.append(prop)
return
def AppendMediaStream(self, url, bitrate):
"""Appends a mediastream item to the current MediaPart
Arguments:
url : string - the url of the MediaStream
bitrate : integer - the bitrate of the MediaStream
Returns:
the newly added MediaStream by reference.
The bitrate could be set to None.
"""
stream = MediaStream(url, bitrate)
self.MediaStreams.append(stream)
return stream
def AddProperty(self, name, value):
"""Adds a property to the MediaPart
Arguments:
name : string - the name of the property
value : string - the value of the property
Appends a new property to the self.Properties list. On playback
these properties will be set to the XBMC PlaylistItem as properties.
"""
Logger.Debug("Adding property: %s = %s", name, value)
self.Properties.append((name, value))
def GetXBMCPlayListItem(self, parent, bitrate=None, pluginMode=False, name=None, updateItemUrls=False):
"""Returns a XBMC List Item than can be played or added to an XBMC
PlayList.
Arguments:
parent : MediaItem - the parent MediaItem
Keyword Arguments:
bitrate : [opt] integer - The bitrate of the requested XBMC
PlayListItem streams, in kbps.
pluginMode : [opt] boolean - Indicates if it was called from a
plugin instead of script.
name : [opt] string - If set, it overrides the original
name of the MediaItem (mainly used
in the plugin.
updateItemUrls : [opt] boolean - If set, the xbmc items will have a path
that corresponds with the actual stream.
Returns:
A tuple with (stream url, XBMC PlayListItem). The XBMC PlayListItem
can be used to add to a XBMC Playlist. The stream url can be used
to set as the stream for the PlayListItem using xbmc.PlayList.add()
If bitrate is not specified, the bitrate is retrieved from the add-on
settings.
"""
if not name:
Logger.Debug("Creating XBMC ListItem '%s' [PluginMode=%s]", self.Name, pluginMode)
else:
Logger.Debug("Creating XBMC ListItem '%s' [PluginMode=%s]", name, pluginMode)
item = parent.GetXBMCItem(pluginMode=pluginMode, name=name)
if not bitrate:
bitrate = addonsettings.AddonSettings().GetMaxStreamBitrate()
for prop in self.Properties:
Logger.Trace("Adding property: %s", prop)
item.setProperty(prop[0], prop[1])
# now find the correct quality stream
stream = self.GetMediaStreamForBitrate(bitrate)
if self.UserAgent and "|User-Agent" not in stream.Url:
url = "%s|User-Agent=%s" % (stream.Url, htmlentityhelper.HtmlEntityHelper.UrlEncode(self.UserAgent))
stream.Url = url
if updateItemUrls:
Logger.Info("Updating xbmc playlist-item path: %s", stream.Url)
item.setProperty("path", stream.Url)
return stream, item
def GetMediaStreamForBitrate(self, bitrate):
"""Returns the MediaStream for the requested bitrate.
Arguments:
bitrate : integer - The bitrate of the stream in kbps
Returns:
The MediaStream whose bitrate is closest to, but not above, the requested bitrate.
If no stream has a bitrate at or below the requested one, the lowest bitrate stream is returned.
"""
# order the items by bitrate
self.MediaStreams.sort()
bestStream = None
bestDistance = None
for stream in self.MediaStreams:
if stream.Bitrate is None:
# no bitrate set, see if others are available
continue
# this is the bitrate-as-max-limit-method
if stream.Bitrate > bitrate:
# if the bitrate is higher, continue for more
continue
# if commented ^^ , we get the closest-match-method
# determine the distance till the bitrate
distance = abs(bitrate - stream.Bitrate)
if bestDistance is None or bestDistance > distance:
# this stream is better, so store it.
bestDistance = distance
bestStream = stream
if bestStream is None:
# no match, take the lowest bitrate
return self.MediaStreams[0]
return bestStream
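# Illustrative example (added for clarity): with streams of 500, 1200 and 2500 kbps
# and a requested bitrate of 1500, the 1200 kbps stream is returned (closest match
# that does not exceed the requested bitrate); if every stream exceeds the requested
# bitrate, the lowest-bitrate stream is returned instead.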
def __cmp__(self, other):
""" Compares 2 items based on their appearance order
Arguments:
other : MediaItemPart - The part to compare to
Returns:
* -1 : If the item is lower than the current one
* 0 : If the item order is equal
* 1 : If the item is higher than the current one
The comparison is done based on the Name only.
"""
if other is None:
return -1
# compare names
return cmp(self.Name, other.Name)
def __eq__(self, other):
""" checks 2 items for Equality
Arguments:
item : MediaItemPart - The part to check for equality.
Returns:
True if the items are equal. Equality takes into consideration:
* Name
* Subtitle
* Length of the MediaStreams
* Compares all the MediaStreams in the self.MediaStreams
"""
if other is None:
return False
if not other.Name == self.Name:
return False
if not other.Subtitle == self.Subtitle:
return False
# now check the streams
if not len(self.MediaStreams) == len(other.MediaStreams):
return False
for i in range(0, len(self.MediaStreams)):
if not self.MediaStreams[i] == other.MediaStreams[i]:
return False
# if we reach this point they are equal.
return True
def __str__(self):
""" String representation """
text = "MediaPart: %s [CanStream=%s, UserAgent=%s]" % (self.Name, self.CanStream, self.UserAgent)
if self.Subtitle != "":
text = "%s\n + Subtitle: %s" % (text, self.Subtitle)
for prop in self.Properties:
text = "%s\n + Property: %s=%s" % (text, prop[0], prop[1])
for stream in self.MediaStreams:
text = "%s\n + %s" % (text, stream)
return text
class MediaStream:
"""Class that represents a Mediastream with <url> and a specific <bitrate>"""
def __init__(self, url, bitrate=0):
"""Initialises a new MediaStream
Arguments:
url : string - the URL of the stream
Keyword Arguments:
bitrate : [opt] integer - the bitrate of the stream (defaults to 0)
"""
Logger.Trace("Creating MediaStream '%s' with bitrate '%s'", url, bitrate)
self.Url = url
self.Bitrate = int(bitrate)
self.Downloaded = False
return
def __cmp__(self, other):
"""Compares two MediaStream based on the bitrate
Arguments:
other : MediaStream - The stream to compare to
Returns:
* -1 : If the item is lower than the current one
* 0 : If the item order is equal
* 1 : If the item is higher than the current one
The comparison is done based on the bitrate only.
"""
if other is None:
return -1
return cmp(self.Bitrate, other.Bitrate)
def __eq__(self, other):
"""Checks 2 items for Equality
Arguments:
other : MediaStream - The stream to check for equality.
Returns:
True if the items are equal. Equality takes into consideration:
* The url of the MediaStream
"""
# also check for URL
if other is None:
return False
return self.Url == other.Url
def __str__(self):
"""String representation"""
text = "MediaStream: %s [bitrate=%s, downloaded=%s]" % (self.Url, self.Bitrate, self.Downloaded)
return text
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/mediaitem.py
|
Python
|
gpl-2.0
| 36,369
|
[
"VisIt"
] |
e542bce3d0290c01f71f27d79ac017935a01bfdddd6306f6502b4b0ec64fe595
|
#!/usr/bin/env python
#author: Peter Thorpe, September 2016. The James Hutton Institute, Dundee, UK.
#Title:
#script performs stats on the coverage files already generated
#imports
import os
import sys
import numpy
from sys import stdin,argv
import sys
import datetime
from optparse import OptionParser
###########################################################################
# functions
###########################################################################
try:
# New in Python 3.4
from statistics import mean
except ImportError:
def mean(list_of_values):
"""Calculate the mean average of a list of numbers."""
# Quick and dirty, assumes already a list not an iterator
# so don't have to worry about getting the divisor.
# Explicit float(...) to allow for Python 2 division.
return sum(list_of_values) / float(len(list_of_values))
assert mean([1,2,3,4,5]) == 3
def parse_result_file(blast):
"""read in the blast tab file. Reads whole file into memeroy.
returns a list, one list item per blast hit.
"""
with open(blast) as file:
data= file.read().split("\n")
data1 = [line.rstrip("\n") for line in (data)
if line.strip() != ""]
return data1
def convert_to_int(results):
"""function to convert list of string to list
of intergers"""
results = [int(i) for i in results if i != ""]
return results
def stat_tests(in_list):
"""function to return stats on a given list.
returns min_cov, max_cov, mean_cov, standard_dev
"""
min_cov = min(in_list)
max_cov = max(in_list)
mean_cov = mean(in_list)
standard_dev = numpy.std(in_list)
median=numpy.median(in_list)
assert min_cov <= mean_cov <= max_cov
return min_cov, max_cov, mean_cov, median, standard_dev
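# Illustrative example (added for clarity; the mean may come back as an int or a
# float depending on whether statistics.mean or the fallback is in use):
#
#   stat_tests([1, 2, 3, 4, 5])
#   # -> roughly (1, 5, 3, 3.0, 1.414)   i.e. (min, max, mean, median, std)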
def write_out_stats(ITS_cov, GFF, all_genes_cov, out_file):
"""function to write out summary stats. """
# call function to get list of coverage per file.
number_of_ITS_blast_hits = len(parse_result_file(GFF))
number_of_all_genes_hits = len(parse_result_file(all_genes_cov))
try:
ITS_cov_str = parse_result_file(ITS_cov)
#print ITS_cov_str
ITS_cov = convert_to_int(ITS_cov_str)
except:
raise ValueError("something wrong with ITS cov. file")
try:
all_genes_cov_str = parse_result_file(all_genes_cov)
all_genes_cov = convert_to_int(all_genes_cov_str)
except:
raise ValueError("something wrong with all genes cov. file")
# out file to write to
summary_stats_out = open(out_file, "w")
title = "#gene_class\tmin_cov\tmax_cov\tmean_cov\tstandard_dev\tmedian\n"
summary_stats_out.write(title)
# call stats function
ITSmin_cov, ITSmax_cov, ITSmean_cov, ITSmedian, ITSstandard_dev = stat_tests(ITS_cov)
ITS_data_formatted = "ITS:\t%s\t%s\t%s\t%s\t%s\n" %(ITSmin_cov,\
ITSmax_cov, ITSmean_cov, ITSstandard_dev, ITSmedian)
#write out ITS results
summary_stats_out.write(ITS_data_formatted)
GENEmin_cov, GENEmax_cov, GENEmean_cov, GENEmedian, GENEstandard_dev = stat_tests(all_genes_cov)
GENE_data_formatted = "allGenes:\t%s\t%s\t%.1f\t%.1f\t%s\n" %(GENEmin_cov,\
GENEmax_cov, float(GENEmean_cov), float(GENEstandard_dev), GENEmedian)
summary_stats_out.write(GENE_data_formatted)
blast_hits_info = "\nnumber of ITS_blast_hit = %s \n" %(number_of_ITS_blast_hits)
number_of_all_genes_hits_out = "\nnumber of 'all genes' = %s \n" %(number_of_all_genes_hits)
ratio_info = "\nITS to gene ratio = %.1f \n" %(float(ITSmean_cov) / GENEmean_cov)
summary_stats_out.write(blast_hits_info)
summary_stats_out.write(number_of_all_genes_hits_out)
#results based on mean coverage values
summary_stats_out.write("\n#BASED on MEAN coverage values")
summary_stats_out.write(ratio_info)
final_count_info = "There may be %.1f ITS regions\n" %((int(number_of_ITS_blast_hits)\
*(float(ITSmean_cov) / GENEmean_cov)))
#print final_count_info
summary_stats_out.write(final_count_info)
#results based on median coverage values
summary_stats_out.write("\n#BASED on MEDIAN coverage values")
ratio_info = "\nITS to gene ratio = %.1f \n" %(float(ITSmedian) / GENEmedian)
summary_stats_out.write(ratio_info)
final_count_info = "There may be %.1f ITS regions\n" %((int(number_of_ITS_blast_hits)\
*(float(ITSmedian) / GENEmedian)))
summary_stats_out.write(final_count_info)
#close the write file
summary_stats_out.close()
###########################################################################
if "-v" in sys.argv or "--version" in sys.argv:
print ("v0.0.1")
sys.exit(0)
usage = """Use as follows:
Title:
script to generate summary stats for ITS coverage and all genes coverage
$ summary_stats.py --ITS ITS.cov --all all_gene.cov -o summary.out
ITS GFF file needed to count the number of ITS blast hits
"""
parser = OptionParser(usage=usage)
parser.add_option("-i", "--ITS", dest="ITS_cov", default=None,
help="coverage file for ITS regions",
metavar="FILE")
parser.add_option("-g", "--GFF", dest="GFF", default=None,
help="ITS GFF file",
metavar="FILE")
parser.add_option("-a", "--all_genes_cov", dest="all_genes_cov",
default=None,
help="the coverage file for all genes",
metavar="FILE")
parser.add_option("-o", "--out_file", dest="out_file",
default="stats.out",
help="outfile for the ITS and allgene stats")
(options, args) = parser.parse_args()
ITS_cov = options.ITS_cov
GFF = options.GFF
all_genes_cov = options.all_genes_cov
out_file = options.out_file
#run the program
file_list = [ITS_cov, GFF, all_genes_cov]
for i in file_list:
if not os.path.isfile(i):
print("sorry, couldn't open the file: ", "\n")
print ("current working directory is :", os.getcwd() + "\n")
print ("files are :", [f for f in os.listdir('.')])
sys.exit("\n\nInput ITS file not found: %s" % i)
# call the top function
write_out_stats(ITS_cov, GFF, all_genes_cov, out_file)
|
widdowquinn/THAPBI
|
ITS_region_genomic_coverage/summary_stats.py
|
Python
|
mit
| 6,353
|
[
"BLAST"
] |
cd63f1a6bcba223dce8163f00bffba55f16c4ed8e39bae710012ed4ef46d74f2
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from glob import glob
class Bowtie2(Package):
"""Bowtie 2 is an ultrafast and memory-efficient tool for aligning
sequencing reads to long reference sequences"""
homepage = "bowtie-bio.sourceforge.net/bowtie2/index.shtml"
url = "http://downloads.sourceforge.net/project/bowtie-bio/bowtie2/2.3.1/bowtie2-2.3.1-source.zip"
version('2.3.1', 'b4efa22612e98e0c23de3d2c9f2f2478')
version('2.2.5', '51fa97a862d248d7ee660efc1147c75f')
depends_on('tbb', when='@2.3.1')
depends_on('readline')
depends_on('zlib')
patch('bowtie2-2.2.5.patch', when='@2.2.5', level=0)
patch('bowtie2-2.3.1.patch', when='@2.3.1', level=0)
# seems to have trouble with GCC 6's -std=gnu++14
conflicts('%gcc@6:')
def install(self, spec, prefix):
make()
mkdirp(prefix.bin)
for bow in glob("bowtie2*"):
install(bow, prefix.bin)
# install('bowtie2',prefix.bin)
# install('bowtie2-align-l',prefix.bin)
# install('bowtie2-align-s',prefix.bin)
# install('bowtie2-build',prefix.bin)
# install('bowtie2-build-l',prefix.bin)
# install('bowtie2-build-s',prefix.bin)
# install('bowtie2-inspect',prefix.bin)
# install('bowtie2-inspect-l',prefix.bin)
# install('bowtie2-inspect-s',prefix.bin)
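# Typical usage from the command line (added; assumes a working Spack installation):
#   spack install bowtie2@2.3.1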
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/bowtie2/package.py
|
Python
|
lgpl-2.1
| 2,575
|
[
"Bowtie"
] |
2ab8d26c4088d792d08505b6ca57de8d4790f8816d95d74b574e363e602b995a
|
#! /usr/bin/env python2
import sys
import vtk
import vtkDICOMPython
# put everything into the vtk namespace
for a in dir(vtkDICOMPython):
if a[0] != '_':
setattr(vtk, a, getattr(vtkDICOMPython, a))
m = vtk.vtkDICOMMetaData()
if vtk.vtkVersion.GetVTKMajorVersion() < 6:
sys.stderr.write("This test requires VTK 6 or higher.\n");
sys.exit(0)
m.SetAttributeValue(vtk.vtkDICOMTag(0x0008, 0x0005), 'ISO_IR 100')
v = m.GetAttributeValue(vtk.vtkDICOMTag(0x0008, 0x0005))
if v.AsString() != 'ISO_IR 100':
sys.exit(1)
|
hendradarwin/vtk-dicom
|
Testing/TestDICOMPython.py
|
Python
|
bsd-3-clause
| 540
|
[
"VTK"
] |
ca1ddbc722b746c21bc14760b14974d1b2b74f47504a6931db3d41c28e7887bf
|
"""
Copyright 2015 Google, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Extract documentation about elements in YANG data modules.
"""
import optparse
import sys
import os.path
import re
#from collections import OrderedDict
#from lxml import etree
import xml
from util.markdown_emitter import MarkdownEmitter
from util.html_emitter import HTMLEmitter
from util import yangpath
from util.yangdoc_defs import YangDocDefs
from pyang import plugin
from pyang import statements
from pyang import error
def pyang_plugin_init():
plugin.register_plugin(DocsPlugin())
class DocsPlugin(plugin.PyangPlugin):
def add_output_format(self, fmts):
self.multiple_modules = True
fmts['docs'] = self
def add_opts(self, optparser):
optlist = [
optparse.make_option("--meta-only",
dest="meta_only",
action="store_true",
help="""Only produce documentation based on the
module metadata"""),
optparse.make_option("--doc-format",
dest="doc_format",
action="store",
type="string",
default="markdown",
help="""Doc output format: markdown, html"""),
optparse.make_option("--strip-ns",
dest="strip_namespace",
action="store_true",
help="""Strip namespace prefixes from
displayed paths"""),
optparse.make_option("--no-structure",
dest="no_structure",
action="store_true",
help="""Do not generate docs for structure-only nodes (e.g., containers)"""),
optparse.make_option("--doc-title",
dest="doc_title",
action="store",
type="string",
help="""Set the title of the output documentation page"""),
]
g = optparser.add_option_group("docs output specific options")
g.add_options(optlist)
def emit(self, ctx, modules, fd):
modulenames = [m.arg for m in modules]
if not ctx.opts.ignore_errors:
for (epos, etag, eargs) in ctx.errors:
if (epos.top.arg in modulenames and
error.is_error(error.err_level(etag))):
raise error.EmitError("%s contains errors" % epos.top.arg)
emit_docs(ctx, modules, fd)
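# Illustrative invocation (added; assumes this file is discoverable as a pyang
# plugin, e.g. via --plugindir):
#   pyang --plugindir <path-to-plugins> -f docs --doc-format html module.yang > module.html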
class ModuleDoc:
"""This class serves as a container for a module's documentation.
It includes the typedef and identity definitions and a reference to
the top-level StatementDoc object (i.e., with the 'module' statement).
"""
def __init__(self, name):
self.module_name = name
# module is a reference to a StatementDoc object corresponding
# to the top-level module
self.module = None
# identities contains a dict of
# <identity name> : <StatementDoc object>
self.identities = {}
# base_identities stores a list of the base identity definitions
# TODO: this does not support multi-level identity derivations
self.base_identities = []
# typedefs is a dict of the user-defined type definitions in
# the module, each stored as a name:StatementDoc entry
self.typedefs = {}
def __str__ (self):
# this is not particularly useful info in its current form --
# primarily used for debugging
s = "%s:\n" % self.module_name
s += " top-level elements: %d\n" % len(self.module.children)
s += " base identities: %d\n" % len(self.identities)
s += " type definitions: %d\n" % len(self.typedefs)
return s
class TypeStatementDoc:
"""This class holds information about the types of an
YANG element. Compound types like unions may contain other
types -- this class contains the hierarchy of types attached
to a single StatementDoc object."""
def __init__(self, typename=None):
self.typename = typename
self.attrs = {}
self.attrs['restrictions'] = {}
self.childtypes = []
def __str__(self):
s = "type %s:\n" % self.typename
for attr in self.attrs:
s += " %s : %s\n" % (attr, self.attrs[attr])
if self.childtypes:
s += "child types: "
for child in self.childtypes:
s += "%s: " % child.typename
s += "\n"
for child in self.childtypes:
s += str(child)
s += "\n"
return s
class StatementDoc:
"""This class holds information about an element, i.e.
a specific statement (e.g., leaf, container, list, etc.) The
StatementDoc object is associated with its module"""
def __init__(self, name, keyword):
self.name = name
self.keyword = keyword
# dict with attributes of the statements, e.g., description,
# etc.
self.attrs ={}
# reference to the top-level type object that stores types
self.typedoc = None
# list of child statements
self.children = []
# reference to the parent StatementDoc object of the current statement
self.parent = None
# reference to the ModuleDoc object that this statement belongs to
self.module_doc = None
def __str__ (self):
# recursively prints the statement and its children -- primarily for
# debugging
s = "%s:\n" % self.name
for attr in self.attrs:
s += " %s : %s\n" % (attr, self.attrs[attr])
s += "parent: "
if self.parent is not None:
s += "%s:%s\n" % (self.parent.name, self.parent.type)
else:
s += "%s\n" % self.parent
if self.children:
s += "subs: "
for child in self.children:
s += "%s:%s " % (child.name, child.type)
s += "\n"
for child in self.children:
s += str(child)
s += "\n"
return s
def emit_docs(ctx, modules, fd):
"""Top-level function to collect and print documentation"""
ctx.mod_docs = []
ctx.skip_keywords = []
for module in modules:
mod = collect_docs(module, ctx)
ctx.mod_docs.append(mod)
if ctx.opts.no_structure:
ctx.skip_keywords = ['container', 'list']
if ctx.opts.doc_format == "html":
emitter = HTMLEmitter()
else:
emitter = MarkdownEmitter()
# write top level module and types
for mod in ctx.mod_docs:
emitter.genModuleDoc(mod, ctx)
# visit each child element recursively and write its docs
for child in mod.module.children:
emit_child (child, emitter, ctx, fd, 1)
# emit docs for all of the current modules
docs = emitter.emitDocs(ctx)
fd.write(docs)
def emit_child(node, emitter, ctx, fd, level=1):
emitter.genStatementDoc(node, ctx, level)
if len(node.children) > 0:
level += 1
for child in node.children:
emit_child(child, emitter, ctx, fd, level)
# gen_docs_html(mod, ctx, fd)
def collect_docs(module, ctx):
"""Extract documentation for the supplied module -- module parameter is a
pyang Statement object"""
# create the top level container for this module
modtop = ModuleDoc(module.i_modulename)
# create the root StatementDoc object for the module
mod = StatementDoc(module.i_modulename, module.keyword)
modtop.module = mod
# get the description text
description = module.search_one('description')
if description:
mod.attrs['desc'] = description.arg
else:
mod.attrs['desc'] = ""
# get the prefix used by the module
mod.attrs['prefix'] = module.i_prefix
# get the list of imported modules
imports = module.search('import')
mod.attrs['imports'] = []
for imp in imports:
mod.attrs['imports'].append(imp.arg)
# get the module version number if it exists
# since this uses an extension in OpenConfig models,
# must look for a keyword that is a tuple
version = module.search_one(('openconfig-extensions','openconfig-version'))
if version is not None:
mod.attrs['version'] = version.arg
# collect identities
for (name, identity) in module.i_identities.items():
collect_identity_doc(identity, modtop)
# collect typedefs
for (name, typedef) in module.i_typedefs.items():
collect_typedef_doc(typedef, modtop)
# collect elements
for child in module.i_children:
collect_child_doc(child, mod, modtop)
return modtop
def collect_identity_doc(identity, mod):
"""Collect documentation fields for YANG identities"""
id = StatementDoc (identity.arg, identity.keyword)
desc = identity.search_one('description')
if desc is not None:
id.attrs['desc'] = desc.arg
base = identity.search_one('base')
if base is not None:
# this is derived identity
id.attrs['base'] = base.arg
else:
# this is a base identity
id.attrs['base'] = None
mod.base_identities.append(id.name)
reference = identity.search_one('reference')
if reference is not None:
id.attrs['reference'] = reference.arg
# add the identity to the module object
mod.identities[id.name] = id
def collect_typedef_doc(typedef, mod):
"""Collect documentation fields for YANG typedefs"""
td = StatementDoc(typedef.arg, typedef.keyword)
desc = typedef.search_one('description')
if desc is not None:
td.attrs['desc'] = desc.arg
for p in YangDocDefs.type_leaf_properties:
prop = typedef.search_one(p)
if prop is not None:
td.attrs[p] = prop.arg
typest = typedef.search_one('type')
if typest is not None:
typedoc = TypeStatementDoc()
td.typedoc = typedoc
collect_type_docs(typest, typedoc)
# add the typedef to the module object
mod.typedefs[td.name] = td
def collect_child_doc(node, parent, top):
"""Collect documentation fields for a statement. node
is a PYANG statement object, while parent is a ModuleDoc
or StatementDoc object. top is the top level ModuleDoc
object"""
statement = StatementDoc(node.arg, node.keyword)
statement.parent = parent
statement.module_doc = top
parent.children.append(statement)
# fill in some attributes if they exist
# type information
type = node.search_one('type')
if type is not None:
# create the Type object
statement.typedoc = TypeStatementDoc()
collect_type_docs(type, statement.typedoc)
# node description
desc = node.search_one('description')
if desc is not None:
statement.attrs['desc'] = desc.arg
# reference statement
reference = node.search_one('reference')
if reference is not None:
statement.attrs['reference'] = reference.arg
# default statement
default = node.search_one('default')
if default is not None:
statement.attrs['default'] = default.arg
# units statement
units = node.search_one('units')
if units is not None:
statement.attrs['units'] = units.arg
# schema path for the current node
path = statements.mk_path_str(node, True)
statement.attrs['path'] = path
# id based on schema path
node_id = node_to_id(statement)
statement.attrs['id'] = node_id
# rw or ro info
if hasattr(node, 'i_config'):
statement.attrs['config'] = node.i_config
# for list nodes, record the keys
if statement.keyword == 'list':
statement.attrs['is_list'] = True
keys = []
for key in node.i_key:
keypath = statements.mk_path_str(key, True)
keys.append((key.arg, path_to_id(keypath)))
statement.attrs['keys'] = keys
else:
statement.attrs['is_list'] = False
# note nodes that are list keys
if hasattr(node, 'i_is_key'):
statement.attrs['is_key'] = node.i_is_key
else:
statement.attrs['is_key'] = False
# collect data from children, i.e., depth-first
if hasattr(node, 'i_children'):
for child in node.i_children:
collect_child_doc(child, statement,top)
def collect_type_docs (typest, typedoc):
"""Given a pyang type statement object, populates information
about the type in the TypeStatementDoc object. Some types may
require recursive resolution for compound types, e.g.,
unions, enumeration"""
typedoc.typename = typest.arg
# based on the type, collect further properties
if typest.arg == 'identityref':
# base must be set for an identityref type
base = typest.search_one('base')
typedoc.attrs['base'] = base.arg
elif typest.arg == 'enumeration':
# collect the enums into a dict of enumvalue:description
typedoc.attrs['enums'] = {}
enums = typest.search('enum')
for enum in enums:
enumdesc = enum.search_one('description')
# generally expect a description substatement, but it might be None
if enumdesc is not None:
typedoc.attrs['enums'][enum.arg] = enumdesc.arg
elif typest.arg == 'leafref':
ref_path = typest.search_one('path')
typedoc.attrs['leafref_path'] = yangpath.strip_namespace(ref_path.arg)
elif typest.arg == 'string':
pattern = typest.search_one('pattern')
if pattern:
typedoc.attrs['restrictions']['pattern'] = pattern.arg
elif typest.arg in YangDocDefs.integer_types:
rng = typest.search_one('range')
if rng:
typedoc.attrs['restrictions']['range'] = rng.arg
elif typest.arg == 'union':
# collect member types of the union
types = typest.search('type')
for type in types:
# create a new typedoc
utype = TypeStatementDoc(type.arg)
typedoc.childtypes.append(utype)
collect_type_docs(type, utype)
# TODO(aashaikh): should collect substatements as they are usually
# restrictions on the value, which are useful to document.
def node_to_id(node):
"""Given a node, return a string suitable as an HTML id attribute based on the
node's path"""
return path_to_id(node.attrs['path'])
def path_to_id(nodepath):
"""Given a path, return a string suitable as an HTML id attribute"""
path = yangpath.strip_namespace(nodepath)
# remove leading slash
path = path.lstrip('/')
path = re.sub(r'\/', r'-', path)
return path.lower()
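# Editor's note (hedged illustration, not part of the original plugin): assuming
# yangpath.strip_namespace() removes the "prefix:" qualifiers, path_to_id() turns a
# schema path into a lowercase, dash-separated HTML id, e.g.:
#
#   path_to_id('/oc-if:interfaces/oc-if:interface/config')
#   # -> 'interfaces-interface-config'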
|
openconfig/oc-pyang
|
openconfig_pyang/plugins/yangdoc.py
|
Python
|
apache-2.0
| 14,380
|
[
"VisIt"
] |
410253d02a74ebdf070341b385422602e2d0fbe3dbdeeeda9687cfdba13b6f8d
|
'''
Random field generation using rft1d.random.Generator1D
Notes:
-- Using Generator1D is faster than rft1d.randn1d for iterative
generation.
-- When FWHM gets large (2*FWHM > nNodes), the data should be
padded using the *pad* keyword.
'''
import numpy as np
from matplotlib import pyplot
import rft1d
#(0) Set parameters:
np.random.seed(12345)
nResponses = 5
nNodes = 101
FWHM = 20.0
#(1) Generate Gaussian 1D fields:
generator = rft1d.random.Generator1D(nResponses, nNodes, FWHM, pad=False)
y = generator.generate_sample()   #each call returns a new (nResponses x nNodes) sample
y = generator.generate_sample()
y = generator.generate_sample()
y = generator.generate_sample()   #only this final sample is plotted below
#(2) Plot fields:
pyplot.close('all')
pyplot.plot(y.T)
pyplot.plot([0,100], [0,0], 'k:')
pyplot.xlabel('Field position', size=16)
pyplot.ylabel('z', size=20)
pyplot.title('Random (Gaussian) fields', size=20)
pyplot.show()
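#(3) Editor's note (hedged addition): per the padding note in the header, when the
#    smoothing kernel is wide relative to the field (roughly 2*FWHM > nNodes) the
#    generator should be built with pad=True so the kernel does not wrap around
#    the field edges, e.g.:
#
#    generator_padded = rft1d.random.Generator1D(nResponses, nNodes, 60.0, pad=True)
#    y_padded = generator_padded.generate_sample()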
|
0todd0000/rft1d
|
rft1d/examples/random_fields_2.py
|
Python
|
gpl-3.0
| 898
|
[
"Gaussian"
] |
53ea50141a275a8d5a72fd7a364d4b3ebad6605ad9ad35402aba061cc6d1cc3b
|
#
# Copyright 2008, 2009, 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import wx
from grid_plotter_base import grid_plotter_base
from OpenGL import GL
import common
import numpy
import gltext
import math
import struct
LEGEND_LEFT_PAD = 7
LEGEND_NUM_BLOCKS = 256
LEGEND_NUM_LABELS = 9
LEGEND_WIDTH = 8
LEGEND_FONT_SIZE = 8
LEGEND_BORDER_COLOR_SPEC = (0, 0, 0) #black
MIN_PADDING = 0, 60, 0, 0 #top, right, bottom, left
ceil_log2 = lambda x: 2**int(math.ceil(math.log(x)/math.log(2)))
pack_color = lambda x: struct.unpack('I', struct.pack('BBBB', *x))[0]
unpack_color = lambda x: struct.unpack('BBBB', struct.pack('I', int(x)))
def _get_rbga(red_pts, green_pts, blue_pts, alpha_pts=[(0, 0), (1, 0)]):
"""
Get an array of 256 rgba values where each index maps to a color.
The scaling for red, green, blue, alpha are specified in piece-wise functions.
The piece-wise functions consist of a set of x, y coordinates.
The x and y values of the coordinates range from 0 to 1.
The coordinates must be specified so that x increases with the index value.
Resulting values are calculated along the line formed between 2 coordinates.
@param *_pts an array of x,y coordinates for each color element
    @return array of rgba values (4 bytes each)
"""
def _fcn(x, pw):
for (x1, y1), (x2, y2) in zip(pw, pw[1:]):
#linear interpolation
if x <= x2: return float(y1 - y2)/(x1 - x2)*(x - x1) + y1
raise Exception
return numpy.array([pack_color(map(
lambda pw: int(255*_fcn(i/255.0, pw)),
(red_pts, green_pts, blue_pts, alpha_pts),
)) for i in range(0, 256)], numpy.uint32)
COLORS = {
'rgb1': _get_rbga( #http://www.ks.uiuc.edu/Research/vmd/vmd-1.7.1/ug/img47.gif
red_pts = [(0, 0), (.5, 0), (1, 1)],
green_pts = [(0, 0), (.5, 1), (1, 0)],
blue_pts = [(0, 1), (.5, 0), (1, 0)],
),
'rgb2': _get_rbga( #http://xtide.ldeo.columbia.edu/~krahmann/coledit/screen.jpg
red_pts = [(0, 0), (3.0/8, 0), (5.0/8, 1), (7.0/8, 1), (1, .5)],
green_pts = [(0, 0), (1.0/8, 0), (3.0/8, 1), (5.0/8, 1), (7.0/8, 0), (1, 0)],
blue_pts = [(0, .5), (1.0/8, 1), (3.0/8, 1), (5.0/8, 0), (1, 0)],
),
'rgb3': _get_rbga(
red_pts = [(0, 0), (1.0/3.0, 0), (2.0/3.0, 0), (1, 1)],
green_pts = [(0, 0), (1.0/3.0, 0), (2.0/3.0, 1), (1, 0)],
blue_pts = [(0, 0), (1.0/3.0, 1), (2.0/3.0, 0), (1, 0)],
),
'gray': _get_rbga(
red_pts = [(0, 0), (1, 1)],
green_pts = [(0, 0), (1, 1)],
blue_pts = [(0, 0), (1, 1)],
),
}
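# Editor's note (hedged illustration): each COLORS entry packs one (r, g, b, a)
# byte tuple per index via pack_color above.  For the linear 'gray' map the two
# endpoints round-trip exactly:
#
#   unpack_color(COLORS['gray'][0])     # -> (0, 0, 0, 0)
#   unpack_color(COLORS['gray'][255])   # -> (255, 255, 255, 0)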
##################################################
# Waterfall Plotter
##################################################
class waterfall_plotter(grid_plotter_base):
def __init__(self, parent):
"""
Create a new channel plotter.
"""
#init
grid_plotter_base.__init__(self, parent, MIN_PADDING)
#setup legend cache
self._legend_cache = self.new_gl_cache(self._draw_legend)
#setup waterfall cache
self._waterfall_cache = self.new_gl_cache(self._draw_waterfall, 50)
#setup waterfall plotter
self.register_init(self._init_waterfall)
self._resize_texture(False)
self._minimum = 0
self._maximum = 0
self._fft_size = 1
self._buffer = list()
self._pointer = 0
self._counter = 0
self.set_num_lines(0)
self.set_color_mode(COLORS.keys()[0])
def _init_waterfall(self):
"""
Run gl initialization tasks.
"""
self._waterfall_texture = GL.glGenTextures(1)
def _draw_waterfall(self):
"""
Draw the waterfall from the texture.
The texture is circularly filled and will wrap around.
Use matrix modeling to shift and scale the texture onto the coordinate plane.
"""
#resize texture
self._resize_texture()
#setup texture
GL.glBindTexture(GL.GL_TEXTURE_2D, self._waterfall_texture)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT)
GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, GL.GL_REPLACE)
#write the buffer to the texture
while self._buffer:
GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, self._pointer, self._fft_size, 1, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, self._buffer.pop(0))
self._pointer = (self._pointer + 1)%self._num_lines
#begin drawing
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glPushMatrix()
#matrix scaling
GL.glTranslatef(self.padding_left, self.padding_top, 0)
GL.glScalef(
float(self.width-self.padding_left-self.padding_right),
float(self.height-self.padding_top-self.padding_bottom),
1.0,
)
#draw texture with wrapping
GL.glBegin(GL.GL_QUADS)
prop_y = float(self._pointer)/(self._num_lines-1)
prop_x = float(self._fft_size)/ceil_log2(self._fft_size)
off = 1.0/(self._num_lines-1)
GL.glTexCoord2f(0, prop_y+1-off)
GL.glVertex2f(0, 1)
GL.glTexCoord2f(prop_x, prop_y+1-off)
GL.glVertex2f(1, 1)
GL.glTexCoord2f(prop_x, prop_y)
GL.glVertex2f(1, 0)
GL.glTexCoord2f(0, prop_y)
GL.glVertex2f(0, 0)
GL.glEnd()
GL.glPopMatrix()
GL.glDisable(GL.GL_TEXTURE_2D)
def _populate_point_label(self, x_val, y_val):
"""
        Get the text that will populate the point label.
Give the X value for the current point.
@param x_val the current x value
@param y_val the current y value
@return a value string with units
"""
return '%s: %s'%(self.x_label, common.eng_format(x_val, self.x_units))
def _draw_legend(self):
"""
Draw the color scale legend.
"""
if not self._color_mode: return
legend_height = self.height-self.padding_top-self.padding_bottom
#draw each legend block
block_height = float(legend_height)/LEGEND_NUM_BLOCKS
x = self.width - self.padding_right + LEGEND_LEFT_PAD
for i in range(LEGEND_NUM_BLOCKS):
color = unpack_color(COLORS[self._color_mode][int(255*i/float(LEGEND_NUM_BLOCKS-1))])
GL.glColor4f(*numpy.array(color)/255.0)
y = self.height - (i+1)*block_height - self.padding_bottom
self._draw_rect(x, y, LEGEND_WIDTH, block_height)
#draw rectangle around color scale border
GL.glColor3f(*LEGEND_BORDER_COLOR_SPEC)
self._draw_rect(x, self.padding_top, LEGEND_WIDTH, legend_height, fill=False)
#draw each legend label
label_spacing = float(legend_height)/(LEGEND_NUM_LABELS-1)
x = self.width - (self.padding_right - LEGEND_LEFT_PAD - LEGEND_WIDTH)/2
for i in range(LEGEND_NUM_LABELS):
proportion = i/float(LEGEND_NUM_LABELS-1)
dB = proportion*(self._maximum - self._minimum) + self._minimum
y = self.height - i*label_spacing - self.padding_bottom
txt = gltext.Text('%ddB'%int(dB), font_size=LEGEND_FONT_SIZE, centered=True)
txt.draw_text(wx.Point(x, y))
def _resize_texture(self, flag=None):
"""
Create the texture to fit the fft_size X num_lines.
@param flag the set/unset or update flag
"""
if flag is not None:
self._resize_texture_flag = flag
return
if not self._resize_texture_flag: return
self._buffer = list()
self._pointer = 0
if self._num_lines and self._fft_size:
GL.glBindTexture(GL.GL_TEXTURE_2D, self._waterfall_texture)
data = numpy.zeros(self._num_lines*ceil_log2(self._fft_size)*4, numpy.uint8).tostring()
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, ceil_log2(self._fft_size), self._num_lines, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, data)
self._resize_texture_flag = False
def set_color_mode(self, color_mode):
"""
Set the color mode.
New samples will be converted to the new color mode.
Old samples will not be recolorized.
@param color_mode the new color mode string
"""
self.lock()
if color_mode in COLORS.keys():
self._color_mode = color_mode
self._legend_cache.changed(True)
self.update()
self.unlock()
def set_num_lines(self, num_lines):
"""
Set number of lines.
Powers of two only.
@param num_lines the new number of lines
"""
self.lock()
self._num_lines = num_lines
self._resize_texture(True)
self.update()
self.unlock()
def set_samples(self, samples, minimum, maximum):
"""
Set the samples to the waterfall.
Convert the samples to color data.
@param samples the array of floats
@param minimum the minimum value to scale
@param maximum the maximum value to scale
"""
self.lock()
#set the min, max values
if self._minimum != minimum or self._maximum != maximum:
self._minimum = minimum
self._maximum = maximum
self._legend_cache.changed(True)
if self._fft_size != len(samples):
self._fft_size = len(samples)
self._resize_texture(True)
#normalize the samples to min/max
samples = (samples - minimum)*float(255/(maximum-minimum))
samples = numpy.clip(samples, 0, 255) #clip
samples = numpy.array(samples, numpy.uint8)
#convert the samples to RGBA data
data = COLORS[self._color_mode][samples].tostring()
self._buffer.append(data)
self._waterfall_cache.changed(True)
self.unlock()
|
n4hy/gnuradio
|
gr-wxgui/src/python/plotter/waterfall_plotter.py
|
Python
|
gpl-3.0
| 9,471
|
[
"VMD"
] |
9c10aaf319c5cc986511e60e64e085e1748e8af5975620d82e7416cb1eebaf71
|
###########################################################################
# Mean prediction from Gaussian Processes based on
# classifier_libsvm_minimal_modular.py
# plotting functions have been adapted from the pyGP library
# https://github.com/jameshensman/pyGP
###########################################################################
from numpy import *
from numpy.random import randn
from modshogun import *
import pylab as PL
import matplotlib
import logging as LG
import scipy as SP
from modshogun import GradientModelSelection
from modshogun import ModelSelectionParameters, R_EXP, R_LINEAR
from modshogun import ParameterCombination
def plot_training_data(x, y,
shift=None,
replicate_indices=None,
format_data={'alpha':.5,
'marker':'.',
'linestyle':'--',
'lw':1,
'markersize':9},
draw_arrows=0,
plot_old=False):
"""
Plot training data input x and output y into the
active figure (See http://matplotlib.sourceforge.net/ for details of figure).
Instance plot without replicate groups:
.. image:: ../images/plotTraining.png
:height: 8cm
    Instance plot with two replicate groups and a shift in x-coords:
.. image:: ../images/plotTrainingShiftX.png
:height: 8cm
**Parameters:**
x : [double]
Input x (e.g. time).
y : [double]
Output y (e.g. expression).
shift : [double]
The shift of each replicate group.
replicate_indices : [int]
        Indices of replicates for each x, respectively
format_data : {format}
Format of the data points. See http://matplotlib.sourceforge.net/ for details.
draw_arrows : int
        draw given number of arrows (if greater than len(replicate), draw all arrows).
Arrows will show the time shift for time points, respectively.
"""
x_shift = SP.array(x.copy())
if shift is not None and replicate_indices is not None:
assert len(shift) == len(SP.unique(replicate_indices)), 'Need one shift per replicate to plot properly'
_format_data = format_data.copy()
if(format_data.has_key('alpha')):
_format_data['alpha'] = .2*format_data['alpha']
else:
_format_data['alpha'] = .2
number_of_groups = len(SP.unique(replicate_indices))
for i in SP.unique(replicate_indices):
x_shift[replicate_indices == i] -= shift[i]
for i in SP.unique(replicate_indices):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
_format_data['color'] = col
if(plot_old):
PL.plot(x[replicate_indices == i], y[replicate_indices == i], **_format_data)
if(draw_arrows):
range = SP.where(replicate_indices == i)[0]
for n in SP.arange(range[0], range[-1], max(1, round(len(range) / draw_arrows))):
offset = round((len(range)-1) / draw_arrows)
n += max(int((i+1)*offset/number_of_groups),1)
PL.text((x_shift[n]+x[n])/2., y[n],
"%.2f"%(-shift[i]),
ha='center',va='center',
fontsize=10)
PL.annotate('', xy=(x_shift[n], y[n]),
xytext=(x[n], y[n]),va='center',
arrowprops=dict(facecolor=col,
alpha=.2,
shrink=.01,
frac=.2,
headwidth=11,
width=11))
#PL.plot(x,y,**_format_data)
if(replicate_indices is not None):
number_of_groups = len(SP.unique(replicate_indices))
#format_data['markersize'] = 13
#format_data['alpha'] = .5
for i in SP.unique(replicate_indices):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
format_data['color'] = col
PL.plot(x_shift[replicate_indices == i], y[replicate_indices == i], **format_data)
else:
print(x_shift.shape)
number_of_groups = x_shift.shape[0]
for i in xrange(number_of_groups):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
format_data['color'] = col
PL.plot(x[i], y[i], **format_data)
# return PL.plot(x_shift,y,**format_data)
def plot_sausage(X, mean, std, alpha=None, format_fill={'alpha':0.3, 'facecolor':'k'}, format_line=dict(alpha=1, color='g', lw=3, ls='dashed')):
"""
    Plot sausage plot of GP, i.e.:
.. image:: ../images/sausage.png
:height: 8cm
**returns:** : [fill_plot, line_plot]
The fill and the line of the sausage plot. (i.e. green line and gray fill of the example above)
**Parameters:**
X : [double]
        Interval X for which the sausage shall be plotted.
mean : [double]
        The mean to be plotted.
std : [double]
Pointwise standard deviation.
format_fill : {format}
The format of the fill. See http://matplotlib.sourceforge.net/ for details.
format_line : {format}
The format of the mean line. See http://matplotlib.sourceforge.net/ for details.
"""
X = X.squeeze()
Y1 = (mean + 2 * std)
Y2 = (mean - 2 * std)
if(alpha is not None):
old_alpha_fill = min(1, format_fill['alpha'] * 2)
for i, a in enumerate(alpha[:-2]):
format_fill['alpha'] = a * old_alpha_fill
hf = PL.fill_between(X[i:i + 2], Y1[i:i + 2], Y2[i:i + 2], lw=0, **format_fill)
i += 1
hf = PL.fill_between(X[i:], Y1[i:], Y2[i:], lw=0, **format_fill)
else:
hf = PL.fill_between(X, Y1, Y2, **format_fill)
hp = PL.plot(X, mean, **format_line)
return [hf, hp]
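# Editor's note (hedged usage sketch, not part of the original demo): plot_sausage
# only needs matching 1-D arrays, e.g.
#
#   X    = SP.linspace(0, 10, 50)
#   mean = SP.sin(X)
#   std  = 0.2 * SP.ones(X.shape)
#   plot_sausage(X, mean, std)   # grey band = mean +/- 2*std, dashed green mean line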
class CrossRect(matplotlib.patches.Rectangle):
def __init__(self, *args, **kwargs):
matplotlib.patches.Rectangle.__init__(self, *args, **kwargs)
#self.ax = ax
# def get_verts(self):
# rectverts = matplotlib.patches.Rectangle.get_verts(self)
# return verts
def get_path(self, *args, **kwargs):
old_path = matplotlib.patches.Rectangle.get_path(self)
verts = []
codes = []
for vert, code in old_path.iter_segments():
verts.append(vert)
codes.append(code)
verts.append([1, 1])
codes.append(old_path.LINETO)
new_path = matplotlib.artist.Path(verts, codes)
return new_path
def create_toy_data():
#0. generate Toy-Data; just samples from a superposition of a sin + linear trend
xmin = 1
xmax = 2.5*SP.pi
x = SP.arange(xmin,xmax,(xmax-xmin)/100.0)
C = 2 #offset
sigma = 0.5
b = 0
y = b*x + C + 1*SP.sin(x)
# dy = b + 1*SP.cos(x)
y += sigma*random.randn(y.shape[0])
y-= y.mean()
x = x[:,SP.newaxis]
return [x,y]
def run_demo():
LG.basicConfig(level=LG.INFO)
random.seed(572)
#1. create toy data
[x,y] = create_toy_data()
feat_train = RealFeatures(transpose(x));
labels = RegressionLabels(y);
n_dimensions = 1
    #2. location of uniformly spaced predictions
X = SP.linspace(0,10,10)[:,SP.newaxis]
    #new interface with likelihood parameters being decoupled from the covariance function
likelihood = GaussianLikelihood()
covar_parms = SP.log([2])
hyperparams = {'covar':covar_parms,'lik':SP.log([1])}
#construct covariance function
SECF = GaussianKernel(feat_train, feat_train,2)
covar = SECF
zmean = ZeroMean();
inf = ExactInferenceMethod(SECF, feat_train, zmean, labels, likelihood);
gp = GaussianProcessRegression(inf, feat_train, labels);
root=ModelSelectionParameters();
c1=ModelSelectionParameters("inference_method", inf);
root.append_child(c1);
c2 = ModelSelectionParameters("scale");
c1.append_child(c2);
c2.build_values(0.01, 4.0, R_LINEAR);
c3 = ModelSelectionParameters("likelihood_model", likelihood);
c1.append_child(c3);
c4=ModelSelectionParameters("sigma");
c3.append_child(c4);
c4.build_values(0.001, 4.0, R_LINEAR);
c5 =ModelSelectionParameters("kernel", SECF);
c1.append_child(c5);
c6 =ModelSelectionParameters("width");
c5.append_child(c6);
c6.build_values(0.001, 4.0, R_LINEAR);
crit = GradientCriterion();
grad=GradientEvaluation(gp, feat_train, labels,
crit);
grad.set_function(inf);
gp.print_modsel_params();
root.print_tree();
grad_search=GradientModelSelection(
root, grad);
grad.set_autolock(0);
best_combination=grad_search.select_model(1);
gp.set_return_type(GaussianProcessRegression.GP_RETURN_COV);
St = gp.apply_regression(feat_train);
St = St.get_labels();
gp.set_return_type(GaussianProcessRegression.GP_RETURN_MEANS);
M = gp.apply_regression();
M = M.get_labels();
#create plots
plot_sausage(transpose(x),transpose(M),transpose(SP.sqrt(St)));
plot_training_data(x,y);
PL.show();
if __name__ == '__main__':
run_demo()
|
sanuj/shogun
|
examples/undocumented/python_modular/graphical/regression_gaussian_process_demo.py
|
Python
|
gpl-3.0
| 9,249
|
[
"Gaussian"
] |
0c9e3a88ed78662130a3620157efa7978c13745251418fb754310396aa148628
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
try:
import pint # pylint: disable=unused-import
except ImportError:
tutorial = importlib_wrapper.MagicMock()
skipIfMissingFeatures = ut.skip(
"Python module pint not available, skipping test!")
else:
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/constant_pH/constant_pH.py", script_suffix="ideal")
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
system = tutorial.system
def test(self):
expected_values = 1. / (1 + 10**(tutorial.pK - tutorial.pHs))
simulated_values = tutorial.av_alpha
simulated_values_error = tutorial.err_alpha
# test alpha +/- 0.05 and standard error of alpha less than 0.05
np.testing.assert_allclose(expected_values, simulated_values, rtol=0,
atol=0.05)
self.assertLess(np.max(simulated_values_error), 0.05)
if __name__ == "__main__":
ut.main()
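# Editor's note (hedged): the reference curve above is the ideal (Henderson-
# Hasselbalch) degree of dissociation, alpha = 1 / (1 + 10**(pK - pH)); at
# pH == pK it predicts alpha == 0.5, which the constant-pH simulation is
# expected to reproduce within the 0.05 tolerance asserted above.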
|
fweik/espresso
|
testsuite/scripts/tutorials/test_constant_pH__ideal.py
|
Python
|
gpl-3.0
| 1,723
|
[
"ESPResSo"
] |
9f3f15b38d3578b4444744c5c3f350e25de25bc7a6bb6f0509af7aef6ff5f83f
|
# coding: utf8
""" Implementation of full and banded matrix models for :math:`\beta`-Ensemble:
- Hermite Ensemble (full + tridiagonal)
- Laguerre Ensemble (full + tridiagonal)
- Jacobi Ensemble (full + tridiagonal)
- Circular Ensemble (full + quindiagonal)
- Ginibre Ensemble (full)
.. seealso:
`Documentation on ReadTheDocs <https://dppy.readthedocs.io/en/latest/continuous_dpps/beta_ensembles.sampling.html>`_
"""
import numpy as np
import scipy.linalg as la
import scipy.sparse as sp
from dppy.utils import check_random_state
###########
# Hermite #
###########
def hermite_sampler_full(N, beta=2,
random_state=None):
rng = check_random_state(random_state)
if beta == 1:
A = rng.randn(N, N)
elif beta == 2:
A = rng.randn(N, N) + 1j * rng.randn(N, N)
elif beta == 4:
X = rng.randn(N, N) + 1j * rng.randn(N, N)
Y = rng.randn(N, N) + 1j * rng.randn(N, N)
A = np.block([[X, Y], [-Y.conj(), X.conj()]])
else:
err_print = ('`beta` parameter must be 1, 2 or 4.',
'Given: {}'.format(beta))
raise ValueError('\n'.join(err_print))
return la.eigvalsh(A + A.conj().T) / np.sqrt(2.0)
def semi_circle_law(x, R=2.0):
"""
.. seealso::
- :cite:`DuEd15` Table 1
- https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
"""
return 2 / (np.pi * R**2) * np.sqrt(np.maximum(R**2 - x**2, 0.0))
def mu_ref_normal_sampler_tridiag(loc=0.0, scale=1.0, beta=2, size=10,
random_state=None):
"""Implementation of the tridiagonal model to sample from
.. math::
\\Delta(x_{1}, \\dots, x_{N})^{\\beta}
\\prod_{n=1}^{N} \\exp(-\\frac{(x_i-\\mu)^2}{2\\sigma^2} ) dx_i
.. seealso::
:cite:`DuEd02` II-C
"""
rng = check_random_state(random_state)
if not (beta > 0):
raise ValueError('`beta` must be positive. Given: {}'.format(beta))
# beta/2*[N-1, N-2, ..., 1]
b_2_Ni = 0.5 * beta * np.arange(size - 1, 0, step=-1)
alpha_coef = rng.normal(loc=loc, scale=scale, size=size)
beta_coef = rng.gamma(shape=b_2_Ni, scale=scale**2)
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef))
############
# Laguerre #
############
def laguerre_sampler_full(M, N, beta=2,
random_state=None):
rng = check_random_state(random_state)
if beta == 1:
A = rng.randn(N, M)
elif beta == 2:
A = rng.randn(N, M) + 1j * rng.randn(N, M)
elif beta == 4:
X = rng.randn(N, M) + 1j * rng.randn(N, M)
Y = rng.randn(N, M) + 1j * rng.randn(N, M)
A = np.block([[X, Y], [-Y.conj(), X.conj()]])
else:
err_print = ('`beta` parameter must be 1, 2 or 4.',
'Given: {}'.format(beta))
raise ValueError('\n'.join(err_print))
return la.eigvalsh(A.dot(A.conj().T))
def marcenko_pastur_law(x, M, N, sigma=1.0):
""" M >= N
.. seealso::
- :cite:`DuEd15` Table 1
- https://en.wikipedia.org/wiki/Marchenko-Pastur_distribution
"""
c = N / M
Lm, Lp = (sigma * (1 - np.sqrt(c)))**2, (sigma * (1 + np.sqrt(c)))**2
return np.sqrt(np.maximum((Lp-x)*(x-Lm),0)) / (c*x) / (2*np.pi*sigma**2)
def mu_ref_gamma_sampler_tridiag(shape=1.0, scale=1.0, beta=2, size=10,
random_state=None):
"""
.. seealso::
:cite:`DuEd02` III-B
"""
rng = check_random_state(random_state)
if not (beta > 0):
raise ValueError('`beta` must be positive. Given: {}'.format(beta))
# beta/2*[N-1, N-2, ..., 1, 0]
b_2_Ni = 0.5 * beta * np.arange(size - 1, -1, step=-1)
# xi_odd = xi_1, ... , xi_2N-1
xi_odd = rng.gamma(shape=b_2_Ni + shape, scale=scale) # odd
# xi_even = xi_0=0, xi_2, ... ,xi_2N-2
xi_even = np.zeros(size)
xi_even[1:] = rng.gamma(shape=b_2_Ni[:-1], scale=scale) # even
# alpha_i = xi_2i-2 + xi_2i-1, xi_0 = 0
alpha_coef = xi_even + xi_odd
# beta_i+1 = xi_2i-1 * xi_2i
beta_coef = xi_odd[:-1] * xi_even[1:]
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef))
##########
# Jacobi #
##########
def jacobi_sampler_full(M_1, M_2, N, beta=2,
random_state=None):
rng = check_random_state(random_state)
if beta == 1:
X = rng.randn(N, M_1)
Y = rng.randn(N, M_2)
elif beta == 2:
X = rng.randn(N, M_1) + 1j * rng.randn(N, M_1)
Y = rng.randn(N, M_2) + 1j * rng.randn(N, M_2)
elif beta == 4:
X_1 = rng.randn(N, M_1) + 1j * rng.randn(N, M_1)
X_2 = rng.randn(N, M_1) + 1j * rng.randn(N, M_1)
Y_1 = rng.randn(N, M_2) + 1j * rng.randn(N, M_2)
Y_2 = rng.randn(N, M_2) + 1j * rng.randn(N, M_2)
X = np.block([[X_1, X_2], [-X_2.conj(), X_1.conj()]])
Y = np.block([[Y_1, Y_2], [-Y_2.conj(), Y_1.conj()]])
else:
err_print = ('`beta` parameter must be 1, 2 or 4.',
'Given: {}'.format(beta))
raise ValueError('\n'.join(err_print))
X_tmp = X.dot(X.conj().T)
Y_tmp = Y.dot(Y.conj().T)
return la.eigvals(X_tmp.dot(la.inv(X_tmp + Y_tmp))).real
def wachter_law(x, M_1, M_2, N):
""" M_1, M_2>=N
.. seealso::
:cite:`DuEd15` Table 1
"""
a, b = M_1 / N, M_2 / N
Lm = ((np.sqrt(a * (a + b - 1)) - np.sqrt(b)) / (a + b))**2
Lp = ((np.sqrt(a * (a + b - 1)) + np.sqrt(b)) / (a + b))**2
return (a+b)/(2*np.pi) * 1/(x*(1-x)) * np.sqrt(np.maximum((Lp-x)*(x-Lm),0))
def mu_ref_beta_sampler_tridiag(a, b, beta=2, size=10,
random_state=None):
""" Implementation of the tridiagonal model given by Theorem 2 of :cite:`KiNe04` to sample from
.. math::
\\Delta(x_{1}, \\dots, x_{N})^{\\beta}
\\prod_{n=1}^{N} x^{a-1} (1-x)^{b-1} dx
.. seealso::
:cite:`KiNe04` Theorem 2
"""
rng = check_random_state(random_state)
if not (beta > 0):
raise ValueError('`beta` must be positive. Given: {}'.format(beta))
# beta/2*[N-1, N-2, ..., 1, 0]
b_2_Ni = 0.5 * beta * np.arange(size - 1, -1, step=-1)
# c_odd = c_1, c_3, ..., c_2N-1
c_odd = rng.beta(b_2_Ni + a, b_2_Ni + b)
# c_even = c_0, c_2, c_2N-2
c_even = np.zeros(size)
c_even[1:] = rng.beta(b_2_Ni[:-1], b_2_Ni[1:] + a + b)
# xi_odd = xi_2i-1 = (1-c_2i-2) c_2i-1
xi_odd = (1 - c_even) * c_odd
# xi_even = xi_0=0, xi_2, xi_2N-2
# xi_2i = (1-c_2i-1)*c_2i
xi_even = np.zeros(size)
xi_even[1:] = (1 - c_odd[:-1]) * c_even[1:]
# alpha_i = xi_2i-2 + xi_2i-1
# alpha_1 = xi_0 + xi_1 = xi_1
alpha_coef = xi_even + xi_odd
# beta_i+1 = xi_2i-1 * xi_2i
beta_coef = xi_odd[:-1] * xi_even[1:]
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef))
#####################
# Circular ensemble #
#####################
def circular_sampler_full(N, beta=2, haar_mode='QR',
random_state=None):
"""
.. seealso::
:cite:`Mez06` Section 5
"""
rng = check_random_state(random_state)
if haar_mode == 'Hermite':
# size_sym_mat = int(N*(N-1)/2)
if beta == 1: # COE
A = rng.randn(N, N)
elif beta == 2: # CUE
A = rng.randn(N, N) + 1j * rng.randn(N, N)
elif beta == 4:
X = rng.randn(N, N) + 1j * rng.randn(N, N)
Y = rng.randn(N, N) + 1j * rng.randn(N, N)
A = np.block([[X, Y], [-Y.conj(), X.conj()]])
else:
err_print = ('For `haar_mode="hermite"`, `beta` = 1, 2 or 4.',
'Given: {}'.format(beta))
raise ValueError('\n'.join(err_print))
_, U = la.eigh(A + A.conj().T)
elif haar_mode == 'QR':
if beta == 1: # COE
A = rng.randn(N, N)
elif beta == 2: # CUE
A = rng.randn(N, N) + 1j * rng.randn(N, N)
# elif beta==4:
else:
err_print = ('With `haar_mode="QR", `beta` = 1 or 2.',
'Given: {}'.format(beta))
raise ValueError('\n'.join(err_print))
# U, _ = la.qr(A)
Q, R = la.qr(A)
d = np.diagonal(R)
U = np.multiply(Q, d / np.abs(d), Q)
else:
err_print = ('Invalid `haar_mode`.',
'Choose from `haar_mode="Hermite" or "QR".',
'Given: {}'.format(haar_mode))
raise ValueError('\n'.join(err_print))
return la.eigvals(U)
def mu_ref_unif_unit_circle_sampler_quindiag(beta=2, size=10,
random_state=None):
"""
    .. seealso::
:cite:`KiNe04` Theorem 1
"""
rng = check_random_state(random_state)
if not (isinstance(beta, int) and (beta > 0)):
raise ValueError('`beta` must be positive integer.\
Given: {}'.format(beta))
alpha = np.zeros(size, dtype=np.complex_)
# nu = 1 + beta*(N-1, N-2, ..., 0)
for i, nu in enumerate(1 + beta * np.arange(size - 1, -1, step=-1)):
gauss_vec = rng.randn(nu + 1)
alpha[i] = (gauss_vec[0] + 1j * gauss_vec[1]) / la.norm(gauss_vec)
rho = np.sqrt(1 - np.abs(alpha[:-1])**2)
xi = np.zeros((size - 1, 2, 2), dtype=np.complex_) # xi[0,..,N-1]
xi[:, 0, 0], xi[:, 0, 1] = alpha[:-1].conj(), rho
xi[:, 1, 0], xi[:, 1, 1] = rho, -alpha[:-1]
# L = diag(xi_0, xi_2, ...)
# M = diag(1, xi_1, x_3, ...)
# xi[N-1] = alpha[N-1].conj()
if size % 2 == 0: # even
L = sp.block_diag(xi[::2, :, :],
dtype=np.complex_)
M = sp.block_diag([1.0, *xi[1::2, :, :], alpha[-1].conj()],
dtype=np.complex_)
else: # odd
L = sp.block_diag([*xi[::2, :, :], alpha[-1].conj()],
dtype=np.complex_)
M = sp.block_diag([1.0, *xi[1::2, :, :]],
dtype=np.complex_)
return la.eigvals(L.dot(M).toarray())
###########
# Ginibre #
###########
def ginibre_sampler_full(N, random_state=None):
"""Compute the eigenvalues of a random complex standard Gaussian matrix"""
rng = check_random_state(random_state)
A = rng.randn(N, N) + 1j * rng.randn(N, N)
return la.eigvals(A) / np.sqrt(2.0)
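# Editor's note (hedged illustration): with the 1/sqrt(2) normalisation above the
# matrix entries have unit complex variance, so for large N the eigenvalues of
# ginibre_sampler_full(N) are expected to fill the disk of radius sqrt(N) roughly
# uniformly (circular law), e.g.
#
#   evals = ginibre_sampler_full(1000)
#   np.abs(evals).max()   # ~ sqrt(1000) ~ 31.6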
|
guilgautier/DPPy
|
dppy/random_matrices.py
|
Python
|
mit
| 10,337
|
[
"Gaussian"
] |
52054041dc091bef0fd53bd051edcb21aa324df97f01c0685eccbd0f1c764bce
|
# Copyright 2005 by Jason A. Hackney. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Parser for dealing with text output from the MEME motif search program
"""
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/MEME/__init__.py
|
Python
|
apache-2.0
| 308
|
[
"Biopython"
] |
fe514a2c18d83697d30f1108c2bd21a211fb9ab8f94808c1ea18eb722e388234
|
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(name='emmetrop',
version=version,
description="A biological analysis of a schematic eye and natural images",
long_description=""" """,
install_requires=["Sphinx",
"numpy",
"scipy",
"matplotlib",
"tables"
],
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Brian Schmidt',
author_email='bps10@uw.edu',
url='',
dependency_links = [
"python.org"
],
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
# package_data={'':'LICENSE'},
include_package_data=True,
zip_safe=True,
entry_points="""
# -*- Entry points: -*-
""",
use_2to3 = True,
)
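# Editor's note (hedged): with this setup.py in place the package can typically be
# installed in development mode with `pip install -e .` (or `python setup.py develop`),
# which also pulls in the install_requires dependencies listed above.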
|
bps10/emmetrop
|
setup.py
|
Python
|
mit
| 982
|
[
"Brian"
] |
2b9f3b69a2a674a45b0bf5b1eb18da19b2f064dd2e22ce65e6e60cc65e9a5fa6
|
################################################################################
#
# Copyright 2015-2021 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import argparse
import datetime as dt
import json
import os
import sys
import osrframework
import osrframework.utils.banner as banner
import osrframework.utils.platform_selection as platform_selection
import osrframework.utils.configuration as configuration
import osrframework.utils.general as general
def perform_search(platformNames=[], queries=[], exclude_platform_names=[]):
"""Method to perform the search itself on the different platforms
Args:
platformNames: List of names of the platforms.
queries: List of queries to be performed.
exclude_platform_names: A list of platforms not to be searched.
Returns:
list: A list with the entities collected.
"""
# Grabbing the <Platform> objects
platforms = platform_selection.get_platforms_by_name(platformNames, mode="searchfy", exclude_platform_names=exclude_platform_names)
results = []
for q in queries:
for pla in platforms:
# This returns a json.txt!
entities = pla.get_info(query=q, mode="searchfy")
if entities != "[]":
results += json.loads(entities)
return results
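# Editor's note (hedged usage sketch; the platform name below is illustrative only):
#
#   entities = perform_search(platformNames=["github"], queries=["i3visio"])
#   # -> a list of i3visio entity dicts decoded from each platform's JSON output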
def get_parser():
"""Defines the argument parser
Returns:
argparse.ArgumentParser.
"""
DEFAULT_VALUES = configuration.get_configuration_values_for("searchfy")
# Capturing errors just in case the option is not found in the configuration
try:
exclude_list = [DEFAULT_VALUES["exclude_platforms"]]
except:
exclude_list = []
parser = argparse.ArgumentParser(description='searchfy - Piece of software that performs a query on the platforms in OSRFramework.', prog='searchfy', epilog='Check the README.md file for further details on the usage of this program or follow us on Twitter in <http://twitter.com/i3visio>.', add_help=False, conflict_handler='resolve')
parser._optionals.title = "Input options (one required)"
# Adding the main options
group_main = parser.add_mutually_exclusive_group(required=True)
    group_main.add_argument('--license', required=False, action='store_true', default=False, help='shows the GPLv3+ license and exits.')
    group_main.add_argument('-q', '--queries', metavar='<searches>', nargs='+', action='store', help='the list of queries to be performed.')
listAll = platform_selection.get_all_platform_names("searchfy")
# Configuring the processing options
group_processing = parser.add_argument_group('Processing arguments', 'Configuring the way in which searchfy will process the identified profiles.')
    group_processing.add_argument('-e', '--extension', metavar='<sum_ext>', nargs='+', choices=['csv', 'gml', 'json', 'ods', 'png', 'txt', 'xls', 'xlsx'], required=False, default=DEFAULT_VALUES.get("extension", ["csv"]), action='store', help='output extension for the summary files. Default: csv.')
group_processing.add_argument('-F', '--file_header', metavar='<alternative_header_file>', required=False, default=DEFAULT_VALUES.get("file_header", "profiles"), action='store', help='Header for the output filenames to be generated. If None was provided the following will be used: profiles.<extension>' )
    group_processing.add_argument('-o', '--output_folder', metavar='<path_to_output_folder>', required=False, default=DEFAULT_VALUES.get("output_folder", "."), action='store', help='output folder for the generated documents. If the path does not exist, searchfy will try to create it; if this argument is not provided, searchfy will NOT write down any data. Check permissions if something goes wrong.')
group_processing.add_argument('-p', '--platforms', metavar='<platform>', choices=listAll, nargs='+', required=False, default=DEFAULT_VALUES.get("platforms", []) ,action='store', help='select the platforms where you want to perform the search amongst the following: ' + str(listAll) + '. More than one option can be selected.')
group_processing.add_argument('-w', '--web_browser', required=False, action='store_true', help='opening the URIs returned in the default web browser.')
group_processing.add_argument('-x', '--exclude', metavar='<platform>', choices=listAll, nargs='+', required=False, default=exclude_list, action='store', help='select the platforms that you want to exclude from the processing.')
# About options
group_about = parser.add_argument_group('About arguments', 'Showing additional information about this program.')
    group_about.add_argument('-h', '--help', action='help', help='shows this help and exits.')
    group_about.add_argument('--version', action='version', version='[%(prog)s] OSRFramework ' + osrframework.__version__, help='shows the version of the program and exits.')
return parser
def main(params=None):
"""Main function to launch searchfy
The function is created in this way so as to let other applications make
use of the full configuration capabilities of the application. The
parameters received are used as parsed by this modules `get_parser()`.
Args:
params (list): A list with the parameters as grabbed by the terminal. It is
None when this is called by an entry_point. If it is called by osrf
the data is already parsed.
Returns:
list. A list of i3visio entities.
"""
    if params is None or isinstance(params, list):
        parser = get_parser()
        args = parser.parse_args(params)
    else:
        args = params
results = []
print(general.title(banner.text))
saying_hello = f"""
Searchfy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2021
This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{general.LICENSE_URL}>.
"""
print(general.info(saying_hello))
if args.license:
general.showLicense()
else:
# Showing the execution time...
start_time = dt.datetime.now()
print(f"{start_time}\tStarting search in different platform(s)... Relax!\n")
print(general.emphasis("\tPress <Ctrl + C> to stop...\n"))
# Performing the search
try:
results = perform_search(platformNames=args.platforms, queries=args.queries, exclude_platform_names=args.exclude)
except KeyboardInterrupt:
print(general.error("\n[!] Process manually stopped by the user. Workers terminated without providing any result.\n"))
results = []
# Generating summary files for each ...
if args.extension:
# Verifying if the outputPath exists
if not os.path.exists (args.output_folder):
os.makedirs(args.output_folder)
# Grabbing the results
fileHeader = os.path.join(args.output_folder, args.file_header)
# Iterating through the given extensions to print its values
for ext in args.extension:
# Generating output files
general.export_usufy(results, ext, fileHeader)
# Printing the results if requested
now = dt.datetime.now()
print(f"\n{now}\tResults obtained:\n")
print(general.success(general.osrf_to_text_export(results)))
if args.web_browser:
general.open_results_in_browser(results)
now = dt.datetime.now()
print("\n{date}\tYou can find all the information collected in the following files:".format(date=str(now)))
for ext in args.extension:
# Showing the output files
print("\t" + general.emphasis(fileHeader + "." + ext))
# Showing the execution time...
end_time = dt.datetime.now()
print(f"\n{end_time}\tFinishing execution...\n")
print("Total time used:\t" + general.emphasis(str(end_time-start_time)))
try:
print("Average seconds/query:\t" + general.emphasis(str((end_time-start_time).total_seconds()/len(args.platforms))) +" seconds\n")
except:
pass
# Urging users to place an issue on Github...
print(banner.footer)
if params:
return results
if __name__ == "__main__":
main(sys.argv[1:])
|
i3visio/osrframework
|
osrframework/searchfy.py
|
Python
|
agpl-3.0
| 9,100
|
[
"VisIt"
] |
f556c04d470e11aa5c200cea25683eb2a300ec3e4a39f37827a314fa625d8cd9
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
import re
import struct
from collections import defaultdict
from decimal import Decimal
from .pdict import PreservingDict
from .periodictable import *
from .physconst import *
from .exceptions import *
from .molecule import Molecule
from .orient import OrientMols
from .options import conv_float2negexp
def harvest_output(outtext):
"""Function to separate portions of a CFOUR output file *outtest*,
divided by xjoda.
"""
pass_psivar = []
pass_coord = []
pass_grad = []
#for outpass in re.split(r'--invoking executable xjoda', outtext, re.MULTILINE):
for outpass in re.split(r'JODA beginning optimization cycle', outtext, re.MULTILINE):
psivar, c4coord, c4grad = harvest_outfile_pass(outpass)
pass_psivar.append(psivar)
pass_coord.append(c4coord)
pass_grad.append(c4grad)
#print '\n\nXXXXXXXXXXXXXXXXXXXXXXXXXX\n\n'
#print outpass
#print psivar, c4coord, c4grad
#print psivar, c4grad
#print '\n\nxxxxxxxxxxxxxxxxxxxxxxxxxx\n\n'
retindx = -1 if pass_coord[-1] else -2
# print ' <<< C4 PSIVAR >>>'
# for item in pass_psivar[retindx]:
# print(' %30s %16.8f' % (item, pass_psivar[retindx][item]))
# print ' <<< C4 COORD >>>'
# for item in pass_coord[retindx]:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# print ' <<< C4 GRAD >>>'
# for item in pass_grad[retindx]:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
return pass_psivar[retindx], pass_coord[retindx], pass_grad[retindx]
def harvest_outfile_pass(outtext):
"""Function to read CFOUR output file *outtext* and parse important
    quantum chemical information from it.
"""
psivar = PreservingDict()
psivar_coord = None
psivar_grad = None
# TODO: BCC
# CI
# QCISD(T)
# other ROHF tests
# vcc/ecc
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
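    # Editor's note (hedged illustration): NUMBER captures signed decimals with an
    # optional Fortran-style D/E exponent, as printed by CFOUR, e.g.
    #
    #   re.findall(NUMBER, "E(SCF) = -76.026760 a.u.  conv 0.1D-06")
    #   # -> ['-76.026760', '0.1D-06']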
# Process NRE
mobj = re.search(r'^\s+' + r'(?:Nuclear repulsion energy :)' + r'\s+' + NUMBER + r'\s+a\.u\.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched nre')
psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
# Process SCF
mobj = re.search(
r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s+' + NUMBER + r'\s+a\.u\.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched scf1')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
mobj = re.search(
r'^\s+' + r'(?:E\(SCF\)=)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched scf2')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
if 'SCF TOTAL ENERGY' not in psivar:
# can be too greedy and match across scf cycles
mobj = re.search(
r'^\s+' + r'(?:SCF has converged.)' + r'\s*$' +
r'(?:.*?)' +
r'^\s+' + r'(?:\d+)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched scf3')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
mobj = re.search(
r'^\s+' + r'(?:E\(ROHF\)=)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched scf4')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
# Process MP2
mobj = re.search(
r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched mp2r')
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = 2 * Decimal(mobj.group(1))
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(2)
psivar['MP2 CORRELATION ENERGY'] = 2 * Decimal(mobj.group(1)) + Decimal(mobj.group(2))
psivar['MP2 TOTAL ENERGY'] = mobj.group(4)
mobj = re.search(
r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(BB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched mp2u')
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) + Decimal(mobj.group(2))
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(3)
psivar['MP2 CORRELATION ENERGY'] = Decimal(mobj.group(1)) + \
Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['MP2 TOTAL ENERGY'] = mobj.group(5)
mobj = re.search(
r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(BB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(SINGLE\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched mp2ro')
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) + Decimal(mobj.group(2))
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(3)
psivar['MP2 SINGLES ENERGY'] = mobj.group(4)
psivar['MP2 CORRELATION ENERGY'] = Decimal(mobj.group(1)) + \
Decimal(mobj.group(2)) + Decimal(mobj.group(3)) + Decimal(mobj.group(4))
psivar['MP2 TOTAL ENERGY'] = mobj.group(6)
# Process MP3
mobj = re.search(
r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp3r')
dmp2 = Decimal(mobj.group(1))
dmp3 = Decimal(mobj.group(3))
psivar['MP2 CORRELATION ENERGY'] = dmp2
psivar['MP2 TOTAL ENERGY'] = mobj.group(2)
psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3
psivar['MP3 TOTAL ENERGY'] = mobj.group(4)
psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY']
mobj = re.search(
r'^\s+' + r'(?:S-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:S-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp3ro')
dmp2 = Decimal(mobj.group(1)) + Decimal(mobj.group(3))
dmp3 = Decimal(mobj.group(5)) + Decimal(mobj.group(7))
psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3
psivar['MP3 TOTAL ENERGY'] = mobj.group(8)
psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY']
# Process MP4
mobj = re.search(
r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:Q-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:S-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp4r')
dmp2 = Decimal(mobj.group(1))
dmp3 = Decimal(mobj.group(3))
dmp4sdq = Decimal(mobj.group(5)) + Decimal(mobj.group(7)) + Decimal(mobj.group(9))
psivar['MP2 CORRELATION ENERGY'] = dmp2
psivar['MP2 TOTAL ENERGY'] = mobj.group(2)
psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3
psivar['MP3 TOTAL ENERGY'] = mobj.group(4)
psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY']
psivar['MP4(SDQ) CORRELATION ENERGY'] = dmp2 + dmp3 + dmp4sdq
psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(10)
mobj = re.search(
r'^\s+' + r'(?:S-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:S-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:L-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:NL-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp4ro')
dmp2 = Decimal(mobj.group(1)) + Decimal(mobj.group(3))
dmp3 = Decimal(mobj.group(5)) + Decimal(mobj.group(7))
dmp4sdq = Decimal(mobj.group(9)) + Decimal(mobj.group(11))
psivar['MP2 CORRELATION ENERGY'] = dmp2
psivar['MP2 TOTAL ENERGY'] = mobj.group(4)
psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3
psivar['MP3 TOTAL ENERGY'] = mobj.group(8)
psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY']
psivar['MP4(SDQ) CORRELATION ENERGY'] = dmp2 + dmp3 + dmp4sdq
psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(12)
mobj = re.search(
r'^\s+' + r'(?:D-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:Q-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:S-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:T-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp4tr')
dmp4sdq = Decimal(mobj.group(1)) + Decimal(mobj.group(3)) + Decimal(mobj.group(5))
dmp4t = Decimal(mobj.group(7))
psivar['MP4(SDQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq
psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(6)
psivar['MP4(T) CORRECTION ENERGY'] = dmp4t
psivar['MP4(SDTQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq + dmp4t
psivar['MP4(SDTQ) TOTAL ENERGY'] = mobj.group(8)
psivar['MP4 CORRELATION ENERGY'] = psivar['MP4(SDTQ) CORRELATION ENERGY']
psivar['MP4 TOTAL ENERGY'] = psivar['MP4(SDTQ) TOTAL ENERGY']
mobj = re.search(
r'^\s+' + r'(?:L-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:NL-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:WT12-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:T-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp4tro')
dmp4sdq = Decimal(mobj.group(1)) + Decimal(mobj.group(3))
dmp4t = Decimal(mobj.group(5)) + Decimal(mobj.group(7)) # TODO: WT12 with T, not SDQ?
psivar['MP4(SDQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq
psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(4)
psivar['MP4(T) CORRECTION ENERGY'] = dmp4t
psivar['MP4(SDTQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq + dmp4t
psivar['MP4(SDTQ) TOTAL ENERGY'] = mobj.group(8)
psivar['MP4 CORRELATION ENERGY'] = psivar['MP4(SDTQ) CORRELATION ENERGY']
psivar['MP4 TOTAL ENERGY'] = psivar['MP4(SDTQ) TOTAL ENERGY']
# Process CC Iterations
mobj = re.search(
r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:\d+)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+DIIS\s*' +
r'^\s*(?:-+)\s*' +
r'^\s*(?:A miracle (?:has come|come) to pass. The CC iterations have converged.)\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched cc with full %s iterating %s' % (mobj.group('fullCC'), mobj.group('iterCC')))
psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(3)
psivar['%s TOTAL ENERGY' % (mobj.group('iterCC'))] = mobj.group(4)
# Process CC(T)
mobj = re.search(
r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s+' + NUMBER + r'\s+a\.u\.\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:E\(CCSD\))' + r'\s+=\s+' + NUMBER + r'\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:E\(CCSD\(T\)\))' + r'\s+=\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched ccsd(t) vcc')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
psivar['CCSD TOTAL ENERGY'] = mobj.group(2)
psivar['(T) CORRECTION ENERGY'] = Decimal(mobj.group(3)) - Decimal(mobj.group(2))
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(3)) - Decimal(mobj.group(1))
psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(3)
mobj = re.search(
r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s*' + NUMBER + r'\s+a\.u\.\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:CCSD energy)' + r'\s+' + NUMBER + r'\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:Total perturbative triples energy:)' + r'\s+' + NUMBER + r'\s*' +
r'^\s*(?:-+)\s*' +
r'^\s+' + r'(?:CCSD\(T\) energy)' + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched ccsd(t) ecc')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
psivar['CCSD TOTAL ENERGY'] = mobj.group(2)
psivar['(T) CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(4)) - Decimal(mobj.group(1))
psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(4)
mobj = re.search(
r'^\s+' + r'(?:HF-SCF energy)' + r'\s+' + NUMBER + r'\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:CCSD energy)' + r'\s+' + NUMBER + r'\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:E4T \+ E5ST)' + r'\s+' + NUMBER + r'\s*' +
r'(?:.*?)' +
r'^\s*(?:-+)\s*' +
r'^\s+' + r'(?:CCSD\(T\) energy)' + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched ccsd(t) ecc v2')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
psivar['CCSD TOTAL ENERGY'] = mobj.group(2)
psivar['(T) CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(4)) - Decimal(mobj.group(1))
psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(4)
mobj = re.search(
r'^\s+' + r'(?:CCSD energy)' + r'\s+' + NUMBER + r'\s*' +
r'^\s*(?:-+)\s*' +
r'^\s+' + r'(?:CCSD\(T\) energy)' + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched ccsd(t) lamb')
psivar['CCSD TOTAL ENERGY'] = mobj.group(1)
psivar['(T) CORRECTION ENERGY'] = Decimal(mobj.group(2)) - Decimal(mobj.group(1))
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(2)) - psivar['SCF TOTAL ENERGY']
psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(2)
# Process SCS-CC
mobj = re.search(
r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' +
r'(?:.*?)' +
r'^\s*' + r'(?:@CCENRG-I, Correlation energies.)' + r'\s+(?:ECCAA)\s+' + NUMBER + r'\s*' +
r'^\s+(?:ECCBB)\s+' + NUMBER + '\s*' +
r'^\s+(?:ECCAB)\s+' + NUMBER + '\s*' +
r'^\s+(?:Total)\s+' + NUMBER + '\s*',
outtext, re.MULTILINE | re.DOTALL)
if mobj: # PRINT=2 to get SCS-CC components
print('matched scscc')
psivar['%s SAME-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = Decimal(mobj.group(3)) + Decimal(mobj.group(4))
psivar['%s OPPOSITE-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(5)
psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(6)
mobj = re.search(
r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' +
r'(?:.*?)' +
r'^\s+' + r'Amplitude equations converged in' + r'\s*\d+\s*' + r'iterations.\s*' +
r'(?:.*?)' +
r'^\s+' + r'The AA contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'The BB contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'The AB contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'The total correlation energy is\s+' + NUMBER + r'\s+a.u.\s*' +
r'(?:.*?)' +
#r'^\s+' + r'The CC iterations have converged.' + r'\s*$',
r'^\s+' + r'(?:A miracle come to pass. )?' + r'The CC iterations have converged.' + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj: # PRINT=2 to get SCS components
print('matched scscc2')
psivar['%s SAME-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = Decimal(mobj.group(3)) + Decimal(mobj.group(4))
psivar['%s OPPOSITE-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(5)
psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(6)
# Process gradient
mobj = re.search(
r'\s+' + r'Molecular gradient' + r'\s*' +
r'\s+' + r'------------------' + r'\s*' +
r'\s+' + r'\n' +
r'(?:(?:\s+[A-Z]+\s*#\d+\s+[xyz]\s+[-+]?\d+\.\d+\s*\n)+)' + # optional, it seems
r'\n\n' + # optional, it seems
r'((?:\s+[A-Z]+\s*#\d+\s+\d?\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' +
r'\n\n' +
r'\s+' + 'Molecular gradient norm',
outtext, re.MULTILINE)
if mobj:
print('matched molgrad')
atoms = []
psivar_grad = []
for line in mobj.group(1).splitlines():
lline = line.split()
atoms.append(lline[0])
#psivar_gradient.append([Decimal(lline[-3]), Decimal(lline[-2]), Decimal(lline[-1])])
psivar_grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])])
# Process geometry
mobj = re.search(
# r'\s+(?:-+)\s*' +
# r'^\s+' + r'Z-matrix Atomic Coordinates (in bohr)' + r'\s*' +
        r'^\s+' + r'Symbol\s+Number\s+X\s+Y\s+Z' + r'\s*' +
r'^\s+(?:-+)\s*' +
r'((?:\s+[A-Z]+\s+[0-9]+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' +
r'^\s+(?:-+)\s*',
outtext, re.MULTILINE)
if mobj:
print('matched geom')
molxyz = '%d bohr\n\n' % len(mobj.group(1).splitlines())
for line in mobj.group(1).splitlines():
lline = line.split()
molxyz += '%s %16s %16s %16s\n' % (lline[0], lline[-3], lline[-2], lline[-1])
# Rather a dinky Molecule as no ghost, charge, or multiplicity
psivar_coord = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
# Process atom geometry
mobj = re.search(
r'^\s+' + r'@GETXYZ-I, 1 atoms read from ZMAT.' + r'\s*' +
r'^\s+' + r'[0-9]+\s+([A-Z]+)\s+[0-9]+\s+' + NUMBER + r'\s*',
outtext, re.MULTILINE)
if mobj:
print('matched atom')
# Dinky Molecule
molxyz = '1 bohr\n\n%s 0.0 0.0 0.0\n' % (mobj.group(1))
psivar_coord = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
# Process error codes
mobj = re.search(
r'^\s*' + r'--executable ' + r'(\w+)' + r' finished with status' + r'\s+' + r'([1-9][0-9]*)',
outtext, re.MULTILINE)
if mobj:
print('matched error')
psivar['CFOUR ERROR CODE'] = mobj.group(2)
# Process CURRENT energies (TODO: needs better way)
if 'SCF TOTAL ENERGY' in psivar:
psivar['CURRENT REFERENCE ENERGY'] = psivar['SCF TOTAL ENERGY']
psivar['CURRENT ENERGY'] = psivar['SCF TOTAL ENERGY']
psivar['HF TOTAL ENERGY'] = psivar['SCF TOTAL ENERGY']
if 'MP2 TOTAL ENERGY' in psivar and 'MP2 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP2 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP2 TOTAL ENERGY']
if 'MP3 TOTAL ENERGY' in psivar and 'MP3 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP3 TOTAL ENERGY']
if 'MP4 TOTAL ENERGY' in psivar and 'MP4 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP4 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP4 TOTAL ENERGY']
# if ('%s TOTAL ENERGY' % (mobj.group('fullCC')) in psivar) and \
# ('%s CORRELATION ENERGY' % (mobj.group('fullCC')) in psivar):
# psivar['CURRENT CORRELATION ENERGY'] = psivar['%s CORRELATION ENERGY' % (mobj.group('fullCC')]
# psivar['CURRENT ENERGY'] = psivar['%s TOTAL ENERGY' % (mobj.group('fullCC')]
if 'CC2 TOTAL ENERGY' in psivar and 'CC2 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CC2 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CC2 TOTAL ENERGY']
if 'CCSD TOTAL ENERGY' in psivar and 'CCSD CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD TOTAL ENERGY']
if 'CCSD(T) TOTAL ENERGY' in psivar and 'CCSD(T) CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T) CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T) TOTAL ENERGY']
if 'CC3 TOTAL ENERGY' in psivar and 'CC3 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CC3 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CC3 TOTAL ENERGY']
if 'CCSDT TOTAL ENERGY' in psivar and 'CCSDT CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSDT CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSDT TOTAL ENERGY']
return psivar, psivar_coord, psivar_grad
def harvest(p4Mol, c4out, **largs):
"""Parses all the pieces of output from Cfour: the stdout in
*c4out* and the contents of various scratch files like GRD stored
in their namesake keys in *largs*. Since all Cfour output uses
its own orientation and atom ordering for the given molecule,
a qcdb.Molecule *p4Mol*, if supplied, is used to transform the
Cfour output back into consistency with *p4Mol*.
"""
# Collect results from output file and subsidiary files
outPsivar, outMol, outGrad = harvest_output(c4out)
if 'GRD' in largs:
grdMol, grdGrad = harvest_GRD(largs['GRD'])
else:
grdMol, grdGrad = None, None
if 'FCMFINAL' in largs:
fcmHess = harvest_FCM(largs['FCMFINAL'])
else:
fcmHess = None
if 'DIPOL' in largs:
dipolDip = harvest_DIPOL(largs['DIPOL'])
else:
dipolDip = None
# Reconcile the coordinate information: several cases
# Case p4Mol GRD Check consistency Apply orientation? ReturnMol (1-19-2014)
# sp with mol thru cfour {} None None outMol N.C. outMol
# opt with mol thru cfour {} None grdMol outMol && grdMol N.C. grdMol
# sp with mol thru molecule {} p4Mol None p4Mol && outMol p4Mol <-- outMol p4Mol (same as input arg)
# opt with mol thru molecule {} p4Mol grdMol p4Mol && outMol && grdMol p4Mol <-- grdMol p4Mol (same as input arg)
if outMol:
if grdMol:
if abs(outMol.nuclear_repulsion_energy() - grdMol.nuclear_repulsion_energy()) > 1.0e-3:
raise ValidationError("""Cfour outfile (NRE: %f) inconsistent with Cfour GRD (NRE: %f).""" % \
(outMol.nuclear_repulsion_energy(), grdMol.nuclear_repulsion_energy()))
if p4Mol:
if abs(outMol.nuclear_repulsion_energy() - p4Mol.nuclear_repulsion_energy()) > 1.0e-3:
raise ValidationError("""Cfour outfile (NRE: %f) inconsistent with Psi4 input (NRE: %f).""" % \
(outMol.nuclear_repulsion_energy(), p4Mol.nuclear_repulsion_energy()))
else:
raise ValidationError("""No coordinate information extracted from Cfour output.""")
# print ' <<< [1] P4-MOL >>>'
# if p4Mol:
# p4Mol.print_out_in_bohr()
# print ' <<< [2] C4-OUT-MOL >>>'
# if outMol:
# outMol.print_out_in_bohr()
# print ' <<< [3] C4-GRD-MOL >>>'
# if grdMol:
# grdMol.print_out_in_bohr()
# Set up array reorientation object
if p4Mol and grdMol:
p4c4 = OrientMols(p4Mol, grdMol)
oriCoord = p4c4.transform_coordinates2(grdMol)
oriGrad = p4c4.transform_gradient(grdGrad)
oriDip = None if dipolDip is None else p4c4.transform_vector(dipolDip)
elif p4Mol and outMol:
p4c4 = OrientMols(p4Mol, outMol)
oriCoord = p4c4.transform_coordinates2(outMol)
oriGrad = None
oriDip = None if dipolDip is None else p4c4.transform_vector(dipolDip)
elif outMol:
oriCoord = None
oriGrad = None
oriDip = None if dipolDip is None else dipolDip
# print p4c4
# print ' <<< [4] C4-ORI-MOL >>>'
# if oriCoord is not None:
# for item in oriCoord:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
#
# print ' <<< [1] C4-GRD-GRAD >>>'
# if grdGrad is not None:
# for item in grdGrad:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# print ' <<< [2] C4-ORI-GRAD >>>'
# if oriGrad is not None:
# for item in oriGrad:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
retMol = None if p4Mol else grdMol
if oriDip:
outPsivar['CURRENT DIPOLE X'] = str(oriDip[0] * psi_dipmom_au2debye)
outPsivar['CURRENT DIPOLE Y'] = str(oriDip[1] * psi_dipmom_au2debye)
outPsivar['CURRENT DIPOLE Z'] = str(oriDip[2] * psi_dipmom_au2debye)
if oriGrad:
retGrad = oriGrad
elif grdGrad:
retGrad = grdGrad
else:
retGrad = None
return outPsivar, retGrad, retMol
def harvest_GRD(grd):
"""Parses the contents *grd* of the Cfour GRD file into the gradient
array and coordinate information. The coordinate info is converted
into a rather dinky Molecule (no charge, multiplicity, or fragment),
    but it is these coordinates that govern how Cfour reads the molecule
    orientation. Return qcdb.Molecule and gradient array.
"""
grd = grd.splitlines()
Nat = int(grd[0].split()[0])
molxyz = '%d bohr\n\n' % (Nat)
grad = []
for at in range(Nat):
mline = grd[at + 1].split()
el = 'GH' if int(float(mline[0])) == 0 else z2el[int(float(mline[0]))]
molxyz += '%s %16s %16s %16s\n' % (el, mline[-3], mline[-2], mline[-1])
lline = grd[at + 1 + Nat].split()
grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])])
mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
return mol, grad
def harvest_zmat(zmat):
"""Parses the contents of the Cfour ZMAT file into array and
coordinate information. The coordinate info is converted into a
rather dinky Molecule (no fragment, but does read charge, mult,
    unit). Return qcdb.Molecule. Written for findif zmat* files, where the
    geometry is always Cartesian and in Bohr.
"""
zmat = zmat.splitlines()[1:] # skip comment line
Nat = 0
readCoord = True
isBohr = ''
charge = 0
mult = 1
molxyz = ''
cgeom = []
for line in zmat:
if line.strip() == '':
readCoord = False
elif readCoord:
lline = line.split()
molxyz += line + '\n'
Nat += 1
else:
if line.find('CHARGE') > -1:
idx = line.find('CHARGE')
charge = line[idx + 7:]
idxc = charge.find(',')
if idxc > -1:
charge = charge[:idxc]
charge = int(charge)
if line.find('MULTIPLICITY') > -1:
idx = line.find('MULTIPLICITY')
mult = line[idx + 13:]
idxc = mult.find(',')
if idxc > -1:
mult = mult[:idxc]
mult = int(mult)
if line.find('UNITS=BOHR') > -1:
isBohr = ' bohr'
molxyz = '%d%s\n%d %d\n' % (Nat, isBohr, charge, mult) + molxyz
mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
return mol
def harvest_FCM(fcm):
"""Parses the contents *fcm* of the Cfour FCMFINAL file into a hessian array.
"""
fcm = fcm.splitlines()
Nat = int(fcm[0].split()[0])
Ndof = int(fcm[0].split()[1])
empty = True
hess = []
for df in range(Ndof):
for at in range(Nat):
lline = fcm[Ndof * at + at + 1].split()
if empty:
if (abs(float(lline[0])) > 1.0e-8) or \
(abs(float(lline[1])) > 1.0e-8) or \
(abs(float(lline[2])) > 1.0e-8):
empty = False
            hess.append([float(lline[0]), float(lline[1]), float(lline[2])])
return None if empty else hess
def harvest_DIPOL(dipol):
"""Parses the contents *dipol* of the Cfour DIPOL file into a dipol vector.
"""
dipol = dipol.splitlines()
lline = dipol[0].split()
dip = [float(lline[0]), float(lline[1]), float(lline[2])]
#return None if empty else dip
return dip
def muster_memory(mem):
"""Transform input *mem* in MB into psi4-type options for cfour.
"""
text = ''
# prepare memory keywords to be set as c-side keywords
options = defaultdict(lambda: defaultdict(dict))
options['CFOUR']['CFOUR_MEMORY_SIZE']['value'] = int(mem)
options['CFOUR']['CFOUR_MEM_UNIT']['value'] = 'MB'
for item in options['CFOUR']:
options['CFOUR'][item]['clobber'] = True
return text, options
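# A minimal illustration (not part of the original module) of what muster_memory()
# returns; the keyword names are exactly those set above:
#
#     >>> text, opts = muster_memory(2000)
#     >>> opts['CFOUR']['CFOUR_MEMORY_SIZE']['value'], opts['CFOUR']['CFOUR_MEM_UNIT']['value']
#     (2000, 'MB')
#     >>> opts['CFOUR']['CFOUR_MEMORY_SIZE']['clobber']
#     True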
# Ways of modifying a computation
# global: set global c-side option
# local: set local c-side option
# kwarg: set kwarg
# i-local: set global=local c-side option to an interface module
# ro-def: code uses default entirely specified by read_options
# module-def: code uses default that is complex mixture of read_options settings
# i-def: interfaced code uses defaults not entirely expressed in read_options
# driver-def: driver code sets complex defaults
#
# Pure psi4 operation
# kwarg ~= local > global > driver-def > module-def > ro-def
#
# Interfaced psi4 operation
# kwarg ~= i-local > local > global > driver-def > i-def
#   P4 infrastructure replacing interfaced infrastructure (mol, basis, mem): where there is
#   unavoidable overlap in how things are specified (e.g., mult in mol{} vs. a keyword), the
#   setting is treated as a clobber and complains on conflict. In contrast, P4 infrastructure
#   acting as an aliased/convenient leak into the interfaced infrastructure (psi) is strictly
#   no-clobber and no-complain.
def muster_psi4options(opt):
"""Translate psi4 keywords *opt* that have been explicitly set into
their Cfour counterparts. Since explicitly set Cfour module keyword
values will always be used preferentially to these inferred from
psi4, the 'clobber' property is set to False.
"""
text = ''
options = defaultdict(lambda: defaultdict(dict))
if 'GLOBALS' in opt:
if 'PUREAM' in opt['GLOBALS']:
options['CFOUR']['CFOUR_SPHERICAL']['value'] = \
                opt['GLOBALS']['PUREAM']['value']
if 'SCF' in opt:
if 'REFERENCE' in opt['SCF']:
options['CFOUR']['CFOUR_REFERENCE']['value'] = \
{'RHF': 'RHF',
'UHF': 'UHF',
'ROHF': 'ROHF'}[opt['SCF']['REFERENCE']['value']]
if 'D_CONVERGENCE' in opt['SCF']:
options['CFOUR']['CFOUR_SCF_CONV']['value'] = \
conv_float2negexp(opt['SCF']['D_CONVERGENCE']['value'])
if 'MAXITER' in opt['SCF']:
options['CFOUR']['CFOUR_SCF_MAXCYC']['value'] = \
opt['SCF']['MAXITER']['value']
if 'DAMPING_PERCENTAGE' in opt['SCF']:
options['CFOUR']['CFOUR_SCF_DAMPING']['value'] = \
int(10 * opt['SCF']['DAMPING_PERCENTAGE']['value'])
for item in options['CFOUR']:
options['CFOUR'][item]['clobber'] = False
return text, options
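# Illustrative sketch (not in the original module) of the psi4 -> Cfour keyword
# translation above, using a small hypothetical pre-parsed options dict:
#
#     >>> opt = {'SCF': {'REFERENCE': {'value': 'UHF'}, 'MAXITER': {'value': 200}}}
#     >>> _, c4opts = muster_psi4options(opt)
#     >>> c4opts['CFOUR']['CFOUR_REFERENCE']['value'], c4opts['CFOUR']['CFOUR_SCF_MAXCYC']['value']
#     ('UHF', 200)
#     >>> c4opts['CFOUR']['CFOUR_REFERENCE']['clobber']    # inferred settings never clobber
#     False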
# Philosophy break:
# Specification options
# Massaging options
# * No program's defaults should be tampered with w/o provocation
# want all defaults applied to all programs, so p4 scf_conv is 5 and c4 scf_conv is 5
# want separate regimes, so conv 6 covers all the p4 parts and cfour_conv = 8 covers the c4 parts
# want mixture, so basis gets applied to c4 but others don't
# first case, when options specified explicitly
# [scf][d_convergence] [cfour][cfour_scf_conv] what happens?
# 8 from opt() 7 by default
# 6 from set {...} 7 by default 6 (guideline that psi4 format converts when clear)
# 8 from opt() 5 from set {...} 5 (local trumps)
# 6 from set {...} 5 from set {...} 5 (local trumps)
#
# energy(name) [cfour][cfour_calc_level]
# c4-scf SCF by default
# c4-scf CCSD from set {...}
def muster_modelchem(name, dertype):
"""Transform calculation method *name* and derivative level *dertype*
into options for cfour. While deliberately requested pieces,
generally |cfour__cfour_deriv_level| and |cfour__cfour_calc_level|,
are set to complain if contradicted ('clobber' set to True), other
'recommended' settings, like |cfour__cfour_cc_program|, can be
countermanded by keywords in input file ('clobber' set to False).
Occasionally, want these pieces to actually overcome keywords in
input file ('superclobber' set to True).
"""
text = ''
lowername = name.lower()
options = defaultdict(lambda: defaultdict(dict))
if dertype == 0:
if lowername == 'cfour':
pass # permit clean operation of sandwich mode
else:
options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'ZERO'
elif dertype == 1:
options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'FIRST'
elif dertype == 2:
options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'SECOND'
else:
raise ValidationError("""Requested Cfour dertype %d is not available.""" % (dertype))
if lowername == 'cfour':
pass
elif lowername in ['c4-scf', 'c4-hf']:
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'SCF'
elif lowername == 'c4-mp2':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP2'
elif lowername == 'c4-mp3':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP3'
elif lowername == 'c4-mp4(sdq)':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'SDQ-MP4'
elif lowername == 'c4-mp4':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP4'
elif lowername == 'c4-cc2':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CC2'
elif lowername == 'c4-ccsd':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD'
options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC'
elif lowername == 'c4-cc3':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CC3'
elif lowername == 'c4-ccsd(t)':
# Can't use (T) b/c bug in xsymcor lops it off
#options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD(T)'
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD[T]'
options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC'
elif lowername == 'c4-ccsdt':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSDT'
options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC'
elif lowername == 'c4-ccsdt(q)':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSDT(Q)'
options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'NCC'
elif lowername == 'c4-ccsdtq':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSDTQ'
options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'NCC'
else:
raise ValidationError("""Requested Cfour computational methods %d is not available.""" % (lowername))
# Set clobbering
if 'CFOUR_DERIV_LEVEL' in options['CFOUR']:
options['CFOUR']['CFOUR_DERIV_LEVEL']['clobber'] = True
options['CFOUR']['CFOUR_DERIV_LEVEL']['superclobber'] = True
if 'CFOUR_CALC_LEVEL' in options['CFOUR']:
options['CFOUR']['CFOUR_CALC_LEVEL']['clobber'] = True
options['CFOUR']['CFOUR_CALC_LEVEL']['superclobber'] = True
if 'CFOUR_CC_PROGRAM' in options['CFOUR']:
options['CFOUR']['CFOUR_CC_PROGRAM']['clobber'] = False
return text, options
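# Illustrative sketch (not in the original module) of the method/dertype mapping above:
#
#     >>> _, c4opts = muster_modelchem('c4-ccsd(t)', 1)
#     >>> c4opts['CFOUR']['CFOUR_DERIV_LEVEL']['value']
#     'FIRST'
#     >>> c4opts['CFOUR']['CFOUR_CALC_LEVEL']['value']    # CCSD[T] spelling dodges the xsymcor bug
#     'CCSD[T]'
#     >>> c4opts['CFOUR']['CFOUR_CC_PROGRAM']['clobber']    # recommendation only, user may override
#     False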
def cfour_list():
"""Return an array of Cfour methods with energies. Appended
to procedures['energy'].
"""
val = []
val.append('cfour')
val.append('c4-scf')
val.append('c4-hf')
val.append('c4-mp2')
val.append('c4-mp3')
val.append('c4-mp4(sdq)')
val.append('c4-mp4')
val.append('c4-cc2')
val.append('c4-ccsd')
val.append('c4-cc3')
val.append('c4-ccsd(t)')
val.append('c4-ccsdt')
val.append('c4-ccsdt(q)')
val.append('c4-ccsdtq')
return val
def cfour_gradient_list():
"""Return an array of Cfour methods with analytical gradients.
Appended to procedures['gradient'].
"""
val = []
val.append('cfour')
val.append('c4-scf')
val.append('c4-hf')
val.append('c4-mp2')
val.append('c4-mp3')
val.append('c4-mp4(sdq)')
val.append('c4-mp4')
val.append('c4-cc2')
val.append('c4-ccsd')
val.append('c4-cc3')
val.append('c4-ccsd(t)')
val.append('c4-ccsdt')
return val
def cfour_psivar_list():
"""Return a dict with keys of most Cfour methods and values of dicts
with the PSI Variables returned by those methods. Used by cbs()
wrapper to avoid unnecessary computations in compound methods.
Result is appended to ``VARH``.
"""
VARH = {}
VARH['c4-scf'] = {
'c4-scf': 'SCF TOTAL ENERGY'}
VARH['c4-hf'] = {
'c4-hf': 'HF TOTAL ENERGY'}
VARH['c4-mp2'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY'}
VARH['c4-mp3'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY',
'c4-mp2.5': 'MP2.5 TOTAL ENERGY',
'c4-mp3': 'MP3 TOTAL ENERGY'}
VARH['c4-mp4(sdq)'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY',
'c4-mp2.5': 'MP2.5 TOTAL ENERGY',
'c4-mp3': 'MP3 TOTAL ENERGY',
'c4-mp4(sdq)': 'MP4(SDQ) TOTAL ENERGY'}
VARH['c4-mp4'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY',
'c4-mp2.5': 'MP2.5 TOTAL ENERGY',
'c4-mp3': 'MP3 TOTAL ENERGY',
'c4-mp4(sdq)': 'MP4(SDQ) TOTAL ENERGY',
'c4-mp4': 'MP4(SDTQ) TOTAL ENERGY'}
VARH['c4-cc2'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY',
'c4-cc2': 'CC2 TOTAL ENERGY'}
VARH['c4-ccsd'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY',
'c4-ccsd': 'CCSD TOTAL ENERGY'}
VARH['c4-cc3'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY',
'c4-cc3': 'CC3 TOTAL ENERGY'}
VARH['c4-ccsd(t)'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY',
'c4-ccsd': 'CCSD TOTAL ENERGY',
'c4-ccsd(t)': 'CCSD(T) TOTAL ENERGY'}
VARH['c4-ccsdt'] = {
'c4-hf': 'HF TOTAL ENERGY',
'c4-mp2': 'MP2 TOTAL ENERGY',
'c4-ccsd': 'CCSD TOTAL ENERGY',
'c4-ccsdt': 'CCSDT TOTAL ENERGY'}
return VARH
#def backtransform(chgeMol, permMol, chgeGrad=None, chgeDip=None):
#def format_fjobarc(fje, fjelem, fjcoord, fjgrd, map, fjdip):
def format_fjobarc(energy, map, elem, coordinates, gradient, dipole):
"""Takes the key results from a gradient computation (*energy*,
element Z list *elem*, *coordinates*, *gradient*,
*dipole*, and atom ordering *map*) and writes a string *fja*
that exactly mimics the contents of a Cfour FJOBARC file.
"""
fja = 'TOTENERG\n'
fja += '%15d%15d\n' % (struct.unpack("ii", struct.pack("d", energy)))
fja += 'COORD\n'
Nat = len(coordinates)
flatcoord = []
for at in range(Nat):
for xyz in range(3):
flatcoord.append(coordinates[map[at]][xyz])
for idx in range(len(flatcoord)):
if abs(flatcoord[idx]) < 1.0E-14: # TODO
flatcoord[idx] = 0.0
fja += '%15d%15d' % (struct.unpack("ii", struct.pack("d", flatcoord[idx])))
if idx % 2 == 1:
fja += '\n'
if len(flatcoord) % 2 == 1:
fja += '\n'
fja += 'MAP2ZMAT\n'
for idx in range(Nat):
fja += '%15d%15d' % (struct.unpack("ii", struct.pack("l", map[idx] + 1)))
if idx % 2 == 1:
fja += '\n'
if Nat % 2 == 1:
fja += '\n'
fja += 'GRD FILE\n'
fja += '%5d%20.10f\n' % (Nat, 0.0)
for at in range(Nat):
fja += '%20.10f%20.10f%20.10f%20.10f\n' % (elem[at], coordinates[at][0], coordinates[at][1], coordinates[at][2])
for at in range(Nat):
fja += '%20.10f%20.10f%20.10f%20.10f\n' % (elem[at], gradient[at][0], gradient[at][1], gradient[at][2])
fja += 'DIPOL FILE\n'
fja += '%20.10f%20.10f%20.10f\n' % (dipole[0], dipole[1], dipole[2])
return fja
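# The '%15d%15d' % struct.unpack("ii", struct.pack("d", x)) idiom above reinterprets the
# eight bytes of a double as two native ints, which is how FJOBARC stores floating-point
# fields. A small round-trip sketch (not from the original source) showing that the
# reinterpretation is lossless on the same machine:
#
#     >>> import struct
#     >>> x = -76.026760737428
#     >>> lo, hi = struct.unpack("ii", struct.pack("d", x))
#     >>> struct.unpack("d", struct.pack("ii", lo, hi))[0] == x
#     True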
def backtransform(chgeMol, permMol, chgeGrad=None, chgeDip=None):
"""Here, *chgeMol* and *chgeGrd* need to be turned into the native Cfour
orientation embodied by *permMol*. Currently for vpt2.
"""
# Set up array reorientation object
p4c4 = OrientMols(permMol, chgeMol) # opposite than usual
oriCoord = p4c4.transform_coordinates2(chgeMol)
p4Elem = []
for at in range(chgeMol.natom()):
p4Elem.append(chgeMol.Z(at))
oriElem = p4c4.transform_elementlist(p4Elem)
oriElemMap = p4c4.Catommap
oriGrad = None if chgeGrad is None else p4c4.transform_gradient(chgeGrad)
oriDip = None if chgeDip is None else p4c4.transform_vector(chgeDip)
if chgeGrad and chgeDip:
return oriElemMap, oriElem, oriCoord, oriGrad, oriDip
else:
return oriElemMap, oriElem, oriCoord
#def backtransform_grad(p4Mol, c4Mol, p4Grd, p4Dip):
# """Here, p4Mol and p4Grd need to be turned into the native Cfour
# orientation embodied by c4Mol. Currently for vpt2.
#
# """
# # Set up array reorientation object
# p4c4 = OrientMols(c4Mol, p4Mol) # opposite than usual
# oriCoord = p4c4.transform_coordinates2(p4Mol)
# oriGrad = p4c4.transform_gradient(p4Grd)
# p4Elem = []
# for at in range(p4Mol.natom()):
# p4Elem.append(p4Mol.Z(at))
# oriElem = p4c4.transform_elementlist(p4Elem)
# oriElemMap = p4c4.Catommap
# oriDip = p4c4.transform_vector(p4Dip)
#
# #print p4c4
# #print ' <<< Input C4 Mol >>>'
# #c4Mol.print_out()
# #print ' <<< Input P4 Mol >>>'
# #p4Mol.print_out()
# #print ' <<< Input P4 Grad >>>'
# #if p4Grd is not None:
# # for item in p4Grd:
# # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# #print ' <<< Rotated P4 Coord >>>'
# #if oriCoord is not None:
# # for item in oriCoord:
# # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# #print ' <<< Rotated P4 Elem >>>'
# #if oriElem is not None:
# # for item in oriElem :
# # print(' %16.8f' % (item))
# #print ' <<< Rotated P4 Dip >>>'
# #if oriDip is not None:
# # print(' %16.8f %16.8f %16.8f' % (oriDip[0], oriDip[1], oriDip[2]))
# #print ' <<< Rotated P4 Grad >>>'
# #if oriGrad is not None:
# # for item in oriGrad:
# # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
#
# return oriElemMap, oriElem, oriCoord, oriGrad, oriDip
# #return oriElem, oriCoord, oriGrad, oriElemMap, oriDip
def jajo2mol(jajodic):
"""Returns a Molecule from entries in dictionary *jajodic* extracted
from JAINDX and JOBARC.
"""
map = jajodic['MAP2ZMAT']
elem = jajodic['ATOMCHRG']
coord = jajodic['COORD ']
Nat = len(elem)
molxyz = '%d bohr\n\n' % (Nat)
# TODO chgmult, though not really necessary for reorientation
for at in range(Nat):
posn = map[at] - 1
el = 'GH' if elem[posn] == 0 else z2el[elem[posn]]
posn *= 3
molxyz += '%s %21.15f %21.15f %21.15f\n' % (el, coord[posn], coord[posn + 1], coord[posn + 2])
mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
return mol
|
amjames/psi4
|
psi4/driver/qcdb/cfour.py
|
Python
|
lgpl-3.0
| 47,615
|
[
"CFOUR",
"Psi4"
] |
f472fbe5b010a5368a3170fc31ae7baffa4093fd453f91a1eee196aa9b07f265
|
#!/usr/bin/env python
"""
Create generic LPU and simple pulse input signal.
"""
from itertools import product
import sys
import numpy as np
import h5py
import networkx as nx
def create_lpu_graph(lpu_name, N_sensory, N_local, N_proj):
"""
Create a generic LPU graph.
Creates a graph containing the neuron and synapse parameters for an LPU
containing the specified number of local and projection neurons. The graph
also contains the parameters for a set of sensory neurons that accept
external input. All neurons are either spiking or graded potential neurons;
the Leaky Integrate-and-Fire model is used for the former, while the
Morris-Lecar model is used for the latter (i.e., the neuron's membrane
potential is deemed to be its output rather than the time when it emits an
action potential). Synapses use either the alpha function model or a
conductance-based model.
Parameters
----------
lpu_name : str
Name of LPU. Used in port identifiers.
N_sensory : int
Number of sensory neurons.
N_local : int
Number of local neurons.
N_proj : int
        Number of projection neurons.
Returns
-------
g : networkx.MultiDiGraph
Generated graph.
"""
# Set numbers of neurons:
neu_type = ('sensory', 'local', 'proj')
neu_num = (N_sensory, N_local, N_proj)
# Neuron ids are between 0 and the total number of neurons:
G = nx.MultiDiGraph()
in_port_idx = 0
spk_out_id = 0
gpot_out_id = 0
for (t, n) in zip(neu_type, neu_num):
for i in range(n):
id = t+"_"+str(i)
name = t+"_"+str(i)
# Half of the sensory neurons and projection neurons are
# spiking neurons. The other half are graded potential neurons.
# All local neurons are graded potential only.
if t != 'local' and np.random.rand() < 0.5:
G.add_node(id,
{'class': 'LeakyIAF',
'name': name+'_s',
'initV': np.random.uniform(-60.0,-25.0),
'reset_potential': -67.5489770451,
'resting_potential': 0.0,
'threshold': -25.1355161007,
'resistance': 1002.445570216,
'capacitance': 0.0669810502993,
'circuit': 'proj' if t == 'proj' else 'local'
})
# Projection neurons are all assumed to be attached to output
# ports (which are represented as separate nodes):
if t == 'proj':
G.add_node(id+'_port',
{'class': 'Port',
'name': name+'port',
'port_type': 'spike',
'port_io': 'out',
'selector': '/%s/out/spk/%s' % (lpu_name, str(spk_out_id))
})
G.add_edge(id, id+'_port')
spk_out_id += 1
else:
# An input port node is created for and attached to each non-projection
# neuron with a synapse; this assumes that data propagates from one LPU to
# another as follows:
# LPU0[projection neuron] -> LPU0[output port] -> LPU1[input port] ->
# LPU1[synapse] -> LPU1[non-projection neuron]
G.add_node('in_port'+str(in_port_idx),
{'class': 'Port',
'name': 'in_port'+str(in_port_idx),
'port_type': 'spike',
'port_io': 'in',
'selector': '/%s/in/spk/%s' % (lpu_name, in_port_idx)
})
G.add_node('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
{'class': 'AlphaSynapse',
'name': 'in_port'+str(in_port_idx)+'-'+name,
'ad': 0.19*1000,
'ar': 1.1*100,
'gmax': 0.003*1e-3,
'reverse': 65.0,
'circuit': 'local'
})
G.add_edge('in_port'+str(in_port_idx),
'synapse_'+'in_port'+str(in_port_idx)+'_to_'+id)
G.add_edge('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
id)
in_port_idx += 1
else:
G.add_node(id,
{'class': "MorrisLecar",
'name': name+'_g',
'V1': 30.,
'V2': 15.,
'V3': 0.,
'V4': 30.,
'phi': 0.025,
'offset': 0.,
'V_L': -50.,
'V_Ca': 100.0,
'V_K': -70.0,
'g_Ca': 1.1,
'g_K': 2.0,
'g_L': 0.5,
'initV': -52.14,
'initn': 0.02,
'circuit': 'proj' if t == 'proj' else 'local'
})
# Projection neurons are all assumed to be attached to output
                # ports (which are represented as separate nodes):
if t == 'proj':
G.add_node(id+'_port',
{'class': 'Port',
'name': name+'port',
'port_type': 'gpot',
'port_io': 'out',
'selector': '/%s/out/gpot/%s' % (lpu_name, str(gpot_out_id))
})
G.add_edge(id, id+'_port')
gpot_out_id += 1
else:
G.add_node('in_port'+str(in_port_idx),
{'class': 'Port',
'name': 'in_port'+str(in_port_idx),
'port_type': 'gpot',
'port_io': 'in',
'selector': '/%s/in/gpot/%s' % (lpu_name, in_port_idx)
})
G.add_node('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
{'class': 'PowerGPotGPot',
'name': 'in_port'+str(in_port_idx)+'-'+name,
'reverse': -80.0,
'saturation': 0.03*1e-3,
'slope': 0.8*1e-6,
'power': 1.0,
'threshold': -50.0,
'circuit': 'local'
})
G.add_edge('in_port'+str(in_port_idx),
'synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
delay = 0.001)
G.add_edge('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
id)
in_port_idx += 1
# Assume a probability of synapse existence for each group of synapses:
# sensory -> local, sensory -> projection, local -> projection,
# projection -> local:
for r, (i, j) in zip((0.5, 0.1, 0.1, 0.3),
((0, 1), (0, 2), (1, 2), (2, 1))):
for src, tar in product(range(neu_num[i]), range(neu_num[j])):
# Don't connect all neurons:
if np.random.rand() > r: continue
# Connections from the sensory neurons use the alpha function model;
# all other connections use the power_gpot_gpot model:
pre_id = neu_type[i]+"_"+str(src)
post_id = neu_type[j]+"_"+str(tar)
name = G.node[pre_id]['name'] + '-' + G.node[post_id]['name']
synapse_id = 'synapse_' + name
            if G.node[pre_id]['class'] == 'LeakyIAF':
G.add_node(synapse_id,
{'class' : 'AlphaSynapse',
'name' : name,
'ar' : 1.1*1e2,
'ad' : 1.9*1e3,
                        'reverse' : 65.0 if G.node[post_id]['class'] == 'LeakyIAF' else 10.0,
                        'gmax' : 3*1e-6 if G.node[post_id]['class'] == 'LeakyIAF' else 3.1e-7,
'circuit' : 'local'})
G.add_edge(pre_id, synapse_id)
G.add_edge(synapse_id, post_id)
else:
G.add_node(synapse_id,
{'class' : 'PowerGPotGPot',
'name' : name,
'slope' : 0.8*1e-6,
'threshold' : -50.0,
'power' : 1.0,
'saturation' : 0.03*1e-3,
'reverse' : -100.0,
'circuit' : 'local'})
G.add_edge(pre_id, synapse_id, delay = 0.001)
G.add_edge(synapse_id, post_id)
return G
def create_lpu(file_name, lpu_name, N_sensory, N_local, N_proj):
"""
Create a generic LPU graph.
Creates a GEXF file containing the neuron and synapse parameters for an LPU
containing the specified number of local and projection neurons. The GEXF
file also contains the parameters for a set of sensory neurons that accept
external input. All neurons are either spiking or graded potential neurons;
the Leaky Integrate-and-Fire model is used for the former, while the
Morris-Lecar model is used for the latter (i.e., the neuron's membrane
potential is deemed to be its output rather than the time when it emits an
action potential). Synapses use either the alpha function model or a
conductance-based model.
Parameters
----------
file_name : str
Output GEXF file name.
lpu_name : str
Name of LPU. Used in port identifiers.
N_sensory : int
Number of sensory neurons.
N_local : int
Number of local neurons.
N_proj : int
        Number of projection neurons.
Returns
-------
g : networkx.MultiDiGraph
Generated graph.
"""
g = create_lpu_graph(lpu_name, N_sensory, N_local, N_proj)
nx.write_gexf(g, file_name)
def create_input(file_name, N_sensory, dt=1e-4, dur=1.0, start=0.3, stop=0.6, I_max=0.6):
"""
Create input stimulus for sensory neurons in artificial LPU.
Creates an HDF5 file containing input signals for the specified number of
neurons. The signals consist of a rectangular pulse of specified duration
and magnitude.
Parameters
----------
file_name : str
Name of output HDF5 file.
    N_sensory : int
        Number of sensory neurons receiving the input stimulus.
dt : float
Time resolution of generated signal.
dur : float
Duration of generated signal.
start : float
Start time of signal pulse.
stop : float
Stop time of signal pulse.
I_max : float
Pulse magnitude.
"""
Nt = int(dur/dt)
t = np.arange(0, dt*Nt, dt)
uids = ["sensory_"+str(i) for i in range(N_sensory)]
uids = np.array(uids)
I = np.zeros((Nt, N_sensory), dtype=np.float64)
I[np.logical_and(t>start, t<stop)] = I_max
with h5py.File(file_name, 'w') as f:
f.create_dataset('I/uids', data=uids)
f.create_dataset('I/data', (Nt, N_sensory),
dtype=np.float64,
data=I)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('lpu_file_name', nargs='?', default='generic_lpu.gexf.gz',
help='LPU file name')
parser.add_argument('in_file_name', nargs='?', default='generic_input.h5',
help='Input file name')
parser.add_argument('-s', type=int,
help='Seed random number generator')
parser.add_argument('-l', '--lpu', type=str, default='gen',
help='LPU name')
args = parser.parse_args()
if args.s is not None:
np.random.seed(args.s)
dt = 1e-4
dur = 1.0
start = 0.3
stop = 0.6
I_max = 0.6
    neu_num = [np.random.randint(31, 40) for i in range(3)]
create_lpu(args.lpu_file_name, args.lpu, *neu_num)
g = nx.read_gexf(args.lpu_file_name)
create_input(args.in_file_name, neu_num[0], dt, dur, start, stop, I_max)
create_lpu(args.lpu_file_name, args.lpu, *neu_num)
|
AdamRTomkins/Neurokernel-singularity-container
|
examples/data/gen_generic_lpu.py
|
Python
|
apache-2.0
| 12,988
|
[
"NEURON"
] |
660554cd2995f9924cb88d473078f5dd57730f382aa79c1b20744c5741fa15b9
|
"""
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure(figsize=(8,8))
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
|
gtrichards/PHYS_T480
|
code/plot_ica_blind_source_separation.py
|
Python
|
mit
| 2,241
|
[
"Gaussian"
] |
88f27933a1a1d4a4d2cea8ee1d29b2e7abe6acea85a3da219e79e42558d150a7
|
"""
Tests to try and ensure that important mayavi imports work with no UI.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
import sys
import unittest
from traits.etsconfig.api import ETSConfig
class TestNoUIToolkit(unittest.TestCase):
"""Test if any important mayavi imports work with no UI
whatsoever."""
def setUp(self):
self.orig_tk = ETSConfig.toolkit
ETSConfig._toolkit = 'null'
# Import something from Pyface to force any potential imports
# from a UI toolkit. Why did I pick Pyface? Well, adder_node
# imports ImageResource and this seems to trigger some UI
# toolkit import and this makes life difficult as far as the
# testing goes. Forcing the issue here should let us test
# safely since the Pyface imports will be done.
from pyface.api import GUI
# Remove any references to wx and Qt
saved = {}
for mod in ['wx', 'PyQt4', 'PySide']:
saved[mod] = sys.modules.pop(mod, None)
self.saved = saved
def tearDown(self):
ETSConfig._toolkit = self.orig_tk
        # Add back any references to wx and Qt
for mod in ['wx', 'PyQt4', 'PySide']:
m = self.saved[mod]
if m is not None:
sys.modules[mod] = m
def test_no_ui(self):
"""Test if mayavi imports work without any UI (wx or PyQt4)."""
# These imports should work without any UI.
from mayavi import mlab
from mayavi.api import Engine
from mayavi.sources.api import VTKDataSource
from mayavi.filters.api import Optional
from mayavi.modules.api import Outline
from mayavi.preferences.api import preference_manager
# Should not have triggered an import wx or PyQt4.
self.assertEqual('wx' in sys.modules, False)
self.assertEqual('PyQt4' in sys.modules, False)
self.assertEqual('PySide' in sys.modules, False)
if __name__ == '__main__':
unittest.main()
|
dmsurti/mayavi
|
mayavi/tests/test_no_ui_toolkit.py
|
Python
|
bsd-3-clause
| 2,082
|
[
"Mayavi"
] |
b47eee9d50fc07ecad0078c13af110cc46507e5772184e5a2b8842f67e8d0972
|
"""This is a set of tools built up over time for working with Gaussian and
QChem input and output."""
########################################################################
# #
# #
# This script was written by Thomas Heavey in 2017. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2017 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
pass
|
thompcinnamon/QM-calc-scripts
|
gautools/__init__.py
|
Python
|
apache-2.0
| 1,715
|
[
"Gaussian"
] |
20e383d8f2fa772792f5ebe58f50f8a2d514b5bc04915da421c553bd724257ae
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Mapping, OrderedDict
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
try:
return which(name, path=os.pathsep.join(path))
except IOError:
return None
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
""" On systems where pg_restore/pg_dump require an explicit password (i.e.
on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if not env.get('PGPASSWORD') and openerp.tools.config['db_password']:
env['PGPASSWORD'] = openerp.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
    >>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r doesn\'t exist or deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis (christophe@tinyerp.com)
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
map(visit, elems[n])
result.append(n)
map(visit, elems)
return result
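# A small illustration (not part of the original docstring): dependencies listed in
# `elems` appear before their dependents, and dependencies absent from `elems` (here 'x')
# are silently dropped from the result:
#
#     >>> topological_sort({'a': [], 'b': ['a'], 'c': ['b', 'x']})
#     ['a', 'b', 'c']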
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
Don't use ! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
ALL_LANGUAGES = {
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BA': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_AU': u'English (AU)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'ja_JP': u'Japanese / 日本語',
'kab_DZ': u'Kabyle / Taqbaylit',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Dutch (BE) / Nederlands (BE)',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
    Output return: the same number with the recursive mod10 check digit
    appended
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
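# Illustration (not in the original source) of the recursive mod-10 check digit; the
# trailing character is the computed key, and non-digit characters are copied through
# without advancing it:
#
#     >>> mod10r('1234')
#     '12347'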
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
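# Intended usage (a sketch, not from the original source): decorate any function to have
# its arguments, result, and wall time logged at DEBUG level.
#
#     @logged
#     def compute(a, b=2):
#         return a + b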
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
       for binding to an interface, but it could be used as a basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
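# A minimal client-side sketch of the contract above (not part of this module;
# 'Europe/Brussels' stands in for whatever value common/timezone_get() returns):
#
#   import pytz
#   from datetime import datetime
#   server_tz = pytz.timezone('Europe/Brussels')
#   naive = datetime.strptime('2013-05-01 12:00:00', '%Y-%m-%d %H:%M:%S')
#   local_dt = server_tz.localize(naive).astimezone(pytz.timezone('US/Pacific'))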
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
        tz_value = False
        f = None
        try:
            f = open("/etc/timezone")
            tz_value = f.read(128).strip()
        except Exception:
            pass
        finally:
            if f is not None:
                f.close()
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
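# A minimal sketch of how the map above can be applied (an illustration, not a
# helper defined in this module): every non-portable directive is replaced by
# its C89 equivalent before the format string reaches strftime()/strptime().
#
#   def sanitize_strftime_format(fmt):
#       for directive, replacement in DATETIME_FORMATS_MAP.items():
#           fmt = fmt.replace(directive, replacement)
#       return fmt
#
#   sanitize_strftime_format('%D %T')  # -> '%m/%d/%Y %H:%M:%S'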
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes an UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
        # LDML date format patterns use letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
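# Quick illustration (computed by hand; any babel Locale works here because the
# pattern contains no %x/%X directives):
#
#   posix_to_ldml('%Y-%m-%d %H:%M:%S', locale=babel.Locale('en'))
#   # -> 'yyyy-MM-dd HH:mm:ss'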
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
date fields directly, these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
@param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
using src_format or formatted using dst_format.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
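# Examples: list(split_every(2, range(5))) == [(0, 1), (2, 3), (4,)]
#           list(split_every(2, range(5), piece_maker=list)) == [[0, 1], [2, 3], [4]]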
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
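# Example (hypothetical record objects): attrgetter('partner_id.name', 'amount_total')
# returns a callable g with g(invoice) == (invoice.partner_id.name, invoice.amount_total).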
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
       or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
       the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
           do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
    Useful as a default value for methods.
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
        same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
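# Example: defaults = ConstantMapping(42); defaults['any key'] == 42 while
# len(defaults) == 0 and list(defaults) == [], i.e. a read-only "everything maps
# to this value" placeholder.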
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
for th in threading.enumerate()])
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId)
code.append("\n# Thread: %s (id:%s) (uid:%s)" %
(thread_info and thread_info['name'] or 'n/a',
threadId,
thread_info and thread_info['uid'] or 'n/a'))
for line in extract_stack(stack):
code.append(line)
if openerp.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
class frozendict(dict):
""" An implementation of an immutable dictionary. """
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
class OrderedSet(OrderedDict):
""" A simple collection that remembers the elements insertion order. """
def __init__(self, seq=()):
super(OrderedSet, self).__init__([(x, None) for x in seq])
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
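# Example: s = OrderedSet('cabc'); list(s) == ['c', 'a', 'b'] (duplicates dropped,
# insertion order kept); after s.add('d') and s.discard('a'), list(s) == ['c', 'b', 'd'].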
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
def html_escape(text):
return werkzeug.utils.escape(text, quote=True)
else:
def html_escape(text):
return werkzeug.utils.escape(text)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
charbeljc/OCB
|
openerp/tools/misc.py
|
Python
|
agpl-3.0
| 45,843
|
[
"VisIt"
] |
fc33add885ffd0e862b90ac685022546c090928f6f54ef0500cbb1dba39d8829
|
'''
First Created 2016/05/06 by Blaise Thompson
Last Edited 2016/08/08 by Blaise Thompson
Contributors: Blaise Thompson
'''
### import ####################################################################
import os
import sys
import importlib.util
import collections
# third-party imports used by the workup blocks below
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as grd
from distutils import util  # provides util.strtobool for the interactive prompts
import WrightTools as wt
### define ####################################################################
# paths
directory = os.path.dirname(__file__)
key = os.path.basename(directory)
package_folder = os.path.dirname(directory)
# shared module
spec = importlib.util.spec_from_file_location('shared', os.path.join(package_folder, 'shared.py'))
shared_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(shared_module)
# dictionaries to fill
raw_dictionary = collections.OrderedDict()
processed_dictionary = collections.OrderedDict()
### download ##################################################################
bypass_download = False
if __name__ == '__main__' and not bypass_download:
shared_module.download(key, directory)
if False:
### TOPAS-C amplitude and center (preamp) #####################################
# raw and processed are identical in this case
out_path = 'TOPAS_C_full_preamp.p'
force_workup = False
def workup():
# ensure that user wants to spend the time doing the workup
if not force_workup:
prompt = 'TOPAS-C amplitude and center (preamp) workup may take some time, proceed?'
response = raw_input(prompt)
proceed = util.strtobool(response)
if not proceed:
return None, None
# get path
motortune_path = os.path.join(directory, 'TOPAS-C', 'MOTORTUNE [w1_Crystal_1, w1_Delay_1, wa] 2016.01.13 19_00_00.data')
# read information from headers
headers = wt.kit.read_headers(motortune_path)
wa_index = headers['name'].index('wa')
zi_index = headers['name'].index('array')
c1 = np.array(headers['w1_Crystal_1 points'])
d1 = np.array(headers['w1_Delay_1 points'])
# TODO: check if my line count is correct (am I thinking floats?)
# this array is large (~2.6 billion lines)
# it cannot be imported directly into memory
# instead I load chunks and fit them to Gaussians as I go
acqns = c1.size * d1.size
outs = np.full((acqns, 4), np.nan)
file_slicer = wt.kit.FileSlicer(motortune_path)
function = wt.fit.Gaussian()
for i in range(acqns):
# get data from file
lines = file_slicer.get(256)
arr = np.array([np.fromstring(line, sep='\t') for line in lines]).T
# fit data, record
out = function.fit(arr[zi_index], arr[wa_index])
outs[i] = out
wt.kit.update_progress(100.*i/10201)
outs.shape = (c1.size, d1.size, 4)
cen = outs[..., 0].T
wid = outs[..., 1].T
amp = outs[..., 2].T
bas = outs[..., 3].T
# assemble data object
c1_axis = wt.data.Axis(c1, units=None, name='c1')
d1_axis = wt.data.Axis(d1, units=None, name='d1')
axes = [c1_axis, d1_axis]
cen_channel = wt.data.Channel(cen, units='nm', name='center')
wid_channel = wt.data.Channel(wid, units='wn', name='width')
amp_channel = wt.data.Channel(amp, units=None, name='amplitude')
bas_channel = wt.data.Channel(bas, units=None, name='baseline')
channels = [amp_channel, cen_channel, wid_channel, bas_channel]
data = wt.data.Data(axes, channels, name='TOPAS-C preamp')
data.save(os.path.join(directory, out_path))
# finish
return data, data.copy()
# get from pickle or create
if os.path.isfile(os.path.join(directory, out_path)) and not force_workup:
raw_data = wt.data.from_pickle(os.path.join(directory, out_path), verbose=False)
processed_data = raw_data.copy()
else:
raw_data, processed_data = workup()
# check version
if raw_data is None:
pass
elif not raw_data.__version__.split('.')[0] == wt.__version__.split('.')[0]:
raw_data, processed_data = workup()
# add to dictionaries
raw_dictionary['TOPAS-C preamp'] = raw_data
processed_dictionary['TOPAS-C preamp'] = processed_data
### TOPAS-C amplitude and center (poweramp) ###################################
# raw and processed are identical in this case
out_path = 'TOPAS_C_full_poweramp_guassian.p'
force_workup = False
def workup():
# ensure that user wants to spend the time doing the workup
if not force_workup:
prompt = 'TOPAS-C amplitude and center (poweramp) workup may take some time, proceed?'
response = raw_input(prompt)
proceed = util.strtobool(response)
if not proceed:
return None, None
# get path
motortune_path = os.path.join(directory, 'TOPAS-C', 'MOTORTUNE [w1, w1_Crystal_2, w1_Delay_2, wa] 2016.01.25 16_56_06.data')
# for some reason, read headers fails for this file...
# define information that would normally be contained in headers manually
wa_index = 28
zi_index = 29
w1 = np.linspace(1140, 1620, 25)
c2 = np.linspace(-2.5, 2.5, 51) # TODO: actual values
d2 = np.linspace(-1.5, 1.5, 51) # TODO: actual values
# this array is large (~16 million lines)
# it cannot be imported directly into memory
# instead I load chunks and fit them to Gaussians as I go
acqns = w1.size * c2.size * d2.size
outs = np.full((acqns, 4), np.nan)
file_slicer = wt.kit.FileSlicer(motortune_path)
function = wt.fit.Gaussian()
for i in range(acqns):
# get data from file
lines = file_slicer.get(256)
arr = np.array([np.fromstring(line, sep='\t') for line in lines]).T
# fit data, record
out = function.fit(arr[zi_index], arr[wa_index])
outs[i] = out
wt.kit.update_progress(100.*i/acqns)
outs.shape = (w1.size, c2.size, d2.size, 4)
cen = outs[..., 0]
wid = outs[..., 1]
amp = outs[..., 2]
bas = outs[..., 3]
# assemble data object
w1_axis = wt.data.Axis(w1, units='nm', name='w1')
c1_axis = wt.data.Axis(c2, units=None, name='c2')
d1_axis = wt.data.Axis(d2, units=None, name='d2')
axes = [w1_axis, c1_axis, d1_axis]
cen_channel = wt.data.Channel(cen, units='nm', name='center')
wid_channel = wt.data.Channel(wid, units='wn', name='width')
amp_channel = wt.data.Channel(amp, units=None, name='amplitude')
bas_channel = wt.data.Channel(bas, units=None, name='baseline')
channels = [amp_channel, cen_channel, wid_channel, bas_channel]
data = wt.data.Data(axes, channels, name='TOPAS-C poweramp')
data.save(os.path.join(directory, out_path))
# finish
return data, data.copy()
# get from pickle or create
if os.path.isfile(os.path.join(directory, out_path)) and not force_workup:
raw_data = wt.data.from_pickle(os.path.join(directory, out_path), verbose=False)
processed_data = raw_data.copy()
else:
raw_data, processed_data = workup()
# check version
if raw_data is None:
pass
elif not raw_data.__version__.split('.')[0] == wt.__version__.split('.')[0]:
raw_data, processed_data = workup()
# add to dictionaries
raw_dictionary['TOPAS-C poweramp'] = raw_data
processed_dictionary['TOPAS-C poweramp'] = processed_data
### TOPAS-C amplitude and center (poweramp, moments) ##########################
# raw and processed are identical in this case
out_path = 'TOPAS_C_full_poweramp_moments.p'
force_workup = False
def workup():
# ensure that user wants to spend the time doing the workup
if not force_workup:
prompt = 'TOPAS-C amplitude and center (poweramp) workup may take some time, proceed?'
response = raw_input(prompt)
proceed = util.strtobool(response)
if not proceed:
return None, None
# get path
motortune_path = os.path.join(directory, 'TOPAS-C', 'MOTORTUNE [w1, w1_Crystal_2, w1_Delay_2, wa] 2016.01.25 16_56_06.data')
# for some reason, read headers fails for this file...
# define information that would normally be contained in headers manually
wa_index = 28
zi_index = 29
w1 = np.linspace(1140, 1620, 25)
c2 = np.linspace(-2.5, 2.5, 51)
d2 = np.linspace(-1.5, 1.5, 51)
# this array is large (~16 million lines)
# it cannot be imported directly into memory
# instead I load chunks and fit them to Gaussians as I go
acqns = w1.size * c2.size * d2.size
outs = np.full((acqns, 6), np.nan)
file_slicer = wt.kit.FileSlicer(motortune_path)
function = wt.fit.Moments()
for i in range(acqns):
# get data from file
lines = file_slicer.get(256)
arr = np.array([np.fromstring(line, sep='\t') for line in lines]).T
# fit data, record
out = function.fit(arr[zi_index], arr[wa_index])
outs[i] = out
wt.kit.update_progress(100.*i/acqns)
file_slicer.close()
outs.shape = (w1.size, c2.size, d2.size, 6)
# assemble data object
w1_axis = wt.data.Axis(w1, units='nm', name='w1')
c1_axis = wt.data.Axis(c2, units=None, name='c2')
d1_axis = wt.data.Axis(d2, units=None, name='d2')
axes = [w1_axis, c1_axis, d1_axis]
ch_0 = wt.data.Channel(outs[..., 0], units='nm', name='integral')
ch_1 = wt.data.Channel(outs[..., 1], units='wn', name='one')
ch_2 = wt.data.Channel(outs[..., 2], units=None, name='two')
ch_3 = wt.data.Channel(outs[..., 3], units=None, name='three')
ch_4 = wt.data.Channel(outs[..., 4], units=None, name='four')
ch_5 = wt.data.Channel(outs[..., 5], units=None, name='baseline')
channels = [ch_0, ch_1, ch_2, ch_3, ch_4, ch_5]
data = wt.data.Data(axes, channels, name='TOPAS-C poweramp')
data.save(os.path.join(directory, out_path))
# finish
return data, data.copy()
# get from pickle or create
if os.path.isfile(os.path.join(directory, out_path)) and not force_workup:
raw_data = wt.data.from_pickle(os.path.join(directory, out_path), verbose=False)
processed_data = raw_data.copy()
else:
raw_data, processed_data = workup()
# check version
if raw_data is None:
pass
elif not raw_data.__version__.split('.')[0] == wt.__version__.split('.')[0]:
raw_data, processed_data = workup()
# add to dictionaries
raw_dictionary['TOPAS-C poweramp moments'] = raw_data
processed_dictionary['TOPAS-C poweramp moments'] = processed_data
### OPA800 signal and idler motortune #########################################
if False:
# TODO: make and save data pickle here
    # TODO: separate figure making from data import
data_path = r'MOTORTUNE [w2_Grating, w2_BBO] 2015.10.15 17_38_22.data'
# this data is sufficiently old that we have to process it manually :-(
# get values from file
headers = wt.kit.read_headers(data_path)
arr = np.genfromtxt(data_path).T
# extract arrays
grating_index = headers['name'].index('w2_Grating')
bbo_index = headers['name'].index('w2_BBO')
signal_index = headers['name'].index('pyro2_mean')
gra = arr[grating_index]
gra.shape = (-1, 401)
gra = gra[:, 0]
bbo = arr[bbo_index]
bbo.shape = (-1, 401)
bbo = bbo[0]
sig = arr[signal_index]
sig.shape = (-1, 401)
sig -= sig.min()
sig /= sig.max()
sig = sig.T
# prepare plot
fig = plt.figure(figsize=[8, 6])
gs = grd.GridSpec(1, 2, hspace=0.05, wspace=0.05, width_ratios=[20, 1])
# pcolor
cmap = wt.artists.colormaps['default']
ax = plt.subplot(gs[0])
X, Y, Z = wt.artists.pcolor_helper(gra, bbo, sig)
cax = plt.pcolor(X, Y, Z, vmin=0, vmax=np.nanmax(Z), cmap=cmap)
ax.set_xlim(22, gra.max())
ax.set_ylim(bbo.min(), bbo.max())
# labels
ax.set_xlabel('Grating (mm)', fontsize=16)
ax.set_ylabel('BBO (mm)', fontsize=16)
ax.grid()
ax.axvline(34.6, c='k', alpha=0.5, lw=2)
ax.axhline(39.2, c='k', alpha=0.5, lw=2)
# on-plot labels
distance = 0.05
wt.artists.corner_text('II signal', ax=ax, corner='UL', fontsize=16, distance=distance)
wt.artists.corner_text('II idler', ax=ax, corner='UR', fontsize=16, distance=distance)
wt.artists.corner_text('III signal', ax=ax, corner='LL', fontsize=16, distance=distance)
wt.artists.corner_text('III idler', ax=ax, corner='LR', fontsize=16, distance=distance)
# colorbar
plt.colorbar(cax, cax=plt.subplot(gs[1]))
# finish
plt.savefig('signal_and_idler_motortune.png', dpi=300, transparent=True)
### OPA800 DFG Mixer Motortune ################################################
if False:
# TODO: make and save data pickle here
    # TODO: separate figure making from data import
data_path = r'MOTORTUNE [w2, w2_Mixer] 2015.10.16 17_59_58.data'
# this data is sufficiently old that we have to process it manually :-(
# get values from file
headers = wt.kit.read_headers(data_path)
arr = np.genfromtxt(data_path).T
# extract arrays
w2_index = headers['name'].index('w2')
mixer_index = headers['name'].index('w2_Mixer')
signal_index = headers['name'].index('pyro2_mean')
w2 = arr[w2_index]
w2.shape = (-1, 501)
w2 = w2[:, 0]
mix = arr[mixer_index]
mix.shape = (-1, 501)
mix = mix[0]
sig = arr[signal_index]
sig.shape = (-1, 501)
sig -= sig.min()
sig /= sig.max()
sig = sig.T
# prepare plot
fig = plt.figure(figsize=[8, 6])
gs = grd.GridSpec(1, 2, hspace=0.05, wspace=0.05, width_ratios=[20, 1])
# pcolor
cmap = wt.artists.colormaps['default']
ax = plt.subplot(gs[0])
X, Y, Z = wt.artists.pcolor_helper(w2, mix, sig)
cax = plt.pcolor(X, Y, Z, vmin=0, vmax=np.nanmax(Z), cmap=cmap)
ax.set_xlim(w2.min(), w2.max())
ax.set_ylim(mix.min(), mix.max())
ax.grid()
# axis labels
ax.set_xlabel('w2 (wn)', fontsize=16)
ax.set_ylabel('Grating (mm)', fontsize=16)
# colorbar
plt.colorbar(cax, cax=plt.subplot(gs[1]))
# finish
plt.savefig('DFG_mixer_motortune.png', dpi=300, transparent=True)
|
wright-group/WrightData
|
unpublished Thompson - Automated OPA Tuning/workup.py
|
Python
|
cc0-1.0
| 15,119
|
[
"Gaussian"
] |
13af9b311dfd3ec6011abb52e9b6a85e547c9d1455df411da9d2f395d9c93e1f
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd
from espressomd import thermostat
from espressomd import interactions
from espressomd import diamond
import sys
# System parameters
#############################################################
system = espressomd.System(box_l=[100.0, 100.0, 100.0])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
system.cell_system.set_n_square(use_verlet_lists=False)
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
fene = interactions.FeneBond(k=10, d_r_max=2)
system.bonded_inter.add(fene)
diamond.Diamond(a=20, bond_length=2, MPC=20)
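# A possible continuation (not part of this minimal sample): run the Langevin
# integrator for a few thousand steps and inspect the energies, e.g.
#   system.integrator.run(1000)
#   print(system.analysis.energy())
# Warm-up protocols (capped forces, ramped LJ) vary between ESPResSo samples and
# are omitted here.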
|
KonradBreitsprecher/espresso
|
samples/minimal-diamond.py
|
Python
|
gpl-3.0
| 1,533
|
[
"ESPResSo"
] |
7192725d0d83fb55e10797d91e3a72178a133c8c6842c576c2c69e1491932467
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import json
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Anti-Grain Geometry',
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'FreeType (BSD like)',
'FreeType (BSD like) with patent clause',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'Independent JPEG Group License',
'ISC',
'LGPL (unversioned/unknown version)',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v2.1 or later)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like)',
'MIT/X11 (BSD like) LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v2.0)',
'Ms-PL',
'Public domain',
'Public domain BSD',
'Public domain BSD (3 clause)',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
'SunSoft (BSD like)',
'University of Illinois/NCSA Open Source License (BSD like)',
('University of Illinois/NCSA Open Source License (BSD like) '
'MIT/X11 (BSD like)'),
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
# This contains files copied from elsewhere from the tree. Since the copied
# directories might have suppressions below (like simplejson), whitelist the
# whole directory. This is also not shipped code.
'chrome/common/extensions/docs/server2/third_party': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
],
'third_party/WebKit': [
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# http://crbug.com/326117
# https://bitbucket.org/chrisatlee/poster/issue/21
'third_party/chromite/third_party/poster': [
'UNKNOWN',
],
# http://crbug.com/333508
'third_party/clang_format/script': [
'UNKNOWN',
],
# http://crbug.com/333508
'buildtools/clang_format/script': [
'UNKNOWN',
],
# https://mail.python.org/pipermail/cython-devel/2014-July/004062.html
'third_party/cython': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/fontconfig': [
# https://bugs.freedesktop.org/show_bug.cgi?id=73401
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libc++/trunk/include/support/solaris': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libc++/trunk/src/support/solaris/xlocale.c': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/boringssl': [
# There are some files in BoringSSL which came from OpenSSL and have no
# license in them. We don't wish to add the license header ourselves
# thus we don't expect to pass license checks.
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/npapi/npspy': [
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/ply/__init__.py': [
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# http://crbug.com/222831
# https://bitbucket.org/eliben/pyelftools/issue/12
'third_party/pyelftools': [
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
# http://crbug.com/334668
# MIT license.
'tools/swarming_client/third_party/httplib2': [
'UNKNOWN',
],
# http://crbug.com/334668
# Apache v2.0.
'tools/swarming_client/third_party/oauth2client': [
'UNKNOWN',
],
# https://github.com/kennethreitz/requests/issues/1610
'tools/swarming_client/third_party/requests': [
'UNKNOWN',
],
'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# https://github.com/KhronosGroup/WebGL/issues/435
'third_party/webgl/src': [
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
'tools/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'v8/src/third_party/kernel/tools/perf/util/jitdump.h': [ # http://crbug.com/391716
'UNKNOWN',
],
}
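# Example of how the table above is consulted (see check_licenses below): a line
# "third_party/sqlite/src/main.c: UNKNOWN" from licensecheck.pl matches the
# 'third_party/sqlite' prefix, and 'UNKNOWN' is whitelisted for that prefix, so
# the file is suppressed rather than reported as an error.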
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
used_suppressions = set()
errors = []
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
matched_prefixes = [
prefix for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES
if filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]]
if matched_prefixes:
used_suppressions.update(set(matched_prefixes))
continue
errors.append({'filename': filename, 'license': license})
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
for error in errors:
print "'%s' has non-whitelisted license '%s'" % (
error['filename'], error['license'])
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
# Do not print unused suppressions so that above message is clearly
# visible and gets proper attention. Too much unrelated output
# would be distracting and make the important points easier to miss.
return 1
print "\nSUCCESS\n"
if not len(args):
unused_suppressions = set(
PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference(
used_suppressions)
if unused_suppressions:
print "\nNOTE: unused suppressions detected:\n"
print '\n'.join(unused_suppressions)
return 0
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
option_parser.add_option('--json', help='Path to JSON output file')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
|
7kbird/chrome
|
tools/checklicenses/checklicenses.py
|
Python
|
bsd-3-clause
| 15,975
|
[
"Galaxy"
] |
41166d93284ea75cff236803a04394b668c9d7ced3d704bb3b72446ab8cd16ed
|
#!/usr/bin/env python3
"""
Usage:
heatmaps.py [ --term-file=<file> --pickle-file=<file> --ipython --make-csv=<file> --ice ]
heatmaps.py -h | --help
Options:
-t --term-file=<file> newline separate list of terms
-p --pickle-file=<file>
-c --make-csv=<file>
-i --ipython
--ice make heatmaps as icecicles rather than trees
"""
#TODO
# move to its own project
# convert to class based
# break into two files?
# one that converts data into the desired format to visualize or manipulate (the matrix is the heatmap)
# one that acquires that data and gets it into the desired format
# potentially one that deals with sorting and ordering the indices (this is where applications to desc arrive)
# inputs should take
# a list of terms
# a root term and a property relationship
# a tree of nodes (ids) and edges (property, or possibly conditional properties)
# rest API
#
#
#
#
# converting terms and data sources (and eventually anything) into a reliably indexed matrix
# as long as we know the mapping from the ids to the reference table the order actually doesn't matter
# in fact it may make life easier if we can just add new sources on to the end (eh)
# the ids will be used to generate a REFERENCE matrix where ids are mapped to natural numbers 0-n
# various orderings of the ids will be mapped to permutations of the original index
# eg aijth term from the reference matrix will be placed in the bxyth position when a new ordering
# maps i->x and j->y
# aij i->rows j->columns as per convention, we will make the term ids the rows and the source (etc) ids the columns
# this works nicely with the fact that each row has only a subset of the sources
# WE NEED to have the FULL list of terms
# consideration: the list of terms probably changes more quickly than the list of sources, another argument for keeping
# terms as rows since we will have to iterate over all terms when we index a new source anyway
# XXX DERP just keep the bloody thing in dict form and use the orderings from there
# all we need to know is how many total data sources there are and what index we want to use for each of them (ie we need to fill in the zeros)
# XXX may still want this because if we want to reorder on data sources it is going to be a pain if we can't use slicing
#
# NOTES: normalize by the first appearance of the term in the literature to attempt to control for historical entrenchment
# consider also normalizing by the total number of records per datasource??
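# A minimal numpy sketch of the reindexing idea above (illustration only, the
# values are made up): given the reference matrix A with rows indexed by term
# ids and columns by source ids, a new ordering is just a pair of permutations.
#
#   import numpy as np
#   A = np.arange(12).reshape(3, 4)     # reference matrix, natural-number indices
#   x_of = np.array([2, 0, 1])          # new row position of each reference row i
#   y_of = np.array([1, 0, 3, 2])       # new column position of each reference col j
#   B = np.empty_like(A)
#   B[x_of[:, None], y_of] = A          # b_xy <- a_ij when the ordering maps i->x, j->y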
import os
import pickle
from collections import defaultdict
from IPython import embed #FIXME name collisions sadness
import requests
import libxml2
import numpy as np
import pylab as plt
from docopt import docopt
args = docopt(__doc__, version='heatmaps .0001')
#urls
url_oq_con_term = "http://nif-services.neuinfo.org/ontoquest/concepts/term/" #used in get_term_id
url_oq_gp_term = "http://nif-services.neuinfo.org/ontoquest/getprop/term/" # used to get the id for relationship
url_oq_rel = "http://nif-services.neuinfo.org/ontoquest/rel/all/%s?level=1&includeDerived=true&limit=0" # %s is id
url_serv_summary = "http://nif-services.neuinfo.org/servicesv1/v1/summary.xml?q="
#xpaths
term_id_xpath = "//class[not(contains(id,'NEMO'))]/id/text()" #FIXME ok for some reason the non-nemo id gives SHIT results
#term_id_xpath = "//class[contains(id,'NEMO')]/id/text()" #FIXME NEVER MIND! that tree goes nowhere ;_;
rel_id_xpath = "//class[not(contains(id,'%s'))]/id/text()" # %s should be relationship here!
child_term_ids_object_xpath = "//relationship[object/@id='%s' and property/@id='%s']/subject/@id" # %s id %s relationship
child_term_ids_subject_xpath = "//relationship[subject/@id='%s' and property/@id='%s']/object/@id"
#files
file_birnlex_796_rel = "~/Downloads/birnlex_796.xml"
###
# Data acquisition, get and parse xml
###
def re_tree_der():
"""
    A rel/all dump with includeDerived=true on brain flattens everything, the tree is still there, but we have to
recreate it
    JUST KIDDING! not actually possible because they really did flatten it >_<
"""
xmlDoc = libxml2.parseEntity(file_birnlex_796_rel)
c = xmlDoc.xpathNewContext()
child_term_ids_xpath = "//relationship[subject/@id='%s' and property/@id='%s']/subject/@id"%('birnlex_768',) # %s id %s relationship
c.xpathEval(child_term_ids_xpath)
def get_xpath(doc, query):
""" doc is a string that is an xml document
query is a string that is a valid xpath query
returns a list of nodes
"""
try:
node = libxml2.parseDoc(doc)
except libxml2.parserError: # derp
return []
xpc = node.xpathNewContext()
return xpc.xpathEval(query)
def run_xpath(url, *queries):
#xmlDoc = libxml2.parseEntity(url) #XXX this causes hangs due to no timeout
#xmlDoc = libxml2.parseFile('/home/tom/Dropbox/neuroinformatics/force15_poster/summary.xml')
#print("WARNING YOU ARE NOT GETTING REAL DATA")
#"""
try:
resp = requests.get(url, timeout=20) # sometimes we need a longer timeout :/ FIXME :/ stateful?
except requests.exceptions.Timeout:
return [None] * len(queries)
try:
xmlDoc = libxml2.parseDoc(resp.text)
except libxml2.parserError: # derp
return [None] * len(queries)
#"""
xpc = xmlDoc.xpathNewContext()
out = []
for query in queries:
out.append(xpc.xpathEval(query))
if len(queries) == 1:
return out[0]
return out
def get_rel_id(relationship): #FIXME this is NOT consistently ordred! AND is_a and part_of behave VERY differently!
"""
Used to get relationship ids so that xpath queries will actually work :/
"""
query_url = url_oq_gp_term + relationship
#response = requests.get(query_url)
#ids = get_xpath(response.text, rel_id_xpath%relationship)
ids = run_xpath(query_url, rel_id_xpath%relationship)
print([t.content for t in ids])
try:
id_ = ids[0].content
except IndexError:
id_ = None
return id_
def get_term_id(term):
""" Return the id for a term or None if an error occures """
query_url = url_oq_con_term + term.replace(" ", "%20")
try:
response = requests.get(query_url, timeout=20)
except requests.exceptions.Timeout:
return None
ids = get_xpath(response.text, term_id_xpath)
#ids = run_xpath(query_url, term_id_xpath)
try:
id_ = ids[0].content
except IndexError:
id_ = None
return id_
def val_term_id(term, reltionship): # TODO FIXME this is actually a problem with the ontology ;_;
""" get term id by validating that this particular term actually has the relationship listed """
query_url = url_oq_con_term + term.replace(" ", "%20")
response = requests.get(query_url)
ids = get_xpath(response.text, term_id_xpath)
def get_child_term_ids(parent_id, level, relationship, child_relationship, exclude_parents=False):
""" This was burried deep within the tree of kepler actors making it nearly
impossible to find the actual data. Also, who though that using the
equivalent of environment variables to pass global information down
a giant tree of actors was a remotely good idea?
        NOTE: the terms are unordered, not entirely clear when we should try
to order them
this will concat all the way up, flattening the tree at that level
yay dev tools level fail
"""
    #TODO: allow more dynamic traversal of the tree by stopping at nodes where
#the reference count is zero for all children so we can show relative depth
#esp. important for coverage of species
#response = requests.get(url_oq_rel%parent_id) #FIXME brain returns a truncated result ;_; that is what is breaking things!
if child_relationship == "subject":
xpath = child_term_ids_subject_xpath%(parent_id, relationship)
xnames = "//relationship[subject/@id='%s' and property/@id='%s']/object"%(parent_id, relationship)
else:
xpath = child_term_ids_object_xpath%(parent_id, relationship)
xnames = "//relationship[object/@id='%s' and property/@id='%s']/subject"%(parent_id, relationship)
#id_list = [n.content for n in get_xpath(response.text, xpath)] # FIXME not clear if this is returning what we want across all levels of the tree
query_url = url_oq_rel%parent_id
id_nodes, name_nodes = run_xpath(query_url, xpath, xnames)
#id_list = [i.content for i in id_nodes]
id_name_dict = {id_.content:n.content for id_, n in zip(id_nodes, name_nodes)}
if level == 1:
#print(id_list)
print('level',level,'parent_id',parent_id,'ids',id_name_dict)
#print([n.content for n in run_xpath(query_url, xnames)]) #FIXME MMMM HIT DAT SERVER
return id_name_dict
#return id_list
else:
child_dicts = []
new_level = level - 1
for id_ in id_name_dict.keys():
new_dict = get_child_term_ids(id_, new_level, relationship, child_relationship, exclude_parents) #funstuff here with changing the rels
child_dicts.append(new_dict)
if exclude_parents:
id_name_dict = {}
for dict_ in child_dicts:
id_name_dict.update(dict_)
print('level',level,'parent_id',parent_id,'ids',id_name_dict)
return id_name_dict
def get_summary_counts(id_):
if ' ' in id_:
id_ = '"%s"'%id_ # fix to prevent AND expansion RE: this kills service
print('getting summary for', id_)
query_url = url_serv_summary + id_
#nodes = run_xpath(query_url, '//results/result')
nodes, name, lit = run_xpath(query_url, '//results/result', '//clauses/query', '//literatureSummary/@resultCount')
if name:
name = name[0].content
print(name)
if nodes:
if nodes[0] == None:
return [('error-0',id_,'ERROR', -100)], None
else:
return [('error-1',id_,'ERROR', -100)], None
#name = run_xpath(query_url, '//clauses/query')[0].content # FIXME please don't hit this twice ;_;
nifIds = []
dbs = []
indexables = []
counts = []
for node in nodes:
if node.prop('nifId') not in nifIds: #TODO should we have a simple way to generalize schemas of attributes + content > columns?
nifId = node.prop('nifId')
db = node.prop('db')
indexable = node.prop('indexable')
cs = node.xpathEval('./count')
if len(cs) > 1:
print(id_, name, [c.content for c in cs])
raise IndexError('too many counts!')
count = int(cs[0].content)
nifIds.append(nifId)
dbs.append(db)
indexables.append(indexable)
counts.append(count)
else:
print(node.prop('nifId'))
#literature
nifIds.append('nlx_82958')
dbs.append('PubMed') # this is a fiction, it comes from PMC too
indexables.append('Literature')
counts.append(int(lit[0].content))
print(dbs)
return [a for a in zip(nifIds, dbs, indexables, counts)], name
#counts = get_xpath(response.text, term_id_xpath)
def get_term_count_data(term, level, relationship, child_relationship, exclude_parents=False, term_id=None):
"""
for a given term go get all its children at a given level and get the
counts for their instances across databases
"""
if not term_id:
term_id = get_term_id(term) # FIXME this fails to work as expected given relationships
child_data = {}
if term_id == None:
term_id = term
#print('ERROR:',term,'could not find an id!')
#return {}, {}
#else:
if level == 0: # FIXME surely there is a more rational place to put this?
id_name_dict = {term_id:term}
else:
id_name_dict = get_child_term_ids(term_id, level, relationship, child_relationship, exclude_parents=exclude_parents) # TODO this is one place we could add the level info??
for child_id in id_name_dict.keys():#[0:10]:
data, term_name = get_summary_counts(child_id)
print(data)
child_data[child_id] = data
return child_data, id_name_dict
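# Hedged usage sketch (illustrative only; the real call queries the NIF services):
#   data, names = get_term_count_data('forebrain', 1, 'has_proper_part', 'subject')
#   # data  -> {child_id: [(nifId, db, indexable, count), ...], ...}
#   # names -> {child_id: 'child term label', ...}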
def get_source_entity_nifids():
summary = get_summary_counts('*')
ids = ['nlx_82958']
names = ['PubMed']
#counts = []
for id_, name, idx, count in summary[0]:
if id_ not in ids:
ids.append(id_)
if name == 'Integrated':
name = name + ' ' + idx
names.append(name)
#counts.append(count)
#to_sort.append((id_, name, count))
#TODO sorting here doesnt help and didn't really reveal much of interest
#order = np.argsort(counts)
#ids = list(np.array(ids)[order])
#names = list(np.array(names)[order])
return ids, names
###
# Data processing
###
def make_collapse_map(ids, names):
""" use the data on source entities and collapse redundant entries """
collapse = defaultdict(list)
for id_, name in zip(ids, names):
collapse[name].append(id_)
unames = list(collapse.keys())
unames.sort() # this masks any prior sorting
ids_list = []
for n in unames:
ids_list.append(tuple(collapse[n]))
return ids_list, unames
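def _collapse_map_example():
    """ Hedged illustration, not part of the original pipeline: two source ids that
    share the name 'PubMed' collapse into a single column, and the output is
    sorted by name. """
    ids_list, unames = make_collapse_map(['id1', 'id2', 'id3'],
                                         ['PubMed', 'PubMed', 'NeuroLex'])
    # ids_list -> [('id3',), ('id1', 'id2')], unames -> ['NeuroLex', 'PubMed']
    return ids_list, unames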
def construct_columns(data_dict, term_id_list, datasource_nifid_list, collapse_map=None):
"""
Given two lists of ids, the first list will be rows and the 2nd will be columns.
The values of the first list should match the keys in the data dict.
The orderings of both indexes are stored in term_id_list and datasource_nifid_list
and those can be used to reorder the matrix, or maybe we just call this function again.
"""
n_cols = len(datasource_nifid_list)
#make a lookup dict to map nifids to indexes for faster updates
nid_map = {nid:i for i, nid in enumerate(datasource_nifid_list)}
rows_to_vstack = []
for term_id in term_id_list:
data_list = data_dict[term_id]
row = np.zeros((n_cols))
for nifId, _, _, count in data_list:
if count >= 0: #ignore errors
row[nid_map[nifId]] = count
rows_to_vstack.append(row)
data_matrix = np.vstack(rows_to_vstack)
print(data_matrix)
# a collapse map should be a list of tuples of nifids from the same source
# it MUST also have an accompanying name mapping (used to generate the list)
if collapse_map: # if we dont want the full split on source id
cols_to_hstack = []
for id_tup in collapse_map:
col_tup = [nid_map[id_] for id_ in id_tup] #get the cols for that id
new_col = np.sum(data_matrix[:,col_tup], axis=1)
cols_to_hstack.append(np.vstack(new_col))
data_matrix = np.hstack(cols_to_hstack)
return data_matrix
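def _construct_columns_example():
    """ Hedged illustration with made-up ids, not part of the original pipeline:
    two terms against three sources, where the last two sources are collapsed
    into a single column. """
    data = {'t1': [('src_a', 'A', 'Data', 5), ('src_b', 'B', 'Data', 2)],
            't2': [('src_c', 'C', 'Data', 7)]}
    collapse = [('src_a',), ('src_b', 'src_c')]
    # expected result: [[5., 2.], [0., 7.]]
    return construct_columns(data, ['t1', 't2'], ['src_a', 'src_b', 'src_c'], collapse)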
def discretize(data_matrix):
bins = [0,1,10,100]
vals = [None,1,2,3]
#for l in bins[:-1]:
#for u in bins[1:]:
#f_m = (data_matrix > l) * (data_matrix <= u)
#data_matrix[f_m]
for lower, upper, val in zip(bins[:-1],bins[1:], vals[:-1]):
data_matrix[ (data_matrix >= lower) * (data_matrix < upper) ] = val
data_matrix[data_matrix >= bins[-1]] = vals[-1]
return data_matrix
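def _discretize_example():
    """ Hedged illustration, not part of the original pipeline: counts are binned
    in place into nan (zero), 1 for [1, 10), 2 for [10, 100) and 3 for >= 100. """
    m = np.array([[0., 3., 42., 1000.]])  # must be float so the None/nan assignment works
    # expected result: [[nan, 1., 2., 3.]]
    return discretize(m)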
def compute_diversity(matrix):
""" our measure of how well something is understood """
# FIXME we clearly need to control for how 'generic' the term is, eg 'brain' could easily be the best understood
# this is not what we want, the most frequently used across fields is not very useful since it may mean that we
# well, no, that is wrong because we DO understand the brain pretty damned well at the level of pointing to it as
# an organ, that is a VERY different metric from how well we understand all of its parts (which is another metric)
total_data_sources = float(matrix.shape[1]) # yay py2 >_<
sources_per_term = np.sum(matrix > 0, axis=1) / total_data_sources
print(sources_per_term)
return sources_per_term
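def _diversity_example():
    """ Hedged illustration, not part of the original pipeline: the score is simply
    the fraction of data sources with a nonzero count for each term. """
    example = np.array([[5, 0, 0, 0],
                        [3, 1, 0, 0],
                        [9, 2, 4, 1]])
    # expected scores: [0.25, 0.5, 1.0]
    return compute_diversity(example)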
###
# Data display
###
def display_grid(mats, rns, titles, col_names, figname, hspace=0):
aspect = .3
ratio = float(mats[0].shape[1] + 1) / float(sum([m.shape[0] for m in mats]) + 1) # cols / rows
gcols = 2
grows = len(mats)
base = 22
dpi = 600
width_ratios = 98,2
size = (base, base / ratio * aspect) #FIXME >_<
term_fsize = 2
fig = plt.figure(figsize=size, dpi=dpi)
gs = plt.matplotlib.gridspec.GridSpec(grows, gcols, hspace=hspace, wspace=0, height_ratios = [m.shape[0] for m in mats], width_ratios=width_ratios)
axes = []
for r in range(grows):
matrix = mats[r]
row_names = rns[r]
if axes:
ax1 = fig.add_subplot(gs[r,0], sharex=axes[0][0])
else:
ax1 = fig.add_subplot(gs[r,0])
ax2 = fig.add_subplot(gs[r,1], sharey=ax1)
#axis 1
img = ax1.imshow(matrix, interpolation='nearest', cmap=plt.cm.get_cmap('Greens'), aspect='auto', vmin=0, vmax=3)
#axes
ax1.xaxis.set_ticks([i for i in range(len(col_names))])
ax1.xaxis.set_ticklabels(col_names)
ax1.xaxis.set_ticks_position('top')
[l.set_rotation(90) for l in ax1.xaxis.get_majorticklabels()] #alternate is to use plt.setp but why do that?
[l.set_fontsize(int(base * .25)) for l in ax1.xaxis.get_ticklabels()]
if axes:
#plt.setp(ax1.get_xticklabels(), visible=False)
ax1.xaxis.set_tick_params(label1On=False,label2On=False)
print('axis label', titles[r])
ax1.xaxis.set_label_text(titles[r])
ax1.xaxis.set_label_position('bottom')
#if titles[r]:
#embed()
ax1.yaxis.set_ticks([i for i in range(len(row_names))])
ax1.yaxis.set_ticklabels(row_names)
ax1.yaxis.set_ticks_position('left')
[l.set_fontsize(term_fsize) for l in ax1.yaxis.get_ticklabels()]
ax1.tick_params(direction='in', length=0, width=0)
#axis 2
div = compute_diversity(matrix) # FIXME this is called twice :/
ll, ul = ax1.get_ylim()
width = (ul - ll) / matrix.shape[0]
other = np.arange(ll, ul, width)[::-1] # for whatever reason backwards, probably imshow idiocy
ax2.barh(other, div, width, edgecolor='None') #FIXME for some reason horizontal breaks things?
ax2.yaxis.set_ticks_position('right')
[l.set_fontsize(term_fsize) for l in ax2.yaxis.get_ticklabels()]
ax2.set_xlim(0,1)
ax2.tick_params(direction='in', length=0, width=0)
ax2.xaxis.set_ticks([0,.5,1])
ax2.xaxis.set_ticklabels(['0','.5','1'])
[l.set_fontsize(int(base * .25)) for l in ax2.xaxis.get_ticklabels()]
axes.append((ax1, ax2))
title = figname
fig.savefig('/tmp/%s.png'%title, bbox_inches='tight', pad_inches=.1, dpi=dpi)
def display_heatmap(matrix, row_names, col_names, title):
#blanks = np.zeros_like(matrix[0])
#matrix = np.vstack((blanks, matrix, blanks))
#row_names = [''] + row_names + ['']
aspect = .3
#mm = float(max(matrix.shape)) #python2 a shit
ratio = float(matrix.shape[1] + 1) / float(matrix.shape[0] + 1) # cols / rows
print('ratio', ratio)
base = 22 #width
dpi = 600
width_ratios = 98, 2
#width_ratios = 100, 0
#size = (matrix.shape[1] / mm * base * (1/aspect), matrix.shape[0] / mm * base + 1) #FIXME deal with the font vs figsize :/
#size = (base + sum(width_ratios)/width_ratios[0], base / ratio * aspect) #FIXME >_<
size = (base, base / ratio * aspect) #FIXME >_<
print(size)
term_fsize = 2
#fig, (ax1, ax2) = plt.subplots(1, 2, figsize=size, dpi=dpi, sharey=True, gridspec_kw=gskw) # FIXME for some reason using gridspec breaks imshow and tight_layout a;dslkfja;dslkjf
fig = plt.figure(figsize=size, dpi=dpi)
gs = plt.matplotlib.gridspec.GridSpec(1, 2, wspace=0, width_ratios=width_ratios)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1], sharey=ax1)
#axis 1
img = ax1.imshow(matrix, interpolation='nearest', cmap=plt.cm.get_cmap('Greens'), aspect='auto',vmin=0,vmax=3)#, aspect=aspect, extent=(0,matrix.shape[1]+1,0,matrix.shape[0]+1))#, vmin=0, vmax=np.max(matrix)) #FIXME x axis spacing :/ #FIXME consider pcolormesh?
#axes
ax1.xaxis.set_ticks([i for i in range(len(col_names))])
ax1.xaxis.set_ticklabels(col_names)
ax1.xaxis.set_ticks_position('top')
[l.set_rotation(90) for l in ax1.xaxis.get_majorticklabels()] #alternate is to use plt.setp but why do that?
[l.set_fontsize(int(base * .25)) for l in ax1.xaxis.get_ticklabels()]
ax1.yaxis.set_ticks([i for i in range(len(row_names))])
ax1.yaxis.set_ticklabels(row_names)
ax1.yaxis.set_ticks_position('left')
#[l.set_fontsize(int(base / ratio * aspect * .75)+1) for l in ax1.yaxis.get_ticklabels()]
[l.set_fontsize(term_fsize) for l in ax1.yaxis.get_ticklabels()]
#ax1.set_xlim(-4,matrix.shape[1]*(10/3)+4)
#ax1.set_xlim(-10,matrix.shape[1]+10)
ax1.tick_params(direction='in', length=0, width=0)
#axis 2
div = compute_diversity(matrix) # FIXME this is called twice :/
#width = 1
#other = [i - .5 for i in range(len(div))] # FIXME ICK
ll, ul = ax1.get_ylim()
width = (ul - ll) / matrix.shape[0]
#other = np.arange(ll, ul, width)
other = np.arange(ll, ul, width)[::-1] # for whatever reason backwards, probably imshow idiocy
#other = np.linspace(ul, ll, len(div)) #using imshow makes everything backward :/
#try:
#width = other[1] - other[0]
#except IndexError:
#other = ll
#width = ul - ll
#ax2.plot(div, other)
print(other, div)
#ax2 = plt.subplot(122, sharey=ax1)
ax2.barh(other, div, width, edgecolor='None') #FIXME for some reason horizontal breaks things?
#ax2.yaxis.set_ticks([i for i in range(len(row_names))])
#ax2.yaxis.set_ticklabels(row_names)
ax2.yaxis.set_ticks_position('right')
#[l.set_fontsize(int(base / ratio * aspect * .75)+1) for l in ax2.yaxis.get_ticklabels()]
[l.set_fontsize(term_fsize) for l in ax2.yaxis.get_ticklabels()]
ax2.set_xlim(0,1)
ax2.tick_params(direction='in', length=0, width=0)
ax2.xaxis.set_ticks([0,.5,1])
ax2.xaxis.set_ticklabels(['0','.5','1'])
[l.set_fontsize(int(base * .25)) for l in ax2.xaxis.get_ticklabels()]
#ax2.xaxis.set_label('Div score.') # XXX
#ax2.set_sharey(ax1)
fig.suptitle(title, x=.5, y=0, fontsize=base*.25, verticalalignment='bottom') # FIXME stupidly broken >_<
#ax1.xaxis.set_label(title) #XXX
fig.savefig('/tmp/%s.png'%title, bbox_inches='tight', pad_inches=.1, dpi=dpi)
#fig.show()
return fig
def run_levels(term, level, relationship, child_relationship, term_id=None):
level_dict = {}
while level >= 0:
data, idn_dict = get_term_count_data(term, level, relationship, child_relationship, exclude_parents=True, term_id=term_id)
if idn_dict:
level_dict[level] = data, idn_dict
level -= 1
return level_dict
def display_div(div, names, levels, term, nterms=300):
if nterms == None:
nterms = len(div)
order = np.argsort(div)
div = np.array(div)[order][-nterms:]
row_names = np.array(names)[order][-nterms:]
levels = np.array(levels)[order][-nterms:]
colors = {
0:'b',
1:'g',
2:'r',
3:'c',
4:'y',
5:'m',
6:'gray',
}
#ll, ul = ax1.get_ylim()
#width = (ul - ll) / matrix.shape[0]
#other = np.arange(ll, ul, width)[::-1] # for whatever reason backwards, probably imshow idiocy
base = 5 #width
height = nterms // 10 + 2
dpi = 600
#fakefig = plt.figure()
fig = plt.figure(figsize=(5,height),dpi=dpi)
ax2 = fig.add_subplot(111)
#fig, ax2 = plt.subplots(figsize=(5,20),dpi=dpi)
width = 1
term_fsize = 9.5
other = np.arange(len(div)) - .5
hands = []
labs = []
for l, c in colors.items():
subset = levels == l
if subset.any():
lab = 'lvl %s, n = %s'%(l,len(div[subset]))
h = ax2.barh(other[subset], div[subset], width, color=c, edgecolor='None', label=lab) #FIXME for some reason horizontal breaks things?
hands.append(h)
labs.append(lab)
ax2.yaxis.set_ticks_position('right')
[l.set_fontsize(term_fsize) for l in ax2.yaxis.get_ticklabels()]
ax2.set_xlim(0,1)
ax2.tick_params(direction='in', length=0, width=0)
ax2.xaxis.set_ticks([0,.5,1])
ax2.xaxis.set_ticklabels(['0','.5','1'])
#ax2.xaxis.set_ticks_position('top')
#[l.set_fontsize(int(base * .25)) for l in ax2.xaxis.get_ticklabels()]
[l.set_fontsize(10) for l in ax2.xaxis.get_ticklabels()]
ax2.yaxis.set_ticks([i for i in range(len(row_names))])
ax2.yaxis.set_ticklabels(row_names)
ax2.yaxis.set_ticks_position('left')
ax2.set_ylim(-.5, len(div)-.5)
[l.set_fontsize(term_fsize) for l in ax2.yaxis.get_ticklabels()]
ax2.legend(loc='lower right',fontsize='small', frameon=False, borderpad=None)
#fig.legend(hands, labs, loc=4)
#plt.legend()
fig.tight_layout()
plt.title(term.capitalize()+' frequencies. Top %s terms.'%nterms,loc='right')
fig.savefig('/tmp/%s_div.png'%term, bbox_inches='tight', pad_inches=.1, dpi=dpi)
###
# Acquisition
###
def export_csv(path): #TODO we are really going to need to rework the idea of a level dict
resource_ids, resource_names = get_source_entity_nifids()
with open(path, 'rb') as f:
level_dict = pickle.load(f)
try:
level_dict[1]
term = list(level_dict[0][1].values())[0] # FIXME mmmm magic numbers
except KeyError:
term = os.path.basename(path).split('.')[0]
for level, (data, idn_dict) in level_dict.items():
row_ids = list(data.keys())
collapse_map, unames = make_collapse_map(resource_ids, resource_names)
matrix = construct_columns(data, row_ids, resource_ids, collapse_map)
csvs = ''
csvs += ',' + ','.join(unames) + '\n'
for row, id_ in zip(matrix, row_ids):
line = idn_dict[id_] + ',' + ','.join(str(i) for i in row) + '\n'
csvs += line
with open(os.path.dirname(path)+os.sep+term+' level %s.csv'%level, 'wt') as f:
f.write(csvs)
def disp_levels(level_dict, resource_ids, resource_names, ice=False): # TODO consider idn dict here?
term = list(level_dict[0][1].values())[0] # FIXME mmmm magic numbers
for level, (data, idn_dict) in level_dict.items():
row_ids = list(data.keys())
collapse_map, unames = make_collapse_map(resource_ids, resource_names)
matrix = construct_columns(data, row_ids, resource_ids, collapse_map)
div = compute_diversity(matrix)
order = np.argsort(div)
if ice:
order = order[::-1] #convert from trees to icicles
discre = discretize(matrix[order])
row_names = []
for i in order:
#for rid in row_ids:
rid = row_ids[i]
name = idn_dict[rid]
row_names.append(name)
#out = compute_diversity(matrix)
#display_heatmap(discre, row_names, resource_names, '%s level %s'%(term, level))
display_heatmap(discre, row_names, unames, '%s level %s'%(term, level))
def acquire_data(save_loc='/tmp/'):
terms = 'hindbrain', 'midbrain', 'forebrain'
term_ids = 'birnlex_942', None, None
for term, term_id in zip(terms, term_ids):
levels = run_levels(term, 7, 'has_proper_part', 'subject', term_id=term_id) # TODO need to fix level 1 of this w/ the parts of the superior colliculus >_<
with open(save_loc+term+'.pickle','wb') as f:
pickle.dump(levels, f)
def acquire_nt_data(save_loc='/tmp/'):
terms = 'neurotransmitter',
term_ids = None,
for term, term_id in zip(terms, term_ids):
levels = run_levels(term, 0, 'subClassOf', 'object', term_id=term_id) # TODO need to fix level 1 of this w/ the parts of the superior colliculus >_<
with open(save_loc+term+'.pickle','wb') as f:
pickle.dump(levels, f)
return levels
def acquire_doa_data(save_loc='/tmp/'):
terms = 'drug of abuse',
term_ids = None,
for term, term_id in zip(terms, term_ids):
levels = run_levels(term, 2, 'subClassOf', 'object', term_id=term_id) # TODO need to fix level 1 of this w/ the parts of the superior colliculus >_<
with open(save_loc+term+'.pickle','wb') as f:
pickle.dump(levels, f)
return levels
def acquire_dis_data(save_loc='/tmp/'):
terms = 'nervous system disease',
term_ids = None,
for term, term_id in zip(terms, term_ids):
levels = run_levels(term, 5, 'subClassOf', 'object', term_id=term_id) # TODO need to fix level 1 of this w/ the parts of the superior colliculus >_<
with open(save_loc+term+'.pickle','wb') as f:
pickle.dump(levels, f)
return levels
def acquire_type_data(save_loc='/tmp/'):
terms = 'neuron',
term_ids = None,
for term, term_id in zip(terms, term_ids):
levels = run_levels(term, 5, 'subClassOf', 'object', term_id=term_id) # TODO need to fix level 1 of this w/ the parts of the superior colliculus >_<
with open(save_loc+term+'.pickle','wb') as f:
pickle.dump(levels, f)
return levels
def get_term_file_counts(term_file, name, save_loc='/tmp/'):
""" given a list of terms return the counts for each """
with open(term_file) as f:
lines = f.readlines()
terms = [line.rstrip('\n').rstrip('\r') for line in lines]
datas = {}
idns = {}
for term in terms:
print(term)
data, idn_dict = get_term_count_data(term, 0, 'subClassOf', 'subject', exclude_parents=True) # FIXME if level == 0 IGNORE ALL THE THINGS
datas.update(data)
idns.update(idn_dict)
level_dict = {0:(datas, idns)}
with open(save_loc+os.sep+name+'.pickle','wb') as f:
pickle.dump(level_dict, f)
return level_dict
def graph_data(paths, ice=False):
""" given a requisitely formatted dict in a pickle graph it """
nifids, nif_names = get_source_entity_nifids()
#terms = 'hindbrain', 'midbrain', 'forebrain', 'neurotransmitter', 'drug of abuse', 'species'
#terms = 'neurotransmitter',
#terms = 'drug of abuse',
#terms = 'species', #'neurotransmitter', 'drug of abuse'
#terms = 'nervous system disease',
#terms = 'auditory',
#for term in terms:
for path in paths:
#with open(load_loc+term+'.pickle','rb') as f:
with open(path,'rb') as f:
levels = pickle.load(f)
disp_levels(levels, nifids, nif_names, ice)
def graph_partonomy(paths, titles=None, flatten=False, figname='test'):
"""
Compute the diversity and graph terms ranked and color by level
"""
resource_ids, resource_names = get_source_entity_nifids()
mats = []
rns = []
#for term in terms:
for path in paths:
#with open(load_loc+term+'.pickle','rb') as f:
with open(path,'rb') as f:
level_dict = pickle.load(f)
term = list(level_dict[0][1].values())[0] # FIXME mmmm magic numbers
if flatten:
if len(level_dict.keys()) > 1: # 0 -> already flat
flat = {0:({},{})}
for level, (data, idn_dict) in level_dict.items():
flat[0][0].update(data)
flat[0][1].update(idn_dict)
level_dict = flat
levels = [0]
else:
levels = list(level_dict.keys())
levels.sort() #guarantee order
comp_div = []
comp_names = []
comp_levels = []
for level in levels:
data, idn_dict = level_dict[level]
row_ids = list(data.keys())
collapse_map, unames = make_collapse_map(resource_ids, resource_names)
matrix = construct_columns(data, row_ids, resource_ids, collapse_map)
div = compute_diversity(matrix)
order = np.argsort(div)
#if ice:
#order = order[::-1] # start high #NOPE trees better than ice
discre = discretize(matrix[order])
row_names = []
for i in order:
#for rid in row_ids:
rid = row_ids[i]
name = idn_dict[rid]
row_names.append(name)
mats.append(discre)
rns.append(row_names)
comp_div.extend(div)
comp_names.extend([idn_dict[rid] for rid in row_ids])
comp_levels.extend([level] * len(div))
display_div(comp_div, comp_names, comp_levels, term)
if titles == None:
#titles = [None]*(len(mats) - 1) + ['Brain partonomy']
#titles = [None]*(len(mats) - 1) + ['Diseases']
#titles = [None]*(len(mats) - 1) + ['Neuron types']
titles = [None]*len(mats)
hspace = 0
else:
hspace = .1
#display_grid(mats, rns, titles, unames, figname, hspace=hspace)
def make_legend():
# NOTE: this helper looked unfinished; the fixes below only make it runnable
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
matrix = np.array([[np.nan, 1, 2, 3]])  # imshow needs a 2d array; None becomes nan
img = ax1.imshow(matrix, interpolation='nearest', cmap=plt.cm.get_cmap('Greens'), aspect='auto', vmin=0, vmax=3)
ax2.barh(0, 1, .5)
###
# Preliminary classifying, needs work... look at dipper for inspiration? or just write out the spec by hand
###
class BaseDatagetter:
"""
Datagetter classes should implement all the methods needed to do 3 things
1) retrieve the raw data and put it in structured form
2) sort the raw data
3) collapse the raw data
This class will actually RETRIEVE the data and save it so that it can be operated on later
Often we will actually BUILD the collapse map or sort using only the raw data and thus
we will not need the functionality provided here.
NOTE: This class shall deal with REMOTE resources that CHANGE
NOTE: This class is what we will use to map DIVERSELY formatted data
into our common internal format, so make it good.
NOTE: one problem to consider is that this is going to assume that the "datum" indexed
is a scalar, but surely we can do better and make it work as long as the datatype is consistent
across all indices (eg a bitmap, or a time series, or any set of things that all produce valid
output from an arbitrary function f or set of functions)
NOTE: if you don't need an index and are just going to use 0...n then you probably can just use dg.get()
"""
def __init__(self):
self.indicies = []
self.get()
def get_indecies(self):
""" n should be number of the index. If not given auto? grrrr
or rather, this method should get ALL the indexes
This could read in from a text file or from the internet.
We recommend defining the different functions as class methods
and then calling them from here.
"""
raise NotImplementedError
# RULE the objects returned by the function that queries index_one
# should themselves contain denormalized references named by objects in index_two
index = ['hello','i','am','a','valid','index'] # TODO uniqueness??
self.indicies.append(index)
def get_collapse_map(self):
""" If you are going to sum the quantitative values across fields and there
is an external source for mapping those fields
"""
raise NotImplementedError
def get_sort_map(self):
raise NotImplementedError
def get(self):
""" Replace this method to define how to retrieve the data"""
raise NotImplementedError
def _make_metadata(self):
""" Store standard metadata such as date and time """
pass
def store(self):
""" Store the retrieved results somehwere for now, we pickle
Consider also sqlite or postgress
"""
raise NotImplemented
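class _ListDatagetter(BaseDatagetter):
    """ Hedged sketch of a minimal concrete Datagetter, not used elsewhere in this
    module: it 'retrieves' a hard coded list so the three responsibilities
    (retrieve, index, collapse/sort) are visible without touching any remote
    resource. """
    def get(self):
        self.data = [('a', 1), ('b', 2), ('a', 3)]
    def get_indecies(self):
        index = sorted({key for key, _ in self.data})  # TODO uniqueness, as noted above
        self.indicies.append(index)
    def get_collapse_map(self):
        return {}  # nothing to collapse in this toy example
    def get_sort_map(self):
        return {}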
class XMLDatagetter(BaseDatagetter):
def __init__(self, timeout=8):
self.timeout = timeout
def get_xml(self, url):
try:
self.resp = requests.get(url, timeout=self.timeout) # sometimes we need a longer timeout :/ FIXME :/ stateful?
except requests.exceptions.Timeout:
#TODO
pass
try:
self.xmlDoc = libxml2.parseDoc(self.resp.text)  # was resp.text, which is undefined here
except libxml2.parserError: # derp
#TODO
pass
def run_xpath(self, query):
# TODO: unfinished stub in the original; left as an explicit no-op placeholder
pass
class NIFSummary(BaseDatagetter):
def __init__(self):
pass
def get(self): #XXX I HAVE NO IDEA WHAT
pass
###
# Main
###
def main():
if args['--ipython']:
embed()
if args['--make-csv']:
export_csv(args['--make-csv'])
if args['--term-file']:
path = args['--term-file']
dirname = os.path.dirname(path)
filename = os.path.basename(path)
out = get_term_file_counts(args['--term-file'], filename, dirname)
embed()
if args['--pickle-file']:
path = args['--pickle-file']
graph_partonomy((path,)) # FIXME output naming
graph_data((path,),ice=args['--ice']) # FIXME output naming
#run_auditory_terms()
#aud_terms = 'auditory',
#graph_partonomy(terms=aud_terms, figname='Auditory Terms')
#graph_data() # FIXME
#acquire_data()
#out = acquire_nt_data()
#out = get_term_file_counts('/tmp/neurotransmitters','neurotransmitter') # FIXME NOTE: one thing to consider is how to deal to references to certain molecules which are NOT about its context as a neurotransmitter... THAT could be tricky
#out= acquire_doa_data()
#embed()
#out = get_term_file_counts('/tmp/blast_names','species') #TODO clean up names
#out = acquire_dis_data()
#graph_data()
#out = acquire_type_data()
#"""
#neu_terms = 'neuron',
#graph_partonomy(terms=neu_terms, figname='Neuron Types')
#dis_terms = 'nervous system disease',
#graph_partonomy(terms=dis_terms, figname='diseases')
#brain_terms = 'hindbrain', 'midbrain', 'forebrain',
#graph_partonomy(terms=brain_terms, figname='partonomy')
#other_terms = 'species', #'neurotransmitter', 'drug of abuse'
#other_titles = 'Species', #'Neurotransmitters', 'Drugs of abuse'
#graph_partonomy(terms=other_terms,flatten=True,titles=other_titles, figname='others') # species 227
#"""
if __name__ == "__main__":
main()
|
CheerfulTeapot/heatmaps
|
heatmaps.py
|
Python
|
mit
| 38,676
|
[
"NEURON"
] |
a434135cf19bfabda5886ba0651f99359c352bbd0801605643907261e3fc227a
|
from . import numdifftools, numpy as np
import pytest
from pyRSD.rsd.power.gal.derivatives import dPgal_dsigma_c
NMU = 41
@pytest.mark.parametrize("socorr", [True, False])
@pytest.mark.parametrize("fog_model", ['modified_lorentzian', 'lorentzian', 'gaussian'])
def test_partial(driver, socorr, fog_model):
# set the socorr and fog model
driver.theory.model.use_so_correction = socorr
driver.theory.model.fog_model = fog_model
model = driver.theory.model
# get the deriv arguments
k = driver.data.combined_k
mu = np.linspace(0., 1., NMU)
# broadcast to the right shape
k = k[:, np.newaxis]
mu = mu[np.newaxis, :]
k, mu = np.broadcast_arrays(k, mu)
k = k.ravel(order='F')
mu = mu.ravel(order='F')
pars = driver.theory.fit_params
args = (model, pars, k, mu)
# our derivative
x = dPgal_dsigma_c.eval(*args)
# numerical derivative
def f(x):
model.sigma_c = x
return driver.theory.model.power(k, mu)
g = numdifftools.Derivative(f, step=1e-3)
y = g(model.sigma_c)
# compare
np.testing.assert_allclose(x, y, rtol=1e-2)
|
nickhand/pyRSD
|
pyRSD/tests/test_gal_derivatives/test_sigma_c.py
|
Python
|
gpl-3.0
| 1,146
|
[
"Gaussian"
] |
4483b6d2a226b990f89dd25054b98513d1342388e3b25bb764bb7bba475d83f8
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015 Diamond Light Source <stuart.fisher@diamond.ac.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implementation of Jon Diprose's ImageUploader in Python for
# formulatrix plate imagers, takes outputted xml / image files and
# puts them in the correct location, and adds an entry to SynchWeb
import json
import time
import glob
import re
import os
import sys
import atexit
import signal
import errno
import subprocess
import logging
import logging.handlers
import MySQLdb
from PIL import Image
from shutil import copyfile
import xml.etree.ElementTree as ET
class MySQL:
def __init__(self, user, pw, db, host='127.0.0.1'):
self._conn = MySQLdb.connect(host=host, user=user, passwd=pw, db=db)
self._conn.autocommit(1)
self._conn.ping(True)
self._cur = self._conn.cursor(MySQLdb.cursors.DictCursor)
def __del__(self):
if self._cur is not None:
self._cur.close()
if self._conn is not None:
self._conn.close()
def pq(self, query, args=[]):
res = self._cur.execute(query, args)
rows = []
for r in self._cur:
rows.append(r)
return rows if rows else []
def id(self):
return self._cur.connection.insert_id()
class FormulatrixUploader:
_running = True
def __init__(self, db=None, config=None):
self.db = db
self.config = config
for d in ['processed', 'nosample']:
if not os.path.exists(config['holding_dir']+'/'+d):
os.mkdir(config['holding_dir']+'/'+d)
def run(self):
while self._running:
files = glob.glob(self.config['holding_dir']+"/*EF*.xml")
for xml in files:
logging.getLogger().debug(xml)
st = os.stat(xml)
image = xml.replace('.xml', '.jpg')
if not os.path.exists(image):
logging.getLogger().error('Corresponding image not found for %s expected %s' % (str(xml), str(image)) )
continue
if time.time() - st.st_mtime > 10:
tree = ET.parse(xml)
root = tree.getroot()
# deal with xml namespace
ns = root.tag.split('}')[0].strip('{')
nss = { 'oppf': ns }
inspectionid = re.sub(r'-.*', '', root.find('oppf:ImagingId', nss).text)
logging.getLogger().debug('inspection: %s' % str(inspectionid))
container = self._get_container(inspectionid)
if container is None:
continue
# Check if the visit dir exists yet
visit = container['visit']
proposal = visit[ : visit.index('-')]
new_root = '{root}/{proposal}/{visit}'.format(root=self.config['upload_dir'], proposal=proposal, visit=visit)
old_root = '{root}/{year}/{visit}'.format(root=self.config['upload_dir_old'], year=container['year'], visit=visit)
the_root = None
if os.path.exists(new_root):
the_root = new_root
elif os.path.exists(old_root):
the_root = old_root
else:
logging.getLogger().error('Visit location for image doesnt exist, tried %s and %s' % (new_root, old_root))
continue
# Keep images in visit/imaging/containerid/inspectionid
new_path = '{the_root}/imaging/{containerid}/{inspectionid}'.format(the_root=the_root, containerid=container['containerid'], inspectionid=inspectionid)
if not os.path.exists(new_path):
try:
os.makedirs(new_path)
if self.config['web_user']:  # use the injected config rather than the module-level global
subprocess.call(['/usr/bin/setfacl', '-R', '-m', 'u:'+self.config['web_user']+':rwx', new_path])
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(new_path):
pass
elif exc.errno == errno.EACCES:
logging.getLogger().error("%s - %s" % (exc.strerror, new_path))
continue
else:
raise
position = self._get_position(root.find('oppf:Drop', nss).text, container['containertype'])
if position is None:
logging.getLogger().error('Could not match drop: %s to position: %s' % (root.find('oppf:Drop', nss).text, container['containertype']) )
continue
logging.getLogger().debug('Drop: %s position: %s' % (root.find('oppf:Drop', nss).text, position))
sampleid = self._get_sampleid(position, container['containerid'])
if sampleid is None:
self._move_files(image, xml, 'nosample')
continue
mppx = float(root.find('oppf:SizeInMicrons', nss).find('oppf:Width', nss).text) / float(root.find('oppf:SizeInPixels', nss).find('oppf:Width', nss).text)
mppy = float(root.find('oppf:SizeInMicrons', nss).find('oppf:Height', nss).text) / float(root.find('oppf:SizeInPixels', nss).find('oppf:Height', nss).text)
db.pq("""INSERT INTO BLSampleImage (blsampleid, micronsperpixelx, micronsperpixely, containerinspectionid)
VALUES (%s,%s,%s,%s)""", [sampleid, mppx, mppy, inspectionid])
logging.getLogger().debug("INSERT INTO BLSampleImage "\
"(blsampleid, micronsperpixelx, micronsperpixely, containerinspectionid) "\
"VALUES (%s,%s,%s,%s)" % (str(sampleid), str(mppx), str(mppy), str(inspectionid)))
iid = self.db.id()
# Use blsampleimageid as file name as we are sure this is unique
new_file = '{path}/{iid}.jpg'.format(path=new_path, iid=iid)
db.pq("""UPDATE BLSampleImage set imagefullpath=%s WHERE blsampleimageid=%s""", [new_file, iid])
logging.getLogger().debug("UPDATE BLSampleImage set imagefullpath=%s WHERE blsampleimageid=%s" % (new_file, str(iid)))
# move image
logging.getLogger().debug('copy: %s to %s' % (image, new_file))
try:
copyfile(image, new_file)
# create a thumbnail
file, ext = os.path.splitext(new_file)
try:
im = Image.open(new_file)
im.thumbnail((self.config['thumb_width'], self.config['thumb_height']))
try:
im.save(file+'th'+ext)
except IOError as e:
logging.getLogger().error('Error saving image file %s' % file+'th'+ext)
# clear up - should be in a try ... except?
#self._move_files(image, xml, 'processed')
try:
os.unlink(image)
except IOError as e:
logging.getLogger().error('Error deleting image file %s' % image)
try:
os.unlink(xml)
except IOError as e:
logging.getLogger().error('Error deleting XML file %s' % xml)
except IOError as e:
logging.getLogger().error('Error opening image file %s' % new_file)
except IOError as e:
logging.getLogger().error('Error copying image file %s to %s' % (image, new_file))
logging.getLogger().debug('Sleeping until next iteration')
time.sleep(30)
def _move_files(self, image, xml, path):
for f in [image, xml]:
os.rename(f, f.replace(self.config['holding_dir'], self.config['holding_dir']+'/'+path))
logging.getLogger().debug('move %s %s' % (f, f.replace(self.config['holding_dir'], self.config['holding_dir']+'/'+path)))
def _get_container(self, inspectionid):
container = self.db.pq("""SELECT c.containertype, c.containerid, c.sessionid, CONCAT(CONCAT(CONCAT(p.proposalcode, p.proposalnumber), '-'), ses.visit_number) as visit, DATE_FORMAT(c.bltimestamp, '%%Y') as year
FROM Container c
INNER JOIN ContainerInspection ci ON ci.containerid = c.containerid
INNER JOIN Dewar d ON d.dewarid = c.dewarid
INNER JOIN Shipping s ON s.shippingid = d.shippingid
INNER JOIN Proposal p ON p.proposalid = s.proposalid
LEFT OUTER JOIN BLSession ses ON ses.sessionid = c.sessionid
WHERE ci.containerinspectionid=%s
LIMIT 1""", [inspectionid])
if not len(container):
logging.getLogger().error('Couldnt find container for inspectionid %s' % str(inspectionid))
return
logging.getLogger().debug(str(container))
if not container[0]['sessionid']:
logging.getLogger().error('Container %s has no sessionid. inspectionid is %s ' % (str(container[0]['containerid']), str(inspectionid)))
return
return container[0]
def _get_position(self, text_position, platetype):
well, drop = text_position.split('.')
drop = int(drop)
row = ord(well[0])-65
col = int(well[1:])-1
# Need to know what type of plate this is to know how many columns its got
# This should be in the database, currently in json format embedded in this collection:
# http://ispyb.diamond.ac.uk/beta/client/js/modules/shipment/collections/platetypes.js
if not platetype in self.config['types']:
logging.getLogger().error('Unknown plate type: %s' % platetype)
return
ty = self.config['types'][platetype]
# Position is a linear sequence left to right across the plate
return (ty['well_per_row']*row*ty['drops_per_well']) + (col*ty['drops_per_well']) + (drop-1) + 1
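# Worked example (hypothetical plate type with 12 wells per row and 3 drops per
# well): text position 'B03.2' gives row 1, col 2, drop 2, so
#   (12 * 1 * 3) + (2 * 3) + (2 - 1) + 1 = 44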
# Return a blsampleid from a position and containerid
def _get_sampleid(self, position, containerid):
sample = self.db.pq("""SELECT s.blsampleid, s.name, s.location
FROM BLSample s
INNER JOIN Container c ON c.containerid = s.containerid
WHERE s.location = %s AND c.containerid = %s
LIMIT 1""", [position, containerid])
if not len(sample):
logging.getLogger().error('Couldnt find a blsample for containerid: %s, position: %s', str(containerid), str(position))
return
logging.getLogger().debug(str(sample[0]))
return sample[0]['blsampleid']
def kill_handler(sig,frame):
hostname = os.uname()[1]
logging.getLogger().warning("%s: got SIGTERM on %s :-O" % (sys.argv[0], hostname))
logging.shutdown()
os._exit(-1)
def set_logging(logs):
levels_dict = {"debug" : logging.DEBUG, "info" : logging.INFO, "warning" : logging.WARNING, "error" : logging.ERROR, "critical" : logging.CRITICAL}
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
for log_name in logs:
handler = None
if log_name == "syslog":
handler = logging.handlers.SysLogHandler(address=(logs[log_name]['host'], logs[log_name]['port']))
elif log_name == "rotating_file":
handler = logging.handlers.RotatingFileHandler(filename=logs[log_name]['filename'], maxBytes=logs[log_name]['max_bytes'], backupCount=logs[log_name]['no_files'])
else:
sys.exit("Invalid logging mechanism defined in config: %s. (Valid options are syslog and rotating_file.)" % log_name)
handler.setFormatter(logging.Formatter(logs[log_name]['format']))
level = logs[log_name]['level']
if levels_dict[level]:
handler.setLevel(levels_dict[level])
else:
handler.setLevel(logging.WARNING)
logger.addHandler(handler)
cf = open('config.json')
config = json.load(cf)
cf.close()
set_logging(config['logging'])
try:
pid = os.fork()
except OSError as e:
logging.getLogger().error("Unable to fork, can't run as daemon in background")
sys.exit(1)
if pid != 0:
sys.exit()
# pid_file = config['pid_file']
# if pid_file != None:
# try:
# f = open(pid_file, 'w')
# f.write(str(os.getpid()))
# f.close()
# except:
# logging.getLogger().error("Unable to write to pid file %s" % pid_file)
atexit.register(logging.shutdown)
signal.signal(signal.SIGTERM, kill_handler)
db = MySQL(user=config['user'], pw=config['pw'], db=config['db'], host=config['host'])
uploader = FormulatrixUploader(db=db, config=config)
uploader.run()
|
tcspain/SynchWeb
|
api/formulatrix/uploader/formulatrix_uploader.py
|
Python
|
apache-2.0
| 13,823
|
[
"VisIt"
] |
78cf07f169bd5c9a78e516b4a3ae3d8111e32f2a87ba2a7bde69d3271e70827a
|
# -*- coding: utf-8 -*-
#
# Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola
# Internazionale Superiore di Studi Avanzati). All rights reserved.
# This file is distributed under the terms of the MIT License. See the
# file 'LICENSE' in the root directory of the present distribution, or
# http://opensource.org/licenses/MIT.
# Authors: Davide Brunato
#
"""
This module contains ElementTree helper functions for the xsdtypes package
"""
import logging
from xml.etree import ElementTree
from ..utils.logger import set_logger
logger = logging.getLogger('qespresso')
def etree_iter_path(node, tag=None, path='.'):
"""
A version of ElementTree node's iter function that return a couple
with node and his relative path.
:param node:
:param tag:
:param path:
:return:
"""
if tag == "*":
tag = None
if tag is None or node.tag == tag:
yield node, path
for child in node:
_child_path = '%s/%s' % (path, child.tag)
for child, child_path in etree_iter_path(child, tag, path=_child_path):
yield child, child_path
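def _etree_iter_path_example():
    """ Hedged usage sketch, not used elsewhere in this module: list every element
    of a tiny tree together with its relative path. """
    root = ElementTree.fromstring('<a><b><c/></b><b/></a>')
    # expected: [('a', '.'), ('b', './b'), ('c', './b/c'), ('b', './b')]
    return [(node.tag, path) for node, path in etree_iter_path(root)]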
def etree_to_dict(etree, xml_schema, dict_class=dict, spaces_for_tab=4, use_defaults=True):
set_logger(1)
root_node = etree.getroot()
ret_dict = etree_node_to_dict(
root_node, xml_schema, dict_class=dict_class, spaces_for_tab=spaces_for_tab, use_defaults=use_defaults
)
set_logger(1)
return ret_dict
def etree_node_to_dict(root_node, xml_schema, root_path='.', dict_class=dict, spaces_for_tab=4, use_defaults=True):
def _etree_node_to_dict(node, node_path):
node_dict = dict_class()
logger.debug("Decode node '%s' with path '%s'" % (node.tag, node_path))
if node.attrib:
# if we have attributes, decode them
logger.debug("Decode attributes of element '{0}': {1}".format(node.tag, node.items()))
attr_dict = dict([
(attr, xml_schema.get_attribute_type(attr, node_path).decode(value))
for attr, value in node.items()
])
if use_defaults:
# Add default values for missing attributes
for attr in list(set(xml_schema.get_attributes(node_path)) - set(node.keys())):
default_value = xml_schema.get_attribute_default(attr, node_path)
if default_value is not None:
xsd_type = xml_schema.get_attribute_type(attr, node_path)
attr_dict[attr] = xsd_type.decode(default_value)
node_dict.update(attr_dict)
for child in node:
# recursively add the element's children
new_item = _etree_node_to_dict(child, node_path='%s/%s' % (node_path, child.tag))
if child.tag in node_dict:
# found duplicate tag, force a list
if isinstance(node_dict[child.tag], list):
# append to existing list
node_dict[child.tag].append(new_item)
else:
# convert to list
node_dict[child.tag] = [node_dict[child.tag], new_item]
else:
# only one, directly set the dictionary
node_dict[child.tag] = new_item
if node.text is None:
if use_defaults:
text = xml_schema.get_element_default(node_path) or ''
else:
text = ''
elif spaces_for_tab is None:
text = node.text.strip()
else:
text = node.text.strip().replace('\t', ' ' * spaces_for_tab)
# Get the XSD type for node's text decoding
xsd_type = xml_schema.get_element_type(node_path)
logger.debug("Decode '{0}' to type '{1}'".format(
' '.join(text.replace('\n', r'\n').replace('\t', ' ').split()), xsd_type.name)
)
if node_dict:
# if we have a dictionary add the text as a dictionary value (if there is any)
if len(text) > 0:
node_dict['_text'] = xsd_type.decode(text)
logger.debug("Text decoded to: {0}".format(node_dict['_text']))
else:
# if we don't have child nodes or attributes, just set the text
node_dict = xsd_type.decode(text)
logger.debug("Text decoded to: {0}".format(node_dict))
return node_dict
ret_dict = dict_class({root_node.tag: _etree_node_to_dict(root_node, root_path)})
return ret_dict
#
# Two functions to convert an element into a dictionary and back, an adaptation of code taken
# from http://code.activestate.com/recipes/573463-converting-xml-to-dictionary-and-back/.
#
def xml_to_dict(root_node, dict_class=dict, spaces_for_tab=4):
"""
Converts an XML ElementTree to a dictionary
:param root_node: Root node (Element) of the XML ElementTree
:param dict_class: Dictionary type subclass to create
:param spaces_for_tab: If not None, substitute tab characters with N spaces
:return: Dictionary representing the XML document
"""
def _element_to_dict(node):
node_dict = dict_class()
if len(node.items()) > 0:
# if we have attributes, set them
node_dict.update(dict(node.items()))
for child in node:
# recursively add the element's children
new_item = _element_to_dict(child)
if child.tag in node_dict:
# found duplicate tag, force a list
if isinstance(node_dict[child.tag], list):
# append to existing list
node_dict[child.tag].append(new_item)
else:
# convert to list
node_dict[child.tag] = [node_dict[child.tag], new_item]
else:
# only one, directly set the dictionary
node_dict[child.tag] = new_item
if node.text is None:
text = ''
elif spaces_for_tab is None:
text = node.text.strip()
else:
text = node.text.strip().replace('\t', ' ' * spaces_for_tab)
if node_dict:
# if we have a dictionary add the text as a dictionary value (if there is any)
if len(text) > 0:
node_dict['_text'] = text
else:
# if we don't have child nodes or attributes, just set the text
node_dict = text
return node_dict
# if not isinstance(root_node, etree.Element):
# raise TypeError('Expected ElementTree.Element')
return dict_class({root_node.tag: _element_to_dict(root_node)})
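def _xml_to_dict_example():
    """ Hedged usage sketch, not used elsewhere in this module: duplicate tags are
    collected into a list, and element text becomes either the value itself or a
    '_text' entry when attributes or children are present. """
    root = ElementTree.fromstring('<calc units="Ry"><val>1</val><val>2</val></calc>')
    # expected: {'calc': {'units': 'Ry', 'val': ['1', '2']}}
    return xml_to_dict(root)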
def dict_to_xml(xml_dict):
"""
Converts a dictionary into an XML ElementTree Element
"""
def _dict_to_element(parent, dictitem):
assert not isinstance(dictitem, list)
if isinstance(dictitem, dict):
for (tag, child) in dictitem.items():
if str(tag) == '_text':
parent.text = str(child)
elif isinstance(child, list):
# iterate through the array and convert
for listchild in child:
elem = ElementTree.Element(tag)
elem.tail = '\n'
parent.append(elem)
_dict_to_element(elem, listchild)
else:
elem = ElementTree.Element(tag)
elem.tail = '\n'
parent.append(elem)
_dict_to_element(elem, child)
else:
parent.text = str(dictitem)
root_tag = list(xml_dict.keys())[0]  # list() so this also works on Python 3
root_element = ElementTree.Element(root_tag)
_dict_to_element(root_element, xml_dict[root_tag])
return root_element
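def _dict_to_xml_example():
    """ Hedged round-trip sketch, not used elsewhere in this module: lists expand
    back into repeated elements and '_text' becomes the element text. """
    elem = dict_to_xml({'calc': {'val': [{'_text': '1'}, {'_text': '2'}]}})
    # expected serialization: <calc><val>1</val>\n<val>2</val>\n</calc>
    return ElementTree.tostring(elem)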
|
afonari/q-e_schrodinger
|
bin/qexsd/qespresso/xsdtypes/etree.py
|
Python
|
gpl-2.0
| 7,763
|
[
"Quantum ESPRESSO"
] |
f0d6d2efea46ca3aaf5e56e0cd5879b39d318fcdd3f1f76c45c086e951a05bf3
|
import copy
import logging
from typing import List, Dict, Iterator, Tuple, Iterable, Optional
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.requirements import Requirement, RequirementAnd
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.resources.resource_database import ResourceDatabase
from randovania.game_description.resources.resource_info import CurrentResources
from randovania.game_description.world.area import Area
from randovania.game_description.world.area_identifier import AreaIdentifier
from randovania.game_description.world.dock import DockLockType
from randovania.game_description.world.node import Node, DockNode, TeleporterNode, PickupNode, PlayerShipNode
from randovania.game_description.world.node_identifier import NodeIdentifier
from randovania.game_description.world.world import World
class WorldList:
worlds: List[World]
_nodes_to_area: Dict[Node, Area]
_nodes_to_world: Dict[Node, World]
_nodes: Optional[Tuple[Node, ...]]
_pickup_index_to_node: Dict[PickupIndex, PickupNode]
def __deepcopy__(self, memodict):
return WorldList(
worlds=copy.deepcopy(self.worlds, memodict),
)
def __init__(self, worlds: List[World]):
self.worlds = worlds
self._nodes = None
def _refresh_node_cache(self):
self._nodes_to_area, self._nodes_to_world = _calculate_nodes_to_area_world(self.worlds)
self._nodes = tuple(self._iterate_over_nodes())
self._pickup_index_to_node = {
node.pickup_index: node
for node in self._nodes
if isinstance(node, PickupNode)
}
def ensure_has_node_cache(self):
if self._nodes is None:
self._refresh_node_cache()
def invalidate_node_cache(self):
self._nodes = None
def _iterate_over_nodes(self) -> Iterator[Node]:
for world in self.worlds:
yield from world.all_nodes
def world_with_name(self, world_name: str) -> World:
for world in self.worlds:
if world.name == world_name or world.dark_name == world_name:
return world
raise KeyError("Unknown name: {}".format(world_name))
def world_with_area(self, area: Area) -> World:
for world in self.worlds:
if area in world.areas:
return world
raise KeyError("Unknown area: {}".format(area))
def identifier_for_node(self, node: Node) -> NodeIdentifier:
world = self.nodes_to_world(node)
area = self.nodes_to_area(node)
return NodeIdentifier.create(world.name, area.name, node.name)
@property
def all_areas(self) -> Iterator[Area]:
for world in self.worlds:
yield from world.areas
@property
def all_nodes(self) -> Tuple[Node, ...]:
self.ensure_has_node_cache()
return self._nodes
@property
def num_pickup_nodes(self) -> int:
return sum(1 for node in self.all_nodes if isinstance(node, PickupNode))
@property
def all_worlds_areas_nodes(self) -> Iterable[Tuple[World, Area, Node]]:
for world in self.worlds:
for area in world.areas:
for node in area.nodes:
yield world, area, node
def world_name_from_area(self, area: Area, distinguish_dark_aether: bool = False) -> str:
world = self.world_with_area(area)
if distinguish_dark_aether:
return world.correct_name(area.in_dark_aether)
else:
return world.name
def world_name_from_node(self, node: Node, distinguish_dark_aether: bool = False) -> str:
return self.world_name_from_area(self.nodes_to_area(node), distinguish_dark_aether)
def area_name(self, area: Area, separator: str = " - ", distinguish_dark_aether: bool = True) -> str:
return "{}{}{}".format(
self.world_name_from_area(area, distinguish_dark_aether),
separator,
area.name)
def node_name(self, node: Node, with_world=False, distinguish_dark_aether: bool = False) -> str:
prefix = "{}/".format(self.world_name_from_node(node, distinguish_dark_aether)) if with_world else ""
return "{}{}/{}".format(prefix, self.nodes_to_area(node).name, node.name)
def nodes_to_world(self, node: Node) -> World:
self.ensure_has_node_cache()
return self._nodes_to_world[node]
def nodes_to_area(self, node: Node) -> Area:
self.ensure_has_node_cache()
return self._nodes_to_area[node]
def resolve_dock_node(self, node: DockNode, patches: GamePatches) -> Optional[Node]:
connection = patches.dock_connection.get(self.identifier_for_node(node),
node.default_connection)
if connection is not None:
return self.node_by_identifier(connection)
def resolve_teleporter_node(self, node: TeleporterNode, patches: GamePatches) -> Optional[Node]:
connection = patches.elevator_connection.get(self.identifier_for_node(node),
node.default_connection)
if connection is not None:
return self.resolve_teleporter_connection(connection)
def resolve_teleporter_connection(self, connection: AreaIdentifier) -> Node:
area = self.area_by_area_location(connection)
if area.default_node is None:
raise IndexError("Area '{}' does not have a default_node".format(area.name))
node = area.node_with_name(area.default_node)
if node is None:
raise IndexError("Area '{}' default_node ({}) is missing".format(area.name, area.default_node))
return node
def connections_from(self, node: Node, patches: GamePatches) -> Iterator[Tuple[Node, Requirement]]:
"""
Queries all nodes from other areas you can go from a given node. Aka, doors and teleporters
:param patches:
:param node:
:return: Generator of pairs Node + Requirement for going to that node
"""
if isinstance(node, DockNode):
try:
target_node = self.resolve_dock_node(node, patches)
if target_node is None:
return
forward_weakness = patches.dock_weakness.get(self.identifier_for_node(node),
node.default_dock_weakness)
requirement = forward_weakness.requirement
# TODO: only add requirement if the blast shield has not been destroyed yet
if isinstance(target_node, DockNode):
# TODO: Target node is expected to be a dock. Should this error?
back_weakness = patches.dock_weakness.get(self.identifier_for_node(target_node),
target_node.default_dock_weakness)
if back_weakness.lock_type == DockLockType.FRONT_BLAST_BACK_BLAST:
requirement = RequirementAnd([requirement, back_weakness.requirement])
elif back_weakness.lock_type == DockLockType.FRONT_BLAST_BACK_IMPOSSIBLE:
# FIXME: this should check if we've already opened the back
if back_weakness != forward_weakness:
requirement = Requirement.impossible()
yield target_node, requirement
except ValueError:
# TODO: fix data to not have docks pointing to nothing
yield None, Requirement.impossible()
if isinstance(node, TeleporterNode):
try:
target_node = self.resolve_teleporter_node(node, patches)
if target_node is not None:
yield target_node, Requirement.trivial()
except IndexError:
# TODO: fix data to not have teleporters pointing to areas with invalid default_node_index
logging.error("Teleporter is broken!", node)
yield None, Requirement.impossible()
if isinstance(node, PlayerShipNode):
for other_node in self.all_nodes:
if isinstance(other_node, PlayerShipNode) and other_node != node:
yield other_node, other_node.is_unlocked
def area_connections_from(self, node: Node) -> Iterator[Tuple[Node, Requirement]]:
"""
Queries all nodes from the same area you can go from a given node.
:param node:
:return: Generator of pairs Node + Requirement for going to that node
"""
area = self.nodes_to_area(node)
for target_node, requirements in area.connections[node].items():
yield target_node, requirements
def potential_nodes_from(self, node: Node, patches: GamePatches) -> Iterator[Tuple[Node, Requirement]]:
"""
Queries all nodes you can go from a given node, checking doors, teleporters and other nodes in the same area.
:param node:
:param patches:
:return: Generator of pairs Node + Requirement for going to that node
"""
yield from self.connections_from(node, patches)
yield from self.area_connections_from(node)
def patch_requirements(self, static_resources: CurrentResources, damage_multiplier: float,
database: ResourceDatabase) -> None:
"""
Patches all Node connections, assuming the given resources will never change their quantity.
This removes all checking for tricks and difficulties at runtime, since these never change.
All damage requirements are multiplied by the given multiplier.
:param static_resources:
:param damage_multiplier:
:param database:
:return:
"""
for world in self.worlds:
for area in world.areas:
for node in area.nodes:
if isinstance(node, DockNode):
requirement = node.default_dock_weakness.requirement
object.__setattr__(node.default_dock_weakness, "requirement",
requirement.patch_requirements(static_resources,
damage_multiplier,
database).simplify())
for connections in area.connections.values():
for target, value in connections.items():
connections[target] = value.patch_requirements(
static_resources, damage_multiplier, database).simplify()
def node_by_identifier(self, identifier: NodeIdentifier) -> Node:
area = self.area_by_area_location(identifier.area_location)
node = area.node_with_name(identifier.node_name)
if node is not None:
return node
raise ValueError(f"No node with name {identifier.node_name} found in {area}")
def area_by_area_location(self, location: AreaIdentifier) -> Area:
return self.world_and_area_by_area_identifier(location)[1]
def world_by_area_location(self, location: AreaIdentifier) -> World:
return self.world_with_name(location.world_name)
def world_and_area_by_area_identifier(self, identifier: AreaIdentifier) -> tuple[World, Area]:
world = self.world_with_name(identifier.world_name)
area = world.area_by_name(identifier.area_name)
return world, area
def identifier_for_area(self, area: Area) -> AreaIdentifier:
world = self.world_with_area(area)
return AreaIdentifier(world_name=world.name, area_name=area.name)
def node_to_area_location(self, node: Node) -> AreaIdentifier:
return AreaIdentifier(
world_name=self.nodes_to_world(node).name,
area_name=self.nodes_to_area(node).name,
)
def node_from_pickup_index(self, index: PickupIndex) -> PickupNode:
self.ensure_has_node_cache()
return self._pickup_index_to_node[index]
def add_new_node(self, area: Area, node: Node):
self.ensure_has_node_cache()
self._nodes_to_area[node] = area
self._nodes_to_world[node] = self.world_with_area(area)
def _calculate_nodes_to_area_world(worlds: Iterable[World]):
nodes_to_area = {}
nodes_to_world = {}
for world in worlds:
for area in world.areas:
for node in area.nodes:
if node in nodes_to_area:
raise ValueError(
"Trying to map {} to {}, but already mapped to {}".format(
node, area, nodes_to_area[node]))
nodes_to_area[node] = area
nodes_to_world[node] = world
return nodes_to_area, nodes_to_world
|
henriquegemignani/randovania
|
randovania/game_description/world/world_list.py
|
Python
|
gpl-3.0
| 12,975
|
[
"BLAST"
] |
900d5ff87b7c3598c1d838c2d7697760eb8b4c754ea8663a24eb3fd60940658e
|
from nanopore.mappers.lastz import Lastz
import pysam
import os
from nanopore.analyses.utils import pathToBaseNanoporeDir
class LastzParams(Lastz):
def run(self):
#scoreFile = os.path.join(pathToBaseNanoporeDir(), "nanopore", "mappers", "last_em_575_M13_2D_scores.txt")
#Lastz.run(self, args="--hspthresh=1200 --gappedthresh=1500 --seed=match12 --scores=%s" % scoreFile)
Lastz.run(self, args="--hspthresh=1800 --gap=100,100")
class LastzParamsChain(LastzParams):
def run(self):
LastzParams.run(self)
self.chainSamFile()
class LastzParamsRealign(LastzParams):
def run(self):
LastzParams.run(self)
self.realignSamFile()
class LastzParamsRealignEm(LastzParams):
def run(self):
LastzParams.run(self)
self.realignSamFile(doEm=True)
class LastzParamsRealignTrainedModel(LastzParams):
def run(self):
LastzParams.run(self)
self.realignSamFile(useTrainedModel=True)
|
mitenjain/nanopore
|
nanopore/mappers/lastzParams.py
|
Python
|
mit
| 1,004
|
[
"pysam"
] |
75179e2a04c5b6c8bc34e08d3d03358dc29308c620ca959fea2a92253b2c1b24
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svc
author:
- Brian Coca (@bcoca)
version_added: "1.9"
short_description: Manage daemontools services
description:
- Controls daemontools services on remote hosts using the svc utility.
options:
name:
description:
- Name of the service to manage.
required: true
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
svc (svc -t) and C(killed) will always kill the svc (svc -k).
C(reloaded) will send a SIGUSR1 (svc -1).
C(once) will run a normally downed svc once (svc -o), not really
an idempotent operation.
choices: [ killed, once, reloaded, restarted, started, stopped ]
downed:
description:
- Should a 'down' file exist or not, if it exists it disables auto startup.
Defaults to no. Downed does not imply stopped.
type: bool
default: 'no'
enabled:
description:
- Whether the service is enabled or not, if disabled it also implies stopped.
Take note that a service can be enabled and downed (no auto restart).
type: bool
service_dir:
description:
- Directory svscan watches for services
default: /service
service_src:
description:
- Directory where services are defined, the source of symlinks to service_dir.
'''
EXAMPLES = '''
- name: Start svc dnscache, if not running
svc:
name: dnscache
state: started
- name: Stop svc dnscache, if running
svc:
name: dnscache
state: stopped
- name: Kill svc dnscache, in all cases
svc:
name: dnscache
state: killed
- name: Restart svc dnscache, in all cases
svc:
name: dnscache
state: restarted
- name: Reload svc dnscache, in all cases
svc:
name: dnscache
state: reloaded
- name: Using alternative svc directory location
svc:
name: dnscache
state: reloaded
service_dir: /var/service
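# Hedged example, not from the original module documentation: enable dnscache
# but keep it downed so it will not start automatically
- name: Enable svc dnscache without autostart
  svc:
    name: dnscache
    enabled: yes
    downed: yes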
'''
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Svc(object):
"""
Main class that handles daemontools, can be subclassed and overridden in case
we want to use a 'derivative' like encore, s6, etc
"""
# def __new__(cls, *args, **kwargs):
# return _load_dist_subclass(cls, args, kwargs)
def __init__(self, module):
self.extra_paths = ['/command', '/usr/local/bin']
self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
self.module = module
self.name = module.params['name']
self.service_dir = module.params['service_dir']
self.service_src = module.params['service_src']
self.enabled = None
self.downed = None
self.full_state = None
self.state = None
self.pid = None
self.duration = None
self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
self.svc_full = '/'.join([self.service_dir, self.name])
self.src_full = '/'.join([self.service_src, self.name])
self.enabled = os.path.lexists(self.svc_full)
if self.enabled:
self.downed = os.path.lexists('%s/down' % self.svc_full)
self.get_status()
else:
self.downed = os.path.lexists('%s/down' % self.src_full)
self.state = 'stopped'
def enable(self):
if os.path.exists(self.src_full):
try:
os.symlink(self.src_full, self.svc_full)
except OSError as e:
self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
else:
self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
def disable(self):
try:
os.unlink(self.svc_full)
except OSError as e:
self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
self.execute_command([self.svc_cmd, '-dx', self.src_full])
src_log = '%s/log' % self.src_full
if os.path.exists(src_log):
self.execute_command([self.svc_cmd, '-dx', src_log])
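# get_status() parses the output of svstat. A typical line looks roughly like
# "/service/dnscache: up (pid 1234) 56 seconds" (illustrative), which is why the
# regular expressions below extract the pid, the duration in seconds and the
# up/down/want markers used to derive the state.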
def get_status(self):
(rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
if err is not None and err:
self.full_state = self.state = err
else:
self.full_state = out
m = re.search(r'\(pid (\d+)\)', out)
if m:
self.pid = m.group(1)
m = re.search(r'(\d+) seconds', out)
if m:
self.duration = m.group(1)
if re.search(' up ', out):
self.state = 'start'
elif re.search(' down ', out):
self.state = 'stopp'
else:
self.state = 'unknown'
return
if re.search(' want ', out):
self.state += 'ing'
else:
self.state += 'ed'
def start(self):
return self.execute_command([self.svc_cmd, '-u', self.svc_full])
def stopp(self):
return self.stop()
def stop(self):
return self.execute_command([self.svc_cmd, '-d', self.svc_full])
def once(self):
return self.execute_command([self.svc_cmd, '-o', self.svc_full])
def reload(self):
return self.execute_command([self.svc_cmd, '-1', self.svc_full])
def restart(self):
return self.execute_command([self.svc_cmd, '-t', self.svc_full])
def kill(self):
return self.execute_command([self.svc_cmd, '-k', self.svc_full])
def execute_command(self, cmd):
try:
(rc, out, err) = self.module.run_command(' '.join(cmd))
except Exception as e:
self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
return (rc, out, err)
def report(self):
self.get_status()
states = {}
for k in self.report_vars:
states[k] = self.__dict__[k]
return states
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
enabled=dict(type='bool'),
downed=dict(type='bool'),
dist=dict(type='str', default='daemontools'),
service_dir=dict(type='str', default='/service'),
service_src=dict(type='str', default='/etc/service'),
),
supports_check_mode=True,
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
state = module.params['state']
enabled = module.params['enabled']
downed = module.params['downed']
svc = Svc(module)
changed = False
orig_state = svc.report()
if enabled is not None and enabled != svc.enabled:
changed = True
if not module.check_mode:
try:
if enabled:
svc.enable()
else:
svc.disable()
except (OSError, IOError) as e:
module.fail_json(msg="Could not change service link: %s" % to_native(e))
if state is not None and state != svc.state:
changed = True
if not module.check_mode:
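# The method name is derived by stripping the trailing 'ed' from the requested
# state: 'started' -> start(), 'stopped' -> stopp() (an alias for stop()),
# 'killed' -> kill(), 'restarted' -> restart(), 'reloaded' -> reload().
# (Note that 'once'[:-2] yields 'on', which has no matching method above.)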
getattr(svc, state[:-2])()
if downed is not None and downed != svc.downed:
changed = True
if not module.check_mode:
d_file = "%s/down" % svc.svc_full
try:
if downed:
open(d_file, "a").close()
else:
os.unlink(d_file)
except (OSError, IOError) as e:
module.fail_json(msg="Could not change downed file: %s " % (to_native(e)))
module.exit_json(changed=changed, svc=svc.report())
if __name__ == '__main__':
main()
|
shepdelacreme/ansible
|
lib/ansible/modules/system/svc.py
|
Python
|
gpl-3.0
| 9,270
|
[
"Brian"
] |
dd245a4cf513beb2b3838adb9a82ecb357c051e5da4543bcd2577211755b9988
|
#!/usr/bin/env python
""" update local cfg
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from DIRAC.Core.Base import Script
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [options] ... DB ...' % Script.scriptName,
'Arguments:',
' setup: Name of the build setup (mandatory)']))
Script.parseCommandLine()
args = Script.getPositionalArgs()
# Setup the DFC
#
# DataManagement
# {
# Production
# {
# Services
# {
# FileCatalog
# {
# DirectoryManager = DirectoryClosure
# FileManager = FileManagerPS
# SecurityManager = FullSecurityManager
# }
# }
# Databases
# {
# FileCatalogDB
# {
# DBName = FileCatalogDB
# }
# }
# }
# }
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
csAPI = CSAPI()
for sct in ['Systems/DataManagement/Production/Services',
'Systems/DataManagement/Production/Services/FileCatalog',
'Systems/DataManagement/Production/Services/MultiVOFileCatalog']:
res = csAPI.createSection(sct)
if not res['OK']:
print(res['Message'])
exit(1)
csAPI.setOption('Systems/DataManagement/Production/Services/FileCatalog/DirectoryManager', 'DirectoryClosure')
csAPI.setOption('Systems/DataManagement/Production/Services/FileCatalog/FileManager', 'FileManagerPs')
csAPI.setOption('Systems/DataManagement/Production/Services/FileCatalog/SecurityManager', 'VOMSSecurityManager')
csAPI.setOption('Systems/DataManagement/Production/Services/FileCatalog/UniqueGUID', True)
csAPI.setOption('Systems/DataManagement/Production/Services/MultiVOFileCatalog/DirectoryManager', 'DirectoryClosure')
csAPI.setOption('Systems/DataManagement/Production/Services/MultiVOFileCatalog/FileManager', 'FileManagerPs')
csAPI.setOption('Systems/DataManagement/Production/Services/MultiVOFileCatalog/SecurityManager', 'NoSecurityManager')
csAPI.setOption('Systems/DataManagement/Production/Services/MultiVOFileCatalog/UniqueGUID', True)
# configure MultiVO metadata related options:
res = csAPI.setOption(
'Systems/DataManagement/Production/Services/MultiVOFileCatalog/FileMetadata',
'MultiVOFileMetadata')
if not res['OK']:
print(res['Message'])
exit(1)
res = csAPI.setOption(
'Systems/DataManagement/Production/Services/MultiVOFileCatalog/DirectoryMetadata',
'MultiVODirectoryMetadata')
if not res['OK']:
print(res['Message'])
exit(1)
csAPI.commit()
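# Like the setOption calls above, commit() returns the usual S_OK/S_ERROR
# dictionary; a stricter variant of this script could also check it, e.g.:
# res = csAPI.commit()
# if not res['OK']:
#     print(res['Message'])
#     exit(1)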
|
yujikato/DIRAC
|
tests/Jenkins/dirac-cfg-update-services.py
|
Python
|
gpl-3.0
| 2,642
|
[
"DIRAC"
] |
5ab12c536bc4ddb12c0848de645e981ef59655aae80afb9df5e72689ed23e14e
|
#===============================================================================
#
# SpecificPopulation.py
#
# This file is part of ANNarchy.
#
# Copyright (C) 2013-2016 Julien Vitay <julien.vitay@gmail.com>,
# Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
from ANNarchy.core.Population import Population
from ANNarchy.core.Neuron import Neuron
import ANNarchy.core.Global as Global
import numpy as np
class SpecificPopulation(Population):
"""
Interface class for user-defined Population objects. An inheriting
class needs to override the implementor functions _generate_[paradigm], otherwise
a NotImplementedError exception will be thrown.
"""
def __init__(self, geometry, neuron, name=None, copied=False):
"""
Initialization, receive default arguments of Population objects.
"""
Population.__init__(self, geometry=geometry, neuron=neuron, name=name, stop_condition=None, storage_order='post_to_pre', copied=copied)
def _generate(self):
"""
Overridden method of Population, called during the code generation process.
Depending on the chosen paradigm, this function selects the correct implementor
function defined by the user.
"""
if Global.config['paradigm'] == "openmp":
if Global.config["num_threads"] == 1:
self._generate_st()
else:
self._generate_omp()
elif Global.config['paradigm'] == "cuda":
self._generate_cuda()
else:
raise NotImplementedError
def _generate_st(self):
"""
Intended to be overridden by a child class. Implement code adjustments intended for the single-thread paradigm.
"""
raise NotImplementedError
def _generate_omp(self):
"""
Intended to be overridden by a child class. Implement code adjustments intended for the openMP paradigm.
"""
raise NotImplementedError
def _generate_cuda(self):
"""
Intended to be overridden by a child class. Implement code adjustments intended for the CUDA paradigm.
"""
raise NotImplementedError
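# A minimal sketch of how a user-defined population could implement these hooks,
# following the pattern used by the classes below: override the relevant
# _generate_* method and fill entries of the _specific_template dictionary with
# code snippets (the key names are those used further down in this file).
#
# class MyInputPopulation(SpecificPopulation):
#     def _generate_st(self):
#         self._specific_template['update_variables'] = "// custom single-thread update"
#     def _generate_omp(self):
#         self._generate_st()
#     def _generate_cuda(self):
#         raise NotImplementedError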
class PoissonPopulation(SpecificPopulation):
"""
Population of spiking neurons following a Poisson distribution.
**Case 1:** Input population
Each neuron of the population will randomly emit spikes, with a mean firing rate defined by the *rates* argument.
The mean firing rate in Hz can be a fixed value for all neurons:
```python
pop = PoissonPopulation(geometry=100, rates=100.0)
```
but it can be modified later as a normal parameter:
```python
pop.rates = np.linspace(10, 150, 100)
```
It is also possible to define a temporal equation for the rates, by passing a string to the argument:
```python
pop = PoissonPopulation(
geometry=100,
rates="100.0 * (1.0 + sin(2*pi*t/1000.0) )/2.0"
)
```
The syntax of this equation follows the same structure as neural variables.
It is also possible to add parameters to the population which can be used in the equation of `rates`:
```python
pop = PoissonPopulation(
geometry=100,
parameters = '''
amp = 100.0
frequency = 1.0
''',
rates="amp * (1.0 + sin(2*pi*frequency*t/1000.0) )/2.0"
)
```
**Note:** The preceding definition is fully equivalent to the definition of this neuron:
```python
poisson = Neuron(
parameters = '''
amp = 100.0
frequency = 1.0
''',
equations = '''
rates = amp * (1.0 + sin(2*pi*frequency*t/1000.0) )/2.0
p = Uniform(0.0, 1.0) * 1000.0 / dt
''',
spike = '''
p < rates
'''
)
```
The refractory period can also be set, so that a neuron cannot emit two spikes too close to each other.
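For instance (illustrative values), a 2 ms refractory period can be passed at creation:
```python
pop = PoissonPopulation(geometry=100, rates=100.0, refractory=2.0)
```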
**Case 2:** Hybrid population
If the ``rates`` argument is not set, the population can be used as an interface from a rate-coded population.
The ``target`` argument specifies which incoming projections will be summed to determine the instantaneous firing rate of each neuron.
See the example in ``examples/hybrid/Hybrid.py`` for usage.
"""
def __init__(self, geometry, name=None, rates=None, target=None, parameters=None, refractory=None, copied=False):
"""
:param geometry: population geometry as tuple.
:param name: unique name of the population (optional).
:param rates: mean firing rate of each neuron. It can be a single value (e.g. 10.0) or an equation (as string).
:param target: the mean firing rate will be the weighted sum of inputs having this target name (e.g. "exc").
:param parameters: additional parameters which can be used in the *rates* equation.
:param refractory: refractory period in ms.
"""
if rates is None and target is None:
Global._error('A PoissonPopulation must define either rates or target.')
self.target = target
self.parameters = parameters
self.refractory_init = refractory
self.rates_init = rates
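# Four cases for the generated neuron (summarizing the branches below):
# - target is given   -> hybrid: rates is the weighted sum of inputs with that target
# - rates is a string -> rates follows the given temporal equation
# - rates is an array -> per-neuron rates, assigned after the population is created
# - otherwise         -> constant scalar rate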
if target is not None: # hybrid population
# Create the neuron
poisson_neuron = Neuron(
parameters = """
%(params)s
""" % {'params': parameters if parameters else ''},
equations = """
rates = sum(%(target)s)
p = Uniform(0.0, 1.0) * 1000.0 / dt
_sum_%(target)s = 0.0
""" % {'target': target},
spike = """
p < rates
""",
refractory=refractory,
name="Hybrid",
description="Hybrid spiking neuron emitting spikes according to a Poisson distribution at a frequency determined by the weighted sum of inputs."
)
elif isinstance(rates, str):
# Create the neuron
poisson_neuron = Neuron(
parameters = """
%(params)s
""" % {'params': parameters if parameters else ''},
equations = """
rates = %(rates)s
p = Uniform(0.0, 1.0) * 1000.0 / dt
_sum_exc = 0.0
""" % {'rates': rates},
spike = """
p < rates
""",
refractory=refractory,
name="Poisson",
description="Spiking neuron with spikes emitted according to a Poisson distribution."
)
elif isinstance(rates, np.ndarray):
poisson_neuron = Neuron(
parameters = """
rates = 10.0
""",
equations = """
p = Uniform(0.0, 1.0) * 1000.0 / dt
""",
spike = """
p < rates
""",
refractory=refractory,
name="Poisson",
description="Spiking neuron with spikes emitted according to a Poisson distribution."
)
else:
poisson_neuron = Neuron(
parameters = """
rates = %(rates)s
""" % {'rates': rates},
equations = """
p = Uniform(0.0, 1.0) * 1000.0 / dt
""",
spike = """
p < rates
""",
refractory=refractory,
name="Poisson",
description="Spiking neuron with spikes emitted according to a Poisson distribution."
)
SpecificPopulation.__init__(self, geometry=geometry, neuron=poisson_neuron, name=name, copied=copied)
if isinstance(rates, np.ndarray):
self.rates = rates
def _copy(self):
"Returns a copy of the population when creating networks."
return PoissonPopulation(self.geometry, name=self.name, rates=self.rates_init, target=self.target, parameters=self.parameters, refractory=self.refractory_init, copied=True)
def _generate_st(self):
"""
Generate single thread code.
We don't need any separate code snippets. All is done during the
normal code generation path.
"""
pass
def _generate_omp(self):
"""
Generate openMP code.
We don't need any separate code snippets. All is done during the
normal code generation path.
"""
pass
def _generate_cuda(self):
"""
Generate CUDA code.
We don't need any separate code snippets. All is done during the
normal code generation path.
"""
pass
class TimedArray(SpecificPopulation):
"""
Data structure holding sequential inputs for a rate-coded network.
The input values are stored in the (recordable) attribute `r`, without any further processing.
You will need to connect this population to another one using the ``connect_one_to_one()`` method.
By default, the firing rate of this population will iterate over the different values step by step:
```python
inputs = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
]
)
inp = TimedArray(rates=inputs)
pop = Population(10, ...)
proj = Projection(inp, pop, 'exc')
proj.connect_one_to_one(1.0)
compile()
simulate(10.)
```
This creates a population of 10 neurons whose activity will change during the first 10*dt milliseconds of the simulation. After that delay, the last input will be kept (i.e. 1 for the last neuron).
If you want the TimedArray to "loop" over the different input vectors, you can specify a period for the inputs:
```python
inp = TimedArray(rates=inputs, period=10.)
```
If the period is smaller than the length of the rates, the last inputs will not be set.
If you do not want the inputs to be set at every step, but every 10 ms for example, you can use the ``schedule`` argument:
```python
inp = TimedArray(rates=inputs, schedule=10.)
```
The input [1, 0, 0,...] will stay for 10 ms, then [0, 1, 0, ...] for the next 10 ms, etc...
If you need a less regular schedule, you can specify it as a list of times:
```python
inp = TimedArray(rates=inputs, schedule=[10., 20., 50., 60., 100., 110.])
```
The first input is set at t = 10 ms (r = 0.0 in the first 10 ms), the second at t = 20 ms, the third at t = 50 ms, etc.
If you specify fewer times than there are inputs in the array of rates, the extra inputs will be ignored.
Scheduling can be combined with periodic cycling. Note that you can use the ``reset()`` method to manually reinitialize the TimedArray, times becoming relative to that call:
```python
simulate(100.) # ten inputs are shown with a schedule of 10 ms
inp.reset()
simulate(100.) # the same ten inputs are presented again.
```
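For instance (illustrative values), a regular schedule can be combined with a period, so that each input is shown for 10 ms and the whole sequence restarts every 100 ms:
```python
inp = TimedArray(rates=inputs, schedule=10., period=100.)
```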
"""
def __init__(self, rates, schedule=0., period= -1., name=None, copied=False):
"""
:param rates: array of firing rates. The first axis corresponds to time, the others to the desired dimensions of the population.
:param schedule: either a single value or a list of time points where inputs should be set. Default: every timestep.
:param period: time when the timed array will be reset and start again, allowing cycling over the inputs. Default: no cycling (-1.).
"""
neuron = Neuron(
parameters="",
equations=" r = 0.0",
name="Timed Array",
description="Timed array source."
)
# Geometry of the population
geometry = rates.shape[1:]
# Check the schedule
if isinstance(schedule, (int, float)):
if float(schedule) <= 0.0:
schedule = Global.config['dt']
schedule = [ float(schedule*i) for i in range(rates.shape[0])]
if len(schedule) > rates.shape[0]:
Global._error('TimedArray: the length of the schedule parameter cannot exceed the first dimension of the rates parameter.')
if len(schedule) < rates.shape[0]:
Global._warning('TimedArray: the length of the schedule parameter is smaller than the first dimension of the rates parameter (more data than time points). Make sure it is what you expect.')
SpecificPopulation.__init__(self, geometry=geometry, neuron=neuron, name=name, copied=copied)
self.init['schedule'] = schedule
self.init['rates'] = rates
self.init['period'] = period
def _copy(self):
"Returns a copy of the population when creating networks."
return TimedArray(self.init['rates'] , self.init['schedule'], self.init['period'], self.name, copied=True)
def _generate_st(self):
"""
Adjust code templates of the specific population for the single-thread paradigm.
"""
self._specific_template['declare_additional'] = """
// Custom local parameters of a TimedArray
std::vector< int > _schedule; // List of times where new inputs should be set
std::vector< std::vector< %(float_prec)s > > _buffer; // buffer holding the data
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameters of a TimedArray
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_buffer(std::vector< std::vector< %(float_prec)s > > buffer) { _buffer = buffer; r = _buffer[0]; }
std::vector< std::vector< %(float_prec)s > > get_buffer() { return _buffer; }
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision']}
self._specific_template['init_additional'] = """
// Initialize counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['export_additional'] = """
# Custom local parameters of a TimedArray
void set_schedule(vector[int])
vector[int] get_schedule()
void set_buffer(vector[vector[%(float_prec)s]])
vector[vector[%(float_prec)s]] get_buffer()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['reset_additional'] ="""
_t = 0;
_block = 0;
r.clear();
r = std::vector<%(float_prec)s>(size, 0.0);
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters of a TimedArray
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_rates( self, buffer ):
pop%(id)s.set_buffer( buffer )
cpdef np.ndarray get_rates( self ):
return np.array(pop%(id)s.get_buffer( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id }
self._specific_template['update_variables'] = """
if(_active){
//std::cout << _t << " " << _block<< " " << _schedule[_block] << std::endl;
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
r = _buffer[_block];
// Move to the next block
_block++;
// If was the last block, go back to the first block
if (_block == _schedule.size()){
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if(_period > -1 && (_t == _period-1)){
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
"""
self._specific_template['size_in_bytes'] = """
// schedule
size_in_bytes += _schedule.capacity() * sizeof(int);
// buffer
size_in_bytes += _buffer.capacity() * sizeof(std::vector<%(float_prec)s>);
for( auto it = _buffer.begin(); it != _buffer.end(); it++ )
size_in_bytes += it->capacity() * sizeof(%(float_prec)s);
""" % {'float_prec': Global.config['precision']}
def _generate_omp(self):
"""
Adjust code templates of the specific population for the openMP paradigm.
"""
self._specific_template['declare_additional'] = """
// Custom local parameters of a TimedArray
std::vector< int > _schedule; // List of times where new inputs should be set
std::vector< std::vector< %(float_prec)s > > _buffer; // buffer holding the data
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameters of a TimedArray
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_buffer(std::vector< std::vector< %(float_prec)s > > buffer) { _buffer = buffer; r = _buffer[0]; }
std::vector< std::vector< %(float_prec)s > > get_buffer() { return _buffer; }
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision']}
self._specific_template['init_additional'] = """
// Initialize counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['export_additional'] = """
# Custom local parameters of a TimedArray
void set_schedule(vector[int])
vector[int] get_schedule()
void set_buffer(vector[vector[%(float_prec)s]])
vector[vector[%(float_prec)s]] get_buffer()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['reset_additional'] ="""
_t = 0;
_block = 0;
r.clear();
r = std::vector<%(float_prec)s>(size, 0.0);
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters of a TimedArray
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_rates( self, buffer ):
pop%(id)s.set_buffer( buffer )
cpdef np.ndarray get_rates( self ):
return np.array(pop%(id)s.get_buffer( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id }
self._specific_template['update_variables'] = """
if(_active){
#pragma omp single
{
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
r = _buffer[_block];
// Move to the next block
_block++;
// If was the last block, go back to the first block
if (_block == _schedule.size()){
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if(_period > -1 && (_t == _period-1)){
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
}
"""
self._specific_template['size_in_bytes'] = """
// schedule
size_in_bytes += _schedule.capacity() * sizeof(int);
// buffer
size_in_bytes += _buffer.capacity() * sizeof(std::vector<%(float_prec)s>);
for( auto it = _buffer.begin(); it != _buffer.end(); it++ )
size_in_bytes += it->capacity() * sizeof(%(float_prec)s);
""" % {'float_prec': Global.config['precision']}
def _generate_cuda(self):
"""
Adjust code templates of the specific population for the CUDA paradigm.
"""
# HD (18. Nov 2016)
# I suppress the code generation for allocating the variable r on the GPU, as
# well as the memory transfer code. This is only possible because no other
# variables are allowed in a TimedArray.
self._specific_template['init_parameters_variables'] = ""
self._specific_template['host_device_transfer'] = ""
self._specific_template['device_host_transfer'] = ""
#
# Code for handling the buffer and schedule parameters
self._specific_template['declare_additional'] = """
// Custom local parameter timed array
std::vector< int > _schedule;
std::vector< %(float_prec)s* > gpu_buffer;
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameter timed array
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_buffer(std::vector< std::vector< %(float_prec)s > > buffer) {
if ( gpu_buffer.empty() ) {
gpu_buffer = std::vector< %(float_prec)s* >(buffer.size(), nullptr);
// allocate gpu arrays
for(int i = 0; i < buffer.size(); i++) {
cudaMalloc((void**)&gpu_buffer[i], buffer[i].size()*sizeof(%(float_prec)s));
}
}
auto host_it = buffer.begin();
auto dev_it = gpu_buffer.begin();
for( ; host_it < buffer.end(); host_it++, dev_it++) {
cudaMemcpy( *dev_it, host_it->data(), host_it->size()*sizeof(%(float_prec)s), cudaMemcpyHostToDevice);
}
gpu_r = gpu_buffer[0];
}
std::vector< std::vector< %(float_prec)s > > get_buffer() {
std::vector< std::vector< %(float_prec)s > > buffer = std::vector< std::vector< %(float_prec)s > >( gpu_buffer.size(), std::vector<%(float_prec)s>(size,0.0) );
auto host_it = buffer.begin();
auto dev_it = gpu_buffer.begin();
for( ; host_it < buffer.end(); host_it++, dev_it++ ) {
cudaMemcpy( host_it->data(), *dev_it, size*sizeof(%(float_prec)s), cudaMemcpyDeviceToHost );
}
return buffer;
}
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision']}
self._specific_template['init_additional'] = """
// counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['reset_additional'] = """
// counters
_t = 0;
_block = 0;
gpu_r = gpu_buffer[0];
"""
self._specific_template['export_additional'] = """
# Custom local parameters timed array
void set_schedule(vector[int])
vector[int] get_schedule()
void set_buffer(vector[vector[%(float_prec)s]])
vector[vector[%(float_prec)s]] get_buffer()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters timed array
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_rates( self, buffer ):
pop%(id)s.set_buffer( buffer )
cpdef np.ndarray get_rates( self ):
return np.array(pop%(id)s.get_buffer( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id, 'float_prec': Global.config['precision'] }
self._specific_template['update_variables'] = """
if(_active) {
// std::cout << _t << " " << _block<< " " << _schedule[_block] << std::endl;
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
gpu_r = gpu_buffer[_block];
// Move to the next block
_block++;
// If was the last block, go back to the first block
if ( _block == _schedule.size() ) {
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if( (_period > -1) && (_t == _period-1) ) {
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
"""
self._specific_template['update_variable_body'] = ""
self._specific_template['update_variable_header'] = ""
self._specific_template['update_variable_call'] = """
// host side update of neurons
pop%(id)s.update();
""" % {'id': self.id}
self._specific_template['size_in_bytes'] = "//TODO: "
def _instantiate(self, module):
# Create the Cython instance
self.cyInstance = getattr(module, self.class_name+'_wrapper')(self.size, self.max_delay)
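# The accessors below convert between user-facing units and the internal
# representation: 'schedule' and 'period' are given in ms and stored as steps
# (divided by Global.config['dt']), and 'rates' arrays with more than two
# dimensions are flattened to (time, size) before being passed to the wrapper.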
def __setattr__(self, name, value):
if name == 'schedule':
if self.initialized:
self.cyInstance.set_schedule( np.array(value) / Global.config['dt'] )
else:
self.init['schedule'] = value
elif name == 'rates':
if self.initialized:
if len(value.shape) > 2:
# we need to flatten the provided data
flat_values = value.reshape( (value.shape[0], self.size) )
self.cyInstance.set_rates( flat_values )
else:
self.cyInstance.set_rates( value )
else:
self.init['rates'] = value
elif name == "period":
if self.initialized:
self.cyInstance.set_period(int(value /Global.config['dt']))
else:
self.init['period'] = value
else:
Population.__setattr__(self, name, value)
def __getattr__(self, name):
if name == 'schedule':
if self.initialized:
return Global.config['dt'] * self.cyInstance.get_schedule()
else:
return self.init['schedule']
elif name == 'rates':
if self.initialized:
if len(self.geometry) > 1:
# unflatten the data
flat_values = self.cyInstance.get_rates()
values = np.zeros( tuple( [len(self.schedule)] + list(self.geometry) ) )
for x in range(len(self.schedule)):
values[x] = np.reshape( flat_values[x], self.geometry)
return values
else:
return self.cyInstance.get_rates()
else:
return self.init['rates']
elif name == 'period':
if self.initialized:
return self.cyInstance.get_period() * Global.config['dt']
else:
return self.init['period']
else:
return Population.__getattribute__(self, name)
class SpikeSourceArray(SpecificPopulation):
"""
Spike source generating spikes at the times given in the spike_times array.
Depending on the initial array provided, the population will have one or several neurons, but the geometry can only be one-dimensional.
You can later modify the spike_times attribute of the population, but it must have the same number of neurons as the initial one.
The spike times are by default relative to the start of a simulation (``ANNarchy.get_time()`` is 0.0).
If you call the ``reset()`` method of a ``SpikeSourceArray``, this will set the spike times relative to the current time.
You can then repeat a stimulation many times.
```python
# 2 neurons firing at 100Hz with a 1 ms delay
times = [
[ 10, 20, 30, 40],
[ 11, 21, 31, 41]
]
inp = SpikeSourceArray(spike_times=times)
compile()
# Spikes at 10/11, 20/21, etc
simulate(50)
# Reset the internal time of the SpikeSourceArray
inp.reset()
# Spikes at 60/61, 70/71, etc
simulate(50)
```
"""
def __init__(self, spike_times, name=None, copied=False):
"""
:param spike_times: a list of times at which a spike should be emitted if the population has only 1 neuron, or a list of lists otherwise. Times are defined in milliseconds, and will be rounded to the closest multiple of the discretization time step dt.
:param name: optional name for the population.
"""
if not isinstance(spike_times, list):
Global._error('In a SpikeSourceArray, spike_times must be a Python list.')
if isinstance(spike_times[0], list): # several neurons
nb_neurons = len(spike_times)
else: # a single Neuron
nb_neurons = 1
spike_times = [ spike_times ]
# Create a fake neuron just to be sure the description has the correct parameters
neuron = Neuron(
parameters="",
equations="",
spike=" t == 0",
reset="",
name="Spike source",
description="Spike source array."
)
SpecificPopulation.__init__(self, geometry=nb_neurons, neuron=neuron, name=name, copied=copied)
self.init['spike_times'] = spike_times
def _copy(self):
"Returns a copy of the population when creating networks."
return SpikeSourceArray(self.init['spike_times'], self.name, copied=True)
def _sort_spikes(self, spike_times):
"Sort, unify the spikes and transform them into steps."
return [sorted(list(set([round(t/Global.config['dt']) for t in neur_times]))) for neur_times in spike_times]
def _generate_st(self):
"""
Code generation for single-thread.
"""
self._generate_omp()
def _generate_omp(self):
"""
Code generation for openMP paradigm.
"""
# Add possible targets
for target in self.targets:
tpl = {
'name': 'g_%(target)s' % {'target': target},
'locality': 'local',
'eq': '',
'bounds': {},
'flags': [],
'ctype': Global.config['precision'],
'init': 0.0,
'transformed_eq': '',
'pre_loop': {},
'cpp': '',
'switch': '',
'untouched': {},
'method': 'exponential',
'dependencies': []
}
self.neuron_type.description['variables'].append(tpl)
self.neuron_type.description['local'].append('g_'+target)
self._specific_template['declare_additional'] = """
// Custom local parameter spike_times
// std::vector< %(float_prec)s > r ;
std::vector< std::vector< long int > > spike_times ;
std::vector< long int > next_spike ;
std::vector< int > idx_next_spike;
long int _t;
// Recompute the spike times
void recompute_spike_times(){
std::fill(next_spike.begin(), next_spike.end(), -10000);
std::fill(idx_next_spike.begin(), idx_next_spike.end(), 0);
for(int i=0; i< size; i++){
if(!spike_times[i].empty()){
int idx = 0;
// Find the first spike time which is not in the past
while(spike_times[i][idx] < _t){
idx++;
}
// Set the next spike
if(idx < spike_times[i].size())
next_spike[i] = spike_times[i][idx];
else
next_spike[i] = -10000;
}
}
}
"""% { 'float_prec': Global.config['precision'] }
#self._specific_template['access_parameters_variables'] = ""
self._specific_template['init_additional'] = """
_t = 0;
next_spike = std::vector<long int>(size, -10000);
idx_next_spike = std::vector<int>(size, 0);
this->recompute_spike_times();
"""
self._specific_template['reset_additional'] = """
_t = 0;
this->recompute_spike_times();
"""
if Global.config["num_threads"] == 1:
self._specific_template['update_variables'] = """
if(_active){
spiked.clear();
for(int i = 0; i < size; i++){
// Emit spike
if( _t == next_spike[i] ){
last_spike[i] = _t;
idx_next_spike[i]++ ;
if(idx_next_spike[i] < spike_times[i].size()){
next_spike[i] = spike_times[i][idx_next_spike[i]];
}
spiked.push_back(i);
}
}
_t++;
}
"""
else:
self._specific_template['update_variables'] = """
if(_active){
#pragma omp single
{
spiked.clear();
}
#pragma omp for
for(int i = 0; i < size; i++){
// Emit spike
if( _t == next_spike[i] ){
last_spike[i] = _t;
idx_next_spike[i]++ ;
if(idx_next_spike[i] < spike_times[i].size()){
next_spike[i] = spike_times[i][idx_next_spike[i]];
}
#pragma omp critical
spiked.push_back(i);
}
}
#pragma omp single
{
_t++;
}
}
"""
self._specific_template['test_spike_cond'] = ""
self._specific_template['export_additional'] ="""
vector[vector[long]] spike_times
void recompute_spike_times()
"""
self._specific_template['wrapper_args'] = "size, times, delay"
self._specific_template['wrapper_init'] = """
pop%(id)s.spike_times = times
pop%(id)s.set_size(size)
pop%(id)s.set_max_delay(delay)""" % {'id': self.id}
self._specific_template['wrapper_access_additional'] = """
# Local parameter spike_times
cpdef get_spike_times(self):
return pop%(id)s.spike_times
cpdef set_spike_times(self, value):
pop%(id)s.spike_times = value
pop%(id)s.recompute_spike_times()
""" % {'id': self.id}
def _generate_cuda(self):
"""
Code generation for the CUDA paradigm.
As spike time generation is not a very compute-intensive step but
requires dynamic data structures, we don't implement it on the CUDA
devices for now. Consequently, we use the CPU-side implementation and
transfer the results to the GPU after computation.
"""
self._generate_st()
# attach transfer of spiked array to gpu
# IMPORTANT: the outside transfer is necessary.
# Otherwise, previous spike counts will not be reset.
self._specific_template['update_variables'] += """
if ( _active ) {
// Update Spike Count on GPU
spike_count = spiked.size();
cudaMemcpy( gpu_spike_count, &spike_count, sizeof(unsigned int), cudaMemcpyHostToDevice);
// Transfer generated spikes to GPU
if( spike_count > 0 ) {
cudaMemcpy( gpu_spiked, spiked.data(), spike_count * sizeof(int), cudaMemcpyHostToDevice);
}
}
"""
self._specific_template['update_variable_body'] = ""
self._specific_template['update_variable_header'] = ""
self._specific_template['update_variable_call'] = """
// host side update of neurons
pop%(id)s.update();
""" % {'id': self.id}
def _instantiate(self, module):
# Create the Cython instance
self.cyInstance = getattr(module, self.class_name+'_wrapper')(self.size, self.init['spike_times'], self.max_delay)
def __setattr__(self, name, value):
if name == 'spike_times':
if not isinstance(value[0], list): # several neurons
value = [ value ]
if not len(value) == self.size:
Global._error('SpikeSourceArray: the size of the spike_times attribute must match the number of neurons in the population.')
self.init['spike_times'] = value # when reset is called
if self.initialized:
self.cyInstance.set_spike_times(self._sort_spikes(value))
else:
Population.__setattr__(self, name, value)
def __getattr__(self, name):
if name == 'spike_times':
if self.initialized:
return [ [Global.config['dt']*time for time in neur] for neur in self.cyInstance.get_spike_times()]
else:
return self.init['spike_times']
else:
return Population.__getattribute__(self, name)
class TimedPoissonPopulation(SpecificPopulation):
"""
Poisson population whose rate vary with the provided schedule.
Example:
```python
inp = TimedPoissonPopulation(
geometry = 100,
rates = [10., 20., 100., 20., 5.],
schedule = [0., 100., 200., 500., 600.],
)
```
This creates a population of 100 Poisson neurons whose rate will be:
* 10 Hz during the first 100 ms.
* 20 Hz during the next 100 ms.
* 100 Hz during the next 300 ms.
* 20 Hz during the next 100 ms.
* 5 Hz until the end of the simulation.
If you want the TimedPoissonPopulation to "loop" over the schedule, you can specify a period:
```python
inp = TimedPoissonPopulation(
geometry = 100,
rates = [10., 20., 100., 20., 5.],
schedule = [0., 100., 200., 500., 600.],
period = 1000.,
)
```
Here the rate will become 10 Hz again every second of simulation. If the period is smaller than the schedule, the remaining rates will not be set.
Note that you can use the ``reset()`` method to manually reinitialize the schedule, times becoming relative to that call:
```python
simulate(1200.) # Should switch to 100 Hz due to the period of 1000.
inp.reset()
simulate(1000.) # Starts at 10 Hz again.
```
The rates were here global to the population. If you want each neuron to have a different rate, ``rates`` must have additional dimensions corresponding to the geometry of the population.
```python
inp = TimedPoissonPopulation(
geometry = 100,
rates = [
[10. + 0.05*i for i in range(100)],
[20. + 0.05*i for i in range(100)],
],
schedule = [0., 100.],
period = 1000.,
)
```
"""
def __init__(self, geometry, rates, schedule, period= -1., name=None, copied=False):
"""
:param rates: array of firing rates. The first axis corresponds to the times where the firing rate should change.
If a different rate should be used by the different neurons, the other dimensions must match the geometry of the population.
:param schedule: list of times (in ms) where the firing rate should change.
:param period: time when the timed array will be reset and start again, allowing cycling over the schedule. Default: no cycling (-1.).
"""
neuron = Neuron(
parameters = """
proba = 1.0
""",
equations = """
p = Uniform(0.0, 1.0) * 1000.0 / dt
""",
spike = """
p < proba
""",
name="TimedPoisson",
description="Spiking neuron following a Poisson distribution."
)
SpecificPopulation.__init__(self, geometry=geometry, neuron=neuron, name=name, copied=copied)
# Check arguments
try:
rates = np.array(rates)
except Exception:
Global._error("TimedPoissonPopulation: the rates argument must be a numpy array.")
schedule = np.array(schedule)
nb_schedules = rates.shape[0]
if nb_schedules != schedule.size:
Global._error("TimedPoissonPopulation: the first axis of the rates argument must be the same length as schedule.")
if rates.ndim == 1 : # One rate for the whole population
rates = np.array([np.full(self.size, rates[i]) for i in range(nb_schedules)])
# Initial values
self.init['schedule'] = schedule
self.init['rates'] = rates
self.init['period'] = period
def _copy(self):
"Returns a copy of the population when creating networks."
return TimedPoissonPopulation(self.geometry, self.init['rates'] , self.init['schedule'], self.init['period'], self.name, copied=True)
def _generate_st(self):
"""
Adjust code templates of the specific population for the single-thread paradigm.
"""
self._specific_template['declare_additional'] = """
// Custom local parameters of a TimedPoissonPopulation
std::vector< int > _schedule; // List of times where new inputs should be set
std::vector< std::vector< %(float_prec)s > > _buffer; // buffer holding the data
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameters of a TimedPoissonPopulation
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_buffer(std::vector< std::vector< %(float_prec)s > > buffer) { _buffer = buffer; r = _buffer[0]; }
std::vector< std::vector< %(float_prec)s > > get_buffer() { return _buffer; }
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision']}
self._specific_template['init_additional'] = """
// Initialize counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['export_additional'] = """
# Custom local parameters of a TimedPoissonPopulation
void set_schedule(vector[int])
vector[int] get_schedule()
void set_buffer(vector[vector[%(float_prec)s]])
vector[vector[%(float_prec)s]] get_buffer()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['reset_additional'] ="""
_t = 0;
_block = 0;
r.clear();
r = std::vector<%(float_prec)s>(size, 0.0);
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters of a TimedPoissonPopulation
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_rates( self, buffer ):
pop%(id)s.set_buffer( buffer )
cpdef np.ndarray get_rates( self ):
return np.array(pop%(id)s.get_buffer( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id }
self._specific_template['update_variables'] = """
if(_active){
//std::cout << _t << " " << _block<< " " << _schedule[_block] << std::endl;
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
proba = _buffer[_block];
// Move to the next block
_block++;
// If was the last block, go back to the first block
if (_block == _schedule.size()){
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if(_period > -1 && (_t == _period-1)){
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
if( _active ) {
spiked.clear();
// Updating local variables
%(float_prec)s step = 1000.0/dt;
#pragma omp simd
for(int i = 0; i < size; i++){
// p = Uniform(0.0, 1.0) * 1000.0 / dt
p[i] = step*rand_0[i];
}
} // active
""" % {'float_prec': Global.config['precision']}
self._specific_template['size_in_bytes'] = """
// schedule
size_in_bytes += _schedule.capacity() * sizeof(int);
// buffer
size_in_bytes += _buffer.capacity() * sizeof(std::vector<%(float_prec)s>);
for( auto it = _buffer.begin(); it != _buffer.end(); it++ )
size_in_bytes += it->capacity() * sizeof(%(float_prec)s);
""" % {'float_prec': Global.config['precision']}
def _generate_omp(self):
"""
Adjust code templates of the specific population for the openMP paradigm.
"""
self._specific_template['declare_additional'] = """
// Custom local parameters of a TimedPoissonPopulation
std::vector< int > _schedule; // List of times where new inputs should be set
std::vector< std::vector< %(float_prec)s > > _buffer; // buffer holding the data
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameters of a TimedPoissonPopulation
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_buffer(std::vector< std::vector< %(float_prec)s > > buffer) { _buffer = buffer; r = _buffer[0]; }
std::vector< std::vector< %(float_prec)s > > get_buffer() { return _buffer; }
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision']}
self._specific_template['init_additional'] = """
// Initialize counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['export_additional'] = """
# Custom local parameters of a TimedPoissonPopulation
void set_schedule(vector[int])
vector[int] get_schedule()
void set_buffer(vector[vector[%(float_prec)s]])
vector[vector[%(float_prec)s]] get_buffer()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['reset_additional'] ="""
_t = 0;
_block = 0;
r.clear();
r = std::vector<%(float_prec)s>(size, 0.0);
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters of a TimedPoissonPopulation
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_rates( self, buffer ):
pop%(id)s.set_buffer( buffer )
cpdef np.ndarray get_rates( self ):
return np.array(pop%(id)s.get_buffer( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id }
self._specific_template['update_variables'] = """
if(_active){
#pragma omp single
{
//std::cout << _t << " " << _block<< " " << _schedule[_block] << std::endl;
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
proba = _buffer[_block];
// Move to the next block
_block++;
// If was the last block, go back to the first block
if (_block == _schedule.size()){
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if(_period > -1 && (_t == _period-1)){
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
}
if( _active ) {
spiked.clear();
// Updating local variables
%(float_prec)s step = 1000.0/dt;
#pragma omp for simd
for(int i = 0; i < size; i++){
// p = Uniform(0.0, 1.0) * 1000.0 / dt
p[i] = step*rand_0[i];
}
} // active
""" % {'float_prec': Global.config['precision']}
self._specific_template['size_in_bytes'] = """
// schedule
size_in_bytes += _schedule.capacity() * sizeof(int);
// buffer
size_in_bytes += _buffer.capacity() * sizeof(std::vector<%(float_prec)s>);
for( auto it = _buffer.begin(); it != _buffer.end(); it++ )
size_in_bytes += it->capacity() * sizeof(%(float_prec)s);
""" % {'float_prec': Global.config['precision']}
def _generate_cuda(self):
"""
Code generation if the CUDA paradigm is set.
"""
# I suppress the code generation for allocating the variable r on the GPU, as
# well as the memory transfer code. This is only possible because no other
# variables are allowed in this population.
self._specific_template['init_parameters_variables'] = """
// Random numbers
cudaMalloc((void**)&gpu_rand_0, size * sizeof(curandState));
init_curand_states( size, gpu_rand_0, global_seed );
"""
self._specific_template['host_device_transfer'] = ""
self._specific_template['device_host_transfer'] = ""
#
# Code for handling the buffer and schedule parameters
self._specific_template['declare_additional'] = """
// Custom local parameter timed array
std::vector< int > _schedule;
std::vector< %(float_prec)s* > gpu_buffer;
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameter timed array
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_buffer(std::vector< std::vector< %(float_prec)s > > buffer) {
if ( gpu_buffer.empty() ) {
gpu_buffer = std::vector< %(float_prec)s* >(buffer.size(), nullptr);
// allocate gpu arrays
for(int i = 0; i < buffer.size(); i++) {
cudaMalloc((void**)&gpu_buffer[i], buffer[i].size()*sizeof(%(float_prec)s));
}
}
auto host_it = buffer.begin();
auto dev_it = gpu_buffer.begin();
for( ; host_it < buffer.end(); host_it++, dev_it++ ) {
cudaMemcpy( *dev_it, host_it->data(), host_it->size()*sizeof(%(float_prec)s), cudaMemcpyHostToDevice);
}
gpu_proba = gpu_buffer[0];
}
std::vector< std::vector< %(float_prec)s > > get_buffer() {
std::vector< std::vector< %(float_prec)s > > buffer = std::vector< std::vector< %(float_prec)s > >( gpu_buffer.size(), std::vector<%(float_prec)s>(size,0.0) );
auto host_it = buffer.begin();
auto dev_it = gpu_buffer.begin();
for( ; host_it < buffer.end(); host_it++, dev_it++ ) {
cudaMemcpy( host_it->data(), *dev_it, size*sizeof(%(float_prec)s), cudaMemcpyDeviceToHost );
}
return buffer;
}
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision']}
self._specific_template['init_additional'] = """
// counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['reset_additional'] = """
// counters
_t = 0;
_block = 0;
gpu_proba = gpu_buffer[0];
"""
self._specific_template['export_additional'] = """
# Custom local parameters timed array
void set_schedule(vector[int])
vector[int] get_schedule()
void set_buffer(vector[vector[%(float_prec)s]])
vector[vector[%(float_prec)s]] get_buffer()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters timed array
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_rates( self, buffer ):
pop%(id)s.set_buffer( buffer )
cpdef np.ndarray get_rates( self ):
return np.array(pop%(id)s.get_buffer( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id, 'float_prec': Global.config['precision'] }
self._specific_template['update_variables'] = """
if(_active) {
// std::cout << _t << " " << _block<< " " << _schedule[_block] << std::endl;
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
gpu_proba = gpu_buffer[_block];
// Move to the next block
_block++;
// If was the last block, go back to the first block
if ( _block == _schedule.size() ) {
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if( (_period > -1) && (_t == _period-1) ) {
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
"""
self._specific_template['update_variable_body'] = """
__global__ void cuPop%(id)s_local_step( const long int t, const double dt, curandState* rand_0, double* proba, unsigned int* num_events, int* spiked, long int* last_spike )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
%(float_prec)s step = 1000.0/dt;
while ( i < %(size)s )
{
// p = Uniform(0.0, 1.0) * 1000.0 / dt
%(float_prec)s p = curand_uniform_double( &rand_0[i] ) * step;
if (p < proba[i]) {
int pos = atomicAdd ( num_events, 1);
spiked[pos] = i;
last_spike[i] = t;
}
i += blockDim.x;
}
__syncthreads();
}
""" % {
'id': self.id,
'size': self.size,
'float_prec': Global.config['precision']
}
self._specific_template['update_variable_header'] = "__global__ void cuPop%(id)s_local_step( const long int t, const double dt, curandState* rand_0, double* proba, unsigned int* num_events, int* spiked, long int* last_spike );" % {'id': self.id}
# Please note that the GPU kernel can only be launched with a single block. Otherwise, the
# atomicAdd called inside the kernel does not work correctly (HD: April 1st, 2021).
self._specific_template['update_variable_call'] = """
// host side update of neurons
pop%(id)s.update();
// Reset old events
clear_num_events<<< 1, 1, 0, pop%(id)s.stream >>>(pop%(id)s.gpu_spike_count);
#ifdef _DEBUG
cudaError_t err_clear_num_events_%(id)s = cudaGetLastError();
if(err_clear_num_events_%(id)s != cudaSuccess)
std::cout << "pop%(id)s_spike_gather: " << cudaGetErrorString(err_clear_num_events_%(id)s) << std::endl;
#endif
// Compute current events
cuPop%(id)s_local_step<<< 1, pop%(id)s._threads_per_block, 0, pop%(id)s.stream >>>(
t, dt,
pop%(id)s.gpu_rand_0,
pop%(id)s.gpu_proba,
pop%(id)s.gpu_spike_count,
pop%(id)s.gpu_spiked,
pop%(id)s.gpu_last_spike
);
#ifdef _DEBUG
cudaError_t err_pop_spike_gather_%(id)s = cudaGetLastError();
if(err_pop_spike_gather_%(id)s != cudaSuccess)
std::cout << "pop%(id)s_spike_gather: " << cudaGetErrorString(err_pop_spike_gather_%(id)s) << std::endl;
#endif
// transfer back the spike counter (needed by record)
cudaMemcpyAsync( &pop%(id)s.spike_count, pop%(id)s.gpu_spike_count, sizeof(unsigned int), cudaMemcpyDeviceToHost, pop%(id)s.stream );
#ifdef _DEBUG
cudaError_t err = cudaGetLastError();
if ( err != cudaSuccess )
std::cout << "record_spike_count: " << cudaGetErrorString(err) << std::endl;
#endif
// transfer back the spiked array (needed by record)
cudaMemcpyAsync( pop%(id)s.spiked.data(), pop%(id)s.gpu_spiked, pop%(id)s.spike_count*sizeof(int), cudaMemcpyDeviceToHost, pop%(id)s.stream );
#ifdef _DEBUG
err = cudaGetLastError();
if ( err != cudaSuccess )
std::cout << "record_spike: " << cudaGetErrorString(err) << std::endl;
#endif
""" % {'id': self.id}
self._specific_template['size_in_bytes'] = "//TODO: "
def _instantiate(self, module):
# Create the Cython instance
self.cyInstance = getattr(module, self.class_name+'_wrapper')(self.size, self.max_delay)
def __setattr__(self, name, value):
if name == 'schedule':
if self.initialized:
self.cyInstance.set_schedule( np.array(value) / Global.config['dt'] )
else:
self.init['schedule'] = value
elif name == 'rates':
if self.initialized:
if len(value.shape) > 2:
# we need to flatten the provided data
flat_values = value.reshape( (value.shape[0], self.size) )
self.cyInstance.set_rates( flat_values )
else:
self.cyInstance.set_rates( value )
else:
self.init['rates'] = value
elif name == "period":
if self.initialized:
self.cyInstance.set_period(int(value /Global.config['dt']))
else:
self.init['period'] = value
else:
Population.__setattr__(self, name, value)
def __getattr__(self, name):
if name == 'schedule':
if self.initialized:
return Global.config['dt'] * self.cyInstance.get_schedule()
else:
return self.init['schedule']
elif name == 'rates':
if self.initialized:
if len(self.geometry) > 1:
# unflatten the data
flat_values = self.cyInstance.get_rates()
values = np.zeros( tuple( [len(self.schedule)] + list(self.geometry) ) )
for x in range(len(self.schedule)):
values[x] = np.reshape( flat_values[x], self.geometry)
return values
else:
return self.cyInstance.get_rates()
else:
return self.init['rates']
elif name == 'period':
if self.initialized:
return self.cyInstance.get_period() * Global.config['dt']
else:
return self.init['period']
else:
return Population.__getattribute__(self, name)
class HomogeneousCorrelatedSpikeTrains(SpecificPopulation):
"""
Population of spiking neurons following a homogeneous distribution with correlated spike trains.
The method describing the generation of homogeneous correlated spike trains is described in:
> Brette, R. (2009). Generation of correlated spike trains. Neural Computation 21(1). <http://romainbrette.fr/WordPress3/wp-content/uploads/2014/06/Brette2008NC.pdf>
The implementation is based on the one provided by Brian <http://briansimulator.org>.
To generate correlated spike trains, the population rate of the group of Poisson-like spiking neurons varies following a stochastic differential equation:
$$\\frac{dx}{dt} = \\frac{(\\mu - x)}{\\tau} + \\sigma \\, \\frac{\\xi}{\\sqrt{\\tau}}$$
where $\\xi$ is a Gaussian white noise term. In short, $x$ will randomly vary around mu over time, with an amplitude determined by sigma and a speed determined by tau.
The instantaneous rate $x$ thus follows an Ornstein-Uhlenbeck process, and the spike trains generated from it form a doubly stochastic (Cox) process.
To avoid that x becomes negative, the values of mu and sigma are computed from a rectified Gaussian distribution, parameterized by the desired population rate **rates**, the desired correlation strength **corr** and the time constant **tau**. See Brette's paper for details.
In short, you should only define the parameters ``rates``, ``corr`` and ``tau``, and let the class compute mu and sigma for you. Changing ``rates``, ``corr`` or ``tau`` after initialization automatically recomputes mu and sigma.
Example:
```python
from ANNarchy import *
setup(dt=0.1)
pop_corr = HomogeneousCorrelatedSpikeTrains(200, rates=10., corr=0.3, tau=10.)
compile()
simulate(1000.)
pop_corr.rates=30.
simulate(1000.)
```
Alternatively, a schedule can be provided to automatically change the value of ``rates`` and ``corr`` (but not ``tau``) at the required times (as in TimedArray or TimedPoissonPopulation):
```python
from ANNarchy import *
setup(dt=0.1)
pop_corr = HomogeneousCorrelatedSpikeTrains(
geometry=200,
rates= [10., 30.],
corr=[0.3, 0.5],
tau=10.,
schedule=[0., 1000.]
)
compile()
simulate(2000.)
```
Even when using a schedule, ``corr`` accepts a single constant value. The first value of ``schedule`` must be 0. ``period`` specifies when the schedule "loops" back to its initial value.
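A similar (illustrative) sketch, reusing the API from the examples above, cycles through the schedule with ``period``:
```python
from ANNarchy import *
setup(dt=0.1)
pop_corr = HomogeneousCorrelatedSpikeTrains(
geometry=200,
rates=[10., 30.],
corr=0.3,             # a single constant value is broadcast over the schedule
tau=10.,
schedule=[0., 1000.],
period=2000.          # loop back to the first schedule entry every 2000 ms
)
compile()
simulate(6000.)           # three cycles of 10 Hz / 30 Hz alternation
```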
"""
def __init__(self,
geometry,
rates,
corr,
tau,
schedule=None,
period=-1.,
name=None,
refractory=None,
copied=False):
"""
:param geometry: population geometry as tuple.
:param rates: rate in Hz of the population (must be a positive float or a list)
:param corr: total correlation strength (float in [0, 1], or a list)
:param tau: correlation time constant in ms.
:param schedule: list of times where new values of ``rates`` and ``corr`` will be used to compute mu and sigma.
:param period: time when the array will be reset and start again, allowing cycling over the schedule. Default: no cycling (-1.)
:param name: unique name of the population (optional).
:param refractory: refractory period in ms (careful: may break the correlation)
"""
if schedule is not None:
self._has_schedule = True
# Rates
if not isinstance(rates, (list, np.ndarray)):
Global._error("TimedHomogeneousCorrelatedSpikeTrains: the rates argument must be a list or a numpy array.")
rates = np.array(rates)
# Schedule
schedule = np.array(schedule)
nb_schedules = rates.shape[0]
if nb_schedules != schedule.size:
Global._error("TimedHomogeneousCorrelatedSpikeTrains: the length of rates must be the same length as for schedule.")
# corr
corr = np.array(corr)
if corr.size == 1:
corr = np.full(nb_schedules, corr)
else:
self._has_schedule = False
rates = np.array([float(rates)])
schedule = np.array([0.0])
corr = np.array([corr])
# Store refractory
self.refractory_init = refractory
# Correction of mu and sigma
mu_list, sigma_list = self._correction(rates, corr, tau)
self.rates = rates
self.corr = corr
self.tau = tau
# Create the neuron
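# The update rule below is the Euler-Maruyama discretization of the OU process
# given in the class docstring: x += dt*(mu - x)/tau + sigma*sqrt(dt/tau)*N(0,1).
# The auxiliary variable p = Uniform(0,1)*1000/dt turns x (in Hz) into a firing
# probability of x*dt/1000 per step through the spike condition "p < x", i.e.
# an average rate of x Hz.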
corr_neuron = Neuron(
parameters = """
tau = %(tau)s : population
mu = %(mu)s : population
sigma = %(sigma)s : population
""" % {'tau': tau, 'mu': mu_list[0], 'sigma': sigma_list[0]},
equations = """
x += dt*(mu - x)/tau + sqrt(dt/tau) * sigma * Normal(0., 1.) : population, init=%(mu)s
p = Uniform(0.0, 1.0) * 1000.0 / dt
""" % {'mu': mu_list[0]},
spike = "p < x",
refractory=refractory,
name="HomogeneousCorrelated",
description="Homogeneous correlated spike trains."
)
SpecificPopulation.__init__(self, geometry=geometry, neuron=corr_neuron, name=name, copied=copied)
# Initial values
self.init['schedule'] = schedule
self.init['rates'] = rates
self.init['corr'] = corr
self.init['tau'] = tau
self.init['period'] = period
if self._has_schedule:
self.init['mu'] = mu_list
self.init['sigma'] = sigma_list
else:
self.init['mu'] = mu_list[0]
self.init['sigma'] = sigma_list[0]
def _copy(self):
"Returns a copy of the population when creating networks."
return HomogeneousCorrelatedSpikeTrains(
geometry=self.geometry,
rates=self.init['rates'],
corr=self.init['corr'],
tau=self.init['tau'],
schedule=self.init['schedule'],
period=self.init['period'],
name=self.name,
refractory=self.refractory_init,
copied=True)
def _correction(self, rates, corr, tau):
# Correction of mu and sigma
mu_list = []
sigma_list = []
for i in range(len(rates)):
mu, sigma = _rectify(rates[i], corr[i], tau)
mu_list.append(mu)
sigma_list.append(sigma)
return mu_list, sigma_list
def _generate_st(self):
"""
Adjust the code templates of the specific population for the single-thread paradigm.
"""
self._specific_template['declare_additional'] = """
// Custom local parameters of a HomogeneousCorrelatedSpikeTrains
std::vector< int > _schedule; // List of times where new inputs should be set
std::vector< %(float_prec)s > _mu; // buffer holding the data
std::vector< %(float_prec)s > _sigma; // buffer holding the data
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameters of a HomogeneousCorrelatedSpikeTrains
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_mu_list(std::vector< %(float_prec)s > buffer) { _mu = buffer; mu = _mu[0]; }
std::vector< %(float_prec)s > get_mu_list() { return _mu; }
void set_sigma_list(std::vector< %(float_prec)s > buffer) { _sigma = buffer; sigma = _sigma[0]; }
std::vector< %(float_prec)s > get_sigma_list() { return _sigma; }
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision']}
self._specific_template['init_additional'] = """
// Initialize counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['export_additional'] = """
# Custom local parameters of a HomogeneousCorrelatedSpikeTrains
void set_schedule(vector[int])
vector[int] get_schedule()
void set_mu_list(vector[%(float_prec)s])
vector[%(float_prec)s] get_mu_list()
void set_sigma_list(vector[%(float_prec)s])
vector[%(float_prec)s] get_sigma_list()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['reset_additional'] ="""
_t = 0;
_block = 0;
r.clear();
r = std::vector<%(float_prec)s>(size, 0.0);
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters of a HomogeneousCorrelatedSpikeTrains
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_mu_list( self, buffer ):
pop%(id)s.set_mu_list( buffer )
cpdef np.ndarray get_mu_list( self ):
return np.array(pop%(id)s.get_mu_list( ))
cpdef set_sigma_list( self, buffer ):
pop%(id)s.set_sigma_list( buffer )
cpdef np.ndarray get_sigma_list( self ):
return np.array(pop%(id)s.get_sigma_list( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id }
scheduling_block = """
if(_active){
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
mu = _mu[_block];
sigma = _sigma[_block];
// Move to the next block
_block++;
// If it was the last block, go back to the first block
if (_block == _schedule.size()){
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if(_period > -1 && (_t == _period-1)){
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
"""
update_block = """
if( _active ) {
spiked.clear();
// x += dt*(mu - x)/tau + sqrt(dt/tau) * sigma * Normal(0., 1.)
x += dt*(mu - x)/tau + rand_0*sigma*sqrt(dt/tau);
%(float_prec)s _step = 1000.0/dt;
#pragma omp simd
for(int i = 0; i < size; i++){
// p = Uniform(0.0, 1.0) * 1000.0 / dt
p[i] = _step*rand_1[i];
}
} // active
""" % {'float_prec': Global.config['precision']}
if self._has_schedule:
self._specific_template['update_variables'] = scheduling_block + update_block
else:
self._specific_template['update_variables'] = update_block
self._specific_template['size_in_bytes'] = """
// schedule
size_in_bytes += _schedule.capacity() * sizeof(int);
""" % {'float_prec': Global.config['precision']}
def _generate_omp(self):
"""
Adjust the code templates of the specific population for the OpenMP paradigm.
"""
self._specific_template['declare_additional'] = """
// Custom local parameters of a HomogeneousCorrelatedSpikeTrains
std::vector< int > _schedule; // List of times where new inputs should be set
std::vector< %(float_prec)s > _mu; // buffer holding the data
std::vector< %(float_prec)s > _sigma; // buffer holding the data
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameters of a HomogeneousCorrelatedSpikeTrains
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_mu_list(std::vector< %(float_prec)s > buffer) { _mu = buffer; mu = _mu[0]; }
std::vector< %(float_prec)s > get_mu_list() { return _mu; }
void set_sigma_list(std::vector< %(float_prec)s > buffer) { _sigma = buffer; sigma = _sigma[0]; }
std::vector< %(float_prec)s > get_sigma_list() { return _sigma; }
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision']}
self._specific_template['init_additional'] = """
// Initialize counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['export_additional'] = """
# Custom local parameters of a HomogeneousCorrelatedSpikeTrains
void set_schedule(vector[int])
vector[int] get_schedule()
void set_mu_list(vector[%(float_prec)s])
vector[%(float_prec)s] get_mu_list()
void set_sigma_list(vector[%(float_prec)s])
vector[%(float_prec)s] get_sigma_list()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['reset_additional'] ="""
_t = 0;
_block = 0;
r.clear();
r = std::vector<%(float_prec)s>(size, 0.0);
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters of a HomogeneousCorrelatedSpikeTrains
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_mu_list( self, buffer ):
pop%(id)s.set_mu_list( buffer )
cpdef np.ndarray get_mu_list( self ):
return np.array(pop%(id)s.get_mu_list( ))
cpdef set_sigma_list( self, buffer ):
pop%(id)s.set_sigma_list( buffer )
cpdef np.ndarray get_sigma_list( self ):
return np.array(pop%(id)s.get_sigma_list( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id }
scheduling_block = """
if(_active){
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
mu = _mu[_block];
sigma = _sigma[_block];
// Move to the next block
_block++;
// If it was the last block, go back to the first block
if (_block == _schedule.size()){
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if(_period > -1 && (_t == _period-1)){
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
"""
update_block = """
if( _active ) {
#pragma omp single
{
spiked.clear();
// x += dt*(mu - x)/tau + sqrt(dt/tau) * sigma * Normal(0., 1.)
x += dt*(mu - x)/tau + rand_0*sigma*sqrt(dt/tau);
%(float_prec)s _step = 1000.0/dt;
#pragma omp simd
for(int i = 0; i < size; i++){
// p = Uniform(0.0, 1.0) * 1000.0 / dt
p[i] = _step*rand_1[i];
}
}
} // active
""" % {'float_prec': Global.config['precision']}
if self._has_schedule:
self._specific_template['update_variables'] = scheduling_block + update_block
else:
self._specific_template['update_variables'] = update_block
self._specific_template['size_in_bytes'] = """
// schedule
size_in_bytes += _schedule.capacity() * sizeof(int);
""" % {'float_prec': Global.config['precision']}
def _generate_cuda(self):
"""
Code generation if the CUDA paradigm is set.
"""
#
# Code for handling the buffer and schedule parameters
self._specific_template['declare_additional'] = """
// Custom local parameter HomogeneousCorrelatedSpikeTrains
std::vector< int > _schedule;
std::vector<%(float_prec)s> mu_buffer; // buffer
std::vector<%(float_prec)s> sigma_buffer; // buffer
int _period; // Period of cycling
long int _t; // Internal time
int _block; // Internal block when inputs are set not at each step
""" % {'float_prec': Global.config['precision']}
self._specific_template['access_additional'] = """
// Custom local parameter HomogeneousCorrelatedSpikeTrains
void set_schedule(std::vector<int> schedule) { _schedule = schedule; }
std::vector<int> get_schedule() { return _schedule; }
void set_mu_list(std::vector< %(float_prec)s > buffer) { mu_buffer = buffer; }
void set_sigma_list(std::vector< %(float_prec)s > buffer) { sigma_buffer = buffer; }
std::vector< %(float_prec)s > get_mu_list() { return mu_buffer; }
std::vector< %(float_prec)s > get_sigma_list() { return sigma_buffer; }
void set_period(int period) { _period = period; }
int get_period() { return _period; }
""" % {'float_prec': Global.config['precision'], 'id': self.id}
self._specific_template['init_additional'] = """
// counters
_t = 0;
_block = 0;
_period = -1;
"""
self._specific_template['reset_additional'] = """
// counters
_t = 0;
_block = 0;
"""
self._specific_template['export_additional'] = """
# Custom local parameters timed array
void set_schedule(vector[int])
vector[int] get_schedule()
void set_mu_list(vector[%(float_prec)s])
vector[%(float_prec)s] get_mu_list()
void set_sigma_list(vector[%(float_prec)s])
vector[%(float_prec)s] get_sigma_list()
void set_period(int)
int get_period()
""" % {'float_prec': Global.config['precision']}
self._specific_template['wrapper_access_additional'] = """
# Custom local parameters timed array
cpdef set_schedule( self, schedule ):
pop%(id)s.set_schedule( schedule )
cpdef np.ndarray get_schedule( self ):
return np.array(pop%(id)s.get_schedule( ))
cpdef set_mu_list( self, buffer ):
pop%(id)s.set_mu_list( buffer )
cpdef np.ndarray get_mu_list( self ):
return np.array(pop%(id)s.get_mu_list( ))
cpdef set_sigma_list( self, buffer ):
pop%(id)s.set_sigma_list( buffer )
cpdef np.ndarray get_sigma_list( self ):
return np.array(pop%(id)s.get_sigma_list( ))
cpdef set_period( self, period ):
pop%(id)s.set_period(period)
cpdef int get_period(self):
return pop%(id)s.get_period()
""" % { 'id': self.id, 'float_prec': Global.config['precision'] }
if not self._has_schedule:
# we can use the normal code generation for GPU kernels
pass
else:
self._specific_template['update_variables'] = """
if(_active) {
// Check if it is time to set the input
if(_t == _schedule[_block]){
// Set the data
mu = mu_buffer[_block];
sigma = sigma_buffer[_block];
// Move to the next block
_block++;
// If it was the last block, go back to the first block
if ( _block == _schedule.size() ) {
_block = 0;
}
}
// If the timedarray is periodic, check if we arrive at that point
if( (_period > -1) && (_t == _period-1) ) {
// Reset the counters
_block=0;
_t = -1;
}
// Always increment the internal time
_t++;
}
"""
self._specific_template['update_variable_body'] = """
// Updating global variables of population %(id)s
__global__ void cuPop%(id)s_global_step( const long int t, const double dt, const double tau, double mu, double* x, curandState* rand_0, double sigma )
{
// x += dt*(mu - x)/tau + sqrt(dt/tau) * sigma * Normal(0., 1.)
x[0] += dt*(mu - x[0])/tau + curand_normal_double( &rand_0[0] )*sigma*sqrt(dt/tau);
}
// Updating local variables of population %(id)s
__global__ void cuPop%(id)s_local_step( const long int t, const double dt, curandState* rand_1, double* x, unsigned int* num_events, int* spiked, long int* last_spike )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
%(float_prec)s step = 1000.0/dt;
while ( i < %(size)s )
{
// p = Uniform(0.0, 1.0) * 1000.0 / dt
%(float_prec)s p = curand_uniform_double( &rand_1[i] ) * step;
if (p < x[0]) {
int pos = atomicAdd ( num_events, 1);
spiked[pos] = i;
last_spike[i] = t;
}
i += blockDim.x;
}
__syncthreads();
}
""" % {
'id': self.id,
'size': self.size,
'float_prec': Global.config['precision']
}
self._specific_template['update_variable_header'] = """__global__ void cuPop%(id)s_global_step( const long int t, const double dt, const double tau, double mu, double* x, curandState* rand_0, double sigma );
__global__ void cuPop%(id)s_local_step( const long int t, const double dt, curandState* rand_1, double* x, unsigned int* num_events, int* spiked, long int* last_spike );
""" % {'id': self.id}
# Note that the GPU kernels must be launched with a single block; otherwise the
# atomicAdd called inside the kernel does not work correctly (HD: April 1st, 2021).
self._specific_template['update_variable_call'] = """
if (pop%(id)s._active) {
// Update the scheduling
pop%(id)s.update();
// Reset old events
clear_num_events<<< 1, 1, 0, pop%(id)s.stream >>>(pop%(id)s.gpu_spike_count);
#ifdef _DEBUG
cudaError_t err_clear_num_events_%(id)s = cudaGetLastError();
if (err_clear_num_events_%(id)s != cudaSuccess)
std::cout << "pop%(id)s_spike_gather: " << cudaGetErrorString(err_clear_num_events_%(id)s) << std::endl;
#endif
// compute the value of x based on mu/sigma
cuPop%(id)s_global_step<<< 1, 1, 0, pop%(id)s.stream >>>(
t, dt,
pop%(id)s.tau,
pop%(id)s.mu,
pop%(id)s.gpu_x,
pop%(id)s.gpu_rand_0,
pop%(id)s.sigma
);
#ifdef _DEBUG
cudaError_t err_pop%(id)s_global_step = cudaGetLastError();
if( err_pop%(id)s_global_step != cudaSuccess) {
std::cout << "pop%(id)s_step: " << cudaGetErrorString(err_pop%(id)s_global_step) << std::endl;
exit(0);
}
#endif
// Generate new spike events
cuPop%(id)s_local_step<<< 1, pop%(id)s._threads_per_block, 0, pop%(id)s.stream >>>(
t, dt,
pop%(id)s.gpu_rand_1,
pop%(id)s.gpu_x,
pop%(id)s.gpu_spike_count,
pop%(id)s.gpu_spiked,
pop%(id)s.gpu_last_spike
);
#ifdef _DEBUG
cudaError_t err_pop_spike_gather_%(id)s = cudaGetLastError();
if(err_pop_spike_gather_%(id)s != cudaSuccess) {
std::cout << "pop%(id)s_spike_gather: " << cudaGetErrorString(err_pop_spike_gather_%(id)s) << std::endl;
exit(0);
}
#endif
// transfer back the spike counter (needed by record)
cudaMemcpy( &pop%(id)s.spike_count, pop%(id)s.gpu_spike_count, sizeof(unsigned int), cudaMemcpyDeviceToHost);
#ifdef _DEBUG
cudaError_t err_pop%(id)s_async_copy = cudaGetLastError();
if ( err_pop%(id)s_async_copy != cudaSuccess ) {
std::cout << "record_spike_count: " << cudaGetErrorString(err_pop%(id)s_async_copy) << std::endl;
exit(0);
}
#endif
// transfer back the spiked array (needed by record)
if (pop%(id)s.spike_count > 0) {
cudaMemcpy( pop%(id)s.spiked.data(), pop%(id)s.gpu_spiked, pop%(id)s.spike_count*sizeof(int), cudaMemcpyDeviceToHost);
#ifdef _DEBUG
cudaError_t err_pop%(id)s_async_copy2 = cudaGetLastError();
if ( err_pop%(id)s_async_copy2 != cudaSuccess ) {
std::cout << "record_spike: " << cudaGetErrorString(err_pop%(id)s_async_copy2) << std::endl;
exit(0);
}
#endif
}
}
""" % {'id': self.id}
self._specific_template['size_in_bytes'] = "//TODO: "
def _instantiate(self, module):
# Create the Cython instance
self.cyInstance = getattr(module, self.class_name+'_wrapper')(self.size, self.max_delay)
def __setattr__(self, name, value):
if not hasattr(self, 'initialized'):
Population.__setattr__(self, name, value)
elif name == 'schedule':
if self.initialized:
self.cyInstance.set_schedule( np.array(value) / Global.config['dt'] )
else:
self.init['schedule'] = value
elif name == 'mu':
if self.initialized:
if self._has_schedule:
self.cyInstance.set_mu_list( value )
else:
self.cyInstance.set_global_attribute( "mu", value, Global.config["precision"] )
else:
self.init['mu'] = value
elif name == 'sigma':
if self.initialized:
if self._has_schedule:
self.cyInstance.set_sigma_list( value )
else:
self.cyInstance.set_global_attribute( "sigma", value, Global.config["precision"] )
else:
self.init['sigma'] = value
elif name == "period":
if self.initialized:
self.cyInstance.set_period(int(value /Global.config['dt']))
else:
self.init['period'] = value
elif name == 'rates':
if self._has_schedule:
value = np.array(value)
if not value.size == self.schedule.size:
Global._error("HomogeneousCorrelatedSpikeTrains: rates must have the same length as schedule.")
else:
value = np.array([float(value)])
if self.initialized:
Population.__setattr__(self, name, value)
# Correction of mu and sigma everytime r, c or tau is changed
try:
mu, sigma = self._correction(self.rates, self.corr, self.tau)
if self._has_schedule:
self.mu = mu
self.sigma = sigma
else:
self.mu = mu[0]
self.sigma = sigma[0]
except:
pass
else:
self.init[name] = value
Population.__setattr__(self, name, value)
elif name == 'corr':
if self._has_schedule:
if not isinstance(value, (list, np.ndarray)):
value = np.full((self.schedule.size, ), value)
else:
value = np.array(value)
if not value.size == self.schedule.size:
Global._error("HomogeneousCorrelatedSpikeTrains: corr must have the same length as schedule.")
else:
value = np.array([float(value)])
if self.initialized:
Population.__setattr__(self, name, value)
try:
# Correction of mu and sigma everytime r, c or tau is changed
mu, sigma = self._correction(self.rates, self.corr, self.tau)
if self._has_schedule:
self.mu = mu
self.sigma = sigma
else:
self.mu = mu[0]
self.sigma = sigma[0]
except:
pass
else:
self.init[name] = value
Population.__setattr__(self, name, value)
elif name == 'tau':
if self.initialized:
Population.__setattr__(self, name, value)
# Correction of mu and sigma everytime r, c or tau is changed
mu, sigma = self._correction(self.rates, self.corr, self.tau)
if self._has_schedule:
self.mu = mu
self.sigma = sigma
else:
self.mu = mu[0]
self.sigma = sigma[0]
else:
self.init[name] = value
Population.__setattr__(self, name, value)
else:
Population.__setattr__(self, name, value)
def __getattr__(self, name):
if name == 'schedule':
if self.initialized:
if self._has_schedule:
return Global.config['dt'] * self.cyInstance.get_schedule()
else:
return np.array([0.0])
else:
return self.init['schedule']
elif name == 'mu':
if self.initialized:
if self._has_schedule:
return self.cyInstance.get_mu_list()
else:
return self.cyInstance.get_global_attribute( "mu", Global.config["precision"] )
else:
return self.init['mu']
elif name == 'sigma':
if self.initialized:
if self._has_schedule:
return self.cyInstance.get_sigma_list()
else:
return self.cyInstance.get_global_attribute( "sigma", Global.config["precision"] )
else:
return self.init['sigma']
elif name == 'tau':
if self.initialized:
return self.cyInstance.get_global_attribute( "tau", Global.config["precision"] )
else:
return self.init['tau']
elif name == 'period':
if self.initialized:
return self.cyInstance.get_period() * Global.config['dt']
else:
return self.init['period']
else:
return Population.__getattribute__(self, name)
def _rectify(mu, corr, tau):
"""
Rectifies mu and sigma to ensure the rates are positive.
This part of the code is adapted from Brian's source code:
Copyright ENS, INRIA, CNRS
Authors: Romain Brette (brette@di.ens.fr) and Dan Goodman (goodman@di.ens.fr)
Licence: CeCILL
"""
from scipy.special import erf #pylint: disable=no-name-in-module
from scipy.optimize import newton
def _rectified_gaussian(mu, sigma):
"""
Calculates the mean and standard deviation for a rectified Gaussian distribution.
mu, sigma: parameters of the original distribution
Returns mur,sigmar: parameters of the rectified distribution
"""
a = 1. + erf(mu / (sigma * (2 ** .5)))
mur = (sigma / (2. * np.pi) ** .5) * np.exp(-0.5 * (mu / sigma) ** 2) + .5 * mu * a
sigmar = ((mu - mur) * mur + .5 * sigma ** 2 * a) ** .5
return (mur, sigmar)
mur = mu
sigmar = (corr * mu / (2. * tau/1000.)) ** .5
if sigmar == 0 * sigmar: # for unit consistency
return (mur, sigmar)
x0 = mur / sigmar
ratio = lambda u, v:u / v
f = lambda x:ratio(*_rectified_gaussian(x, 1.)) - x0
y = newton(f, x0 * 1.1) # Secant method
new_sigma = mur / (np.exp(-0.5 * y ** 2) / ((2. * np.pi) ** .5) + .5 * y * (1. + erf(y * (2 ** (-.5)))))
new_mu = y * new_sigma
return (new_mu, new_sigma)
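# Illustrative note (not part of the original module): for a target rate of
# 10 Hz, a correlation strength of 0.3 and tau = 10 ms, _rectify returns the
# mean and standard deviation of the underlying Ornstein-Uhlenbeck process
# whose rectified value reproduces the requested statistics, e.g.
#
#     mu, sigma = _rectify(10., 0.3, 10.)
#
# Setting ``rates``, ``corr`` or ``tau`` on a HomogeneousCorrelatedSpikeTrains
# instance triggers exactly this computation through _correction().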
|
vitay/ANNarchy
|
ANNarchy/core/SpecificPopulation.py
|
Python
|
gpl-2.0
| 94,487
|
[
"Brian",
"Gaussian",
"NEURON"
] |
a58245acb473bb6d0861581acb7f6736beaaf1ca1806fcbbc279eeee80a5d158
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
""" Some useful functions """
import numpy as np
import scipy.linalg
import scipy.sparse as sp
from scipy.ndimage.filters import convolve
from group import Group
def best_fft_shape(shape):
"""
From fftw.org:
FFTW is best at handling sizes of the form 2^a*3^b*5^c*7^d*11^e*13^f,
where e+f is either 0 or 1.
This function finds the best shape for computing fftw
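**Examples** (illustrative): 509 is prime and gets padded up to 512 = 2**9,
while 240 = 2**4*3*5 is already an optimal size.
>>> best_fft_shape((509, 240))
array([512, 240])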
"""
base = [13,11,7,5,3,2]
def factorize(n):
if not n:
raise RuntimeError("Length n must be a positive integer")
elif n == 1:
return [1,]
factors = []
for b in base:
while n % b == 0:
n /= b
factors.append(b)
if n == 1:
return factors
return []
def is_optimal(n):
factors = factorize(n)
return len(factors) > 0 \
and factors[:2] not in [[13,13],[13,11],[11,11]]
shape = np.atleast_1d(np.array(shape))
for i in range(shape.size):
while not is_optimal(shape[i]):
shape[i] += 1
return shape.astype(int)
def convolve1d(Z, K, toric=False):
""" Discrete, clamped, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_.
In probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
:param array Z:
One-dimensional array.
:param array K:
One-dimensional array.
:param bool toric:
Indicate whether convolution should be considered toric
:return:
Discrete, clamped, linear convolution of `Z` and `K`.
**Note**
The discrete convolution operation is defined as
.. math:: (f * g)[n] = \sum_{m = -\infty}^{\infty} f[m] g[n-m]
**References**
.. [1] Wikipedia, "Convolution",
http://en.wikipedia.org/wiki/Convolution.
"""
if toric:
return convolve(Z,K,mode='wrap')
else:
return convolve(Z,K,mode='constant')
#return convolve(Z,K,mode='wrap')
# R = np.convolve(Z, K, 'same')
# i0 = 0
# if R.shape[0] > Z.shape[0]:
# i0 = (R.shape[0]-Z.shape[0])/2 + 1 - Z.shape[0]%2
# i1 = i0+ Z.shape[0]
# return R[i0:i1]
def convolve2d(Z, K, USV = None, toric=False):
""" Discrete, clamped convolution of two two-dimensional arrays.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_.
In probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions. If the kernel K is separable, it is decomposed using a
singular value decomposition [2]_ and the computing is optimized
accordingly (when rank n is inferior to S.size/2)
:param array Z:
Two-dimensional array.
:param array K:
Two-dimensional array.
:param tuple USV
(U,S,V) as a result of scipy.linalg.svd(K).
:param bool toric:
Indicate whether convolution should be considered toric
:return:
Discrete, clamped, linear convolution of `Z` and `K`.
**Note**
The discrete convolution operation is defined as
.. math:: (f * g)[n] = \sum_{m = -\infty}^{\infty} f[m] g[n-m]
**References**
.. [1] Wikipedia, "Convolution",
http://en.wikipedia.org/wiki/Convolution.
.. [2] Wikipedia, "Singular Value Decomposition",
http://en.wikipedia.org/wiki/Singular_value_decomposition.
"""
if USV is None:
U,S,V = scipy.linalg.svd(K)
U,S,V = U.astype(K.dtype), S.astype(K.dtype), V.astype(K.dtype)
else:
U,S,V = USV
n = (S > 1e-12).sum()
# n = (S > 0).sum()
R = np.zeros( Z.shape )
for k in range(n):
Zt = Z.copy() * S[k]
for i in range(Zt.shape[0]):
Zt[i,:] = convolve1d(Zt[i,:], V[k,::-1], toric)
for i in range(Zt.shape[1]):
Zt[:,i] = convolve1d(Zt[:,i], U[::-1,k], toric)
R += Zt
return R
def extract(Z, shape, position, fill=0):
""" Extract a sub-array from Z using given shape and centered on position.
If some part of the sub-array is out of Z bounds, result will be padded
with fill value.
**Parameters**
`Z` : array_like
Input array.
`shape` : tuple
Shape of the output array
`position` : tuple
Position within Z
`fill` : scalar
Fill value
**Returns**
`out` : array_like
Z slice with given shape and center
**Examples**
>>> Z = np.arange(0,16).reshape((4,4))
>>> extract(Z, shape=(3,3), position=(0,0), fill=np.NaN)
[[ NaN NaN NaN]
[ NaN 0. 1.]
[ NaN 4. 5.]]
Schema:
+-----------+
| 0 0 0 | = extract (Z, shape=(3,3), position=(0,0))
| +---------------+
| 0 | 0 1 | 2 3 | = Z
| | | |
| 0 | 4 5 | 6 7 |
+---|-------+ |
| 8 9 10 11 |
| |
| 12 13 14 15 |
+---------------+
>>> Z = np.arange(0,16).reshape((4,4))
>>> extract(Z, shape=(3,3), position=(3,3), fill=np.NaN)
[[ 10. 11. NaN]
[ 14. 15. NaN]
[ NaN NaN NaN]]
Schema:
+---------------+
| 0 1 2 3 | = Z
| |
| 4 5 6 7 |
| +-----------+
| 8 9 |10 11 | 0 | = extract (Z, shape=(3,3), position=(3,3))
| | | |
| 12 13 |14 15 | 0 |
+---------------+ |
| 0 0 0 |
+-----------+
"""
# assert(len(position) == len(Z.shape))
# if len(shape) < len(Z.shape):
# shape = shape + Z.shape[len(Z.shape)-len(shape):]
R = np.ones(shape, dtype=Z.dtype)*fill
P = np.array(list(position)).astype(int)
Rs = np.array(list(R.shape)).astype(int)
Zs = np.array(list(Z.shape)).astype(int)
R_start = np.zeros((len(shape),)).astype(int)
R_stop = np.array(list(shape)).astype(int)
Z_start = (P-Rs//2)
Z_stop = (P+Rs//2)+Rs%2
R_start = (R_start - np.minimum(Z_start,0)).tolist()
Z_start = (np.maximum(Z_start,0)).tolist()
#R_stop = (R_stop - np.maximum(Z_stop-Zs,0)).tolist()
R_stop = np.maximum(R_start, (R_stop - np.maximum(Z_stop-Zs,0))).tolist()
Z_stop = (np.minimum(Z_stop,Zs)).tolist()
r = tuple(slice(start,stop) for start,stop in zip(R_start,R_stop))
z = tuple(slice(start,stop) for start,stop in zip(Z_start,Z_stop))
R[r] = Z[z]
return R
def convolution_matrix(src, dst, kernel, toric=False):
"""
Build a sparse convolution matrix M such that:
(M*src.ravel()).reshape(src.shape) = convolve2d(src,kernel)
You can specify whether convolution is toric or not and specify a different
output shape. If output (dst) is different, convolution is only applied at
corresponding normalized location within the src array.
Building the matrix can be pretty long if your kernel is big but it can
nonetheless saves you some time if you need to apply several convolution
compared to fft convolution (no need to go to the Fourier domain).
Parameters:
-----------
src : n-dimensional numpy array
Source shape
dst : n-dimensional numpy array
Destination shape
kernel : n-dimensional numpy array
Kernel to be used for convolution
Returns:
--------
A sparse convolution matrix
Examples:
---------
>>> Z = np.ones((3,3))
>>> M = convolution_matrix(Z,Z,Z,True)
>>> print (M*Z.ravel()).reshape(Z.shape)
[[ 9. 9. 9.]
[ 9. 9. 9.]
[ 9. 9. 9.]]
>>> M = convolution_matrix(Z,Z,Z,False)
>>> print (M*Z.ravel()).reshape(Z.shape)
[[ 4. 6. 4.]
[ 6. 9. 6.]
[ 4. 6. 4.]]
"""
# For a toric connection, it is wrong to have a kernel larger
# than the source
# if toric:
# shape = np.minimum(np.array(src.shape), np.array(kernel.shape))
# kernel = extract(kernel, shape, np.rint(np.array(kernel.shape)/2.))
# Get non NaN value from kernel and their indices.
nz = (1 - np.isnan(kernel)).nonzero()
data = kernel[nz].ravel()
indices = [0,]*(len(kernel.shape)+1)
indices[0] = np.array(nz)
indices[0] += np.atleast_2d((np.array(src.shape)//2 - np.array(kernel.shape)//2)).T
# Generate an array A for a given shape such that given an index tuple I,
# we can translate into a flat index F = (I*A).sum()
to_flat_index = np.ones((len(src.shape),1), dtype=int)
if len(src.shape) > 1:
to_flat_index[:-1] = src.shape[1]
R, C, D = [], [], []
dst_index = 0
src_indices = []
# Translate target tuple indices into source tuple indices taking care of
# possible scaling (this is done by normalizing indices)
for i in range(len(src.shape)):
z = np.rint((np.linspace(0,1,dst.shape[i])*(src.shape[i]-1))).astype(int)
src_indices.append(z)
nd = [0,]*(len(kernel.shape))
for index in np.ndindex(dst.shape):
dims = []
# Are we starting a new dimension ?
if not index[-1]:
for i in range(len(index)-1,0,-1):
if index[i]: break
dims.insert(0,i-1)
dims.append(len(dst.shape)-1)
for dim in dims:
i = index[dim]
if toric:
z = (indices[dim][dim] - src.shape[dim]//2 +(kernel.shape[dim]+1)%2 + src_indices[dim][i]) % src.shape[dim]
else:
z = (indices[dim][dim] - src.shape[dim]//2 +(kernel.shape[dim]+1)%2 + src_indices[dim][i])
# if toric:
# z = (indices[dim][dim] - src.shape[dim]/2.0 -(kernel.shape[dim]+1)%2 + src_indices[dim][i]) % src.shape[dim]
# else:
# z = (indices[dim][dim] - src.shape[dim]/2.0 -(kernel.shape[dim]+1)%2+ src_indices[dim][i])
n = np.where((z >= 0)*(z < src.shape[dim]))[0]
if not dim:
nd[dim] = n.copy()
else:
nd[dim] = nd[dim-1][n]
indices[dim+1] = np.take(indices[dim], n, 1)
indices[dim+1][dim] = z[n]
dim = len(dst.shape)-1
z = indices[dim+1]
R.extend( [dst_index,]*len(z[0]) )
C.extend( (z*to_flat_index).sum(0).tolist() )
D.extend( data[nd[-1]].tolist() )
dst_index += 1
return sp.coo_matrix( (D,(R,C)), (dst.size,src.size))
#Z = np.zeros((dst.size,src.size))
#Z[R,C] = D
#return Z
# #nz = kernel.nonzero()
# nz = (1 - np.isnan(kernel)).nonzero()
# data = kernel[nz].flatten()
# indices = [0,]*(len(kernel.shape)+1)
# indices[0] = np.array(nz)
# indices[0] += np.atleast_2d((np.array(src.shape)//2 - np.array(kernel.shape)//2)).T
# to_flat_index = np.ones((len(src.shape),1), dtype=int)
# to_flat_index[:-1] = src.shape[:-1]
# R, C, D = [], [], []
# dst_index = 0
# src_indices = []
# for i in range(len(src.shape)):
# z = np.rint((np.linspace(0,1,dst.shape[i])*(src.shape[i]-1))).astype(int)
# src_indices.append(z)
# for index in np.ndindex(dst.shape):
# dims = []
# if index[-1] == 0:
# for i in range(len(index)-1,0,-1):
# if index[i]: break
# dims.insert(0,i-1)
# dims.append(len(dst.shape)-1)
# for dim in dims:
# i = index[dim]
# if toric:
# z = (indices[dim][dim] - src.shape[dim]//2 + src_indices[dim][i]) % src.shape[dim]
# else:
# z = (indices[dim][dim] - src.shape[dim]//2 + src_indices[dim][i])
# n = np.where((z >= 0)*(z < src.shape[dim]))[0]
# indices[dim+1] = np.take(indices[dim], n, 1)
# indices[dim+1][dim] = z[n]
# dim = len(dst.shape)-1
# z = indices[dim+1]
# R.extend( [dst_index,]*len(z[0]) )
# C.extend( (z*to_flat_index).sum(0).tolist() )
# D.extend( data[n].tolist() )
# dst_index += 1
# return sp.coo_matrix( (D,(R,C)), (dst.size,src.size))
def gaussian(shape=(25,25), width=0.5, center=0.0):
""" Generate a gaussian of the form g(x) = height*exp(-(x-center)**2/width**2).
**Parameters**
shape: tuple of integers
Shape of the output array
width: float or tuple of float
Width of gaussian
center: float or tuple of float
Center of gaussian
**Returns**
a numpy array of specified shape containing a gaussian
"""
if type(shape) in [float,int]:
shape = (shape,)
if type(width) in [float,int]:
width = (width,)*len(shape)
if type(center) in [float,int]:
center = (center,)*len(shape)
grid=[]
for size in shape:
grid.append (slice(0,size))
C = np.mgrid[tuple(grid)]
R = np.zeros(shape)
for i,size in enumerate(shape):
if shape[i] > 1:
R += (((C[i]/float(size-1))*2 - 1 - center[i])/width[i])**2
return np.exp(-R/2)
def empty(shape, dtype=float):
"""
Return a new group of given shape and type, without initialising entries.
:param tuple shape:
Shape of the new group, e.g., ``(2, 3)`` or ``2``.
:param dtype:
The desired data-type for the group, e.g., `np.int8`. Default is
`np.float64`.
:return:
Group with the given shape and dtype, with uninitialised entries.
**Notes**
`empty`, unlike `zeros`, does not set the group values to zero, and may
therefore be marginally faster. On the other hand, it requires the user
to manually set all the values in the group, and should be used with
caution.
**Examples**
>>> Group.empty((2,2))
Group([[6.94248367807e-310, 1.34841898023e-316],
[1.34841977073e-316, 0.0]],
dtype=[('f0', '<f8')])
**See also**
* :meth:`dana.zeros` : Return a new group setting values to zero.
* :meth:`dana.ones` : Return a new group setting values to one.
* :meth:`dana.zeros_like` : Return a group of zeros with shape and type of input.
* :meth:`dana.ones_like` : Return a group of ones with shape and type of input.
* :meth:`dana.empty_like` : Return a empty group with shape and type of input.
"""
return Group(shape=shape, dtype=dtype, fill=None)
def zeros(shape, dtype=float):
"""
Return a new group of given shape and type, filled with zeros.
:param tuple shape:
Shape of the new group, e.g., ``(2, 3)`` or ``2``.
:param dtype:
The desired data-type for the group, e.g., `np.int8`. Default is
`np.float64`.
:return:
Group with the given shape and dtype filled with zeros.
**Examples**
>>> dana.zeros((2,2))
Group([[0.0, 0.0],
[0.0, 0.0]],
dtype=[('f0', '<f8')])
>>> dana.zeros((2,2), dtype=int)
Group([[0, 0],
[0, 0]],
dtype=[('f0', '<f8')])
**See also**
* :meth:`dana.ones` : Return a new group setting values to one.
* :meth:`dana.empty` : Return a new uninitialized group.
* :meth:`dana.zeros_like` : Return an group of zeros with shape and type of input.
* :meth:`dana.ones_like` : Return an group of ones with shape and type of input.
* :meth:`dana.empty_like` : Return an empty group with shape and type of input.
"""
return Group(shape=shape, dtype=dtype, fill=0)
def ones(shape, dtype=float):
"""
Return a new group of given shape and type, filled with ones.
:param tuple shape:
Shape of the new group, e.g., ``(2, 3)`` or ``2``.
:param dtype:
The desired data-type for the group, e.g., `np.int8`. Default is
`np.float64`.
:return:
Group with the given shape and dtype filled with ones.
**Examples**
>>> dana.ones((2,2))
Group([[1.0, 1.0],
[1.0, 1.0]],
dtype=[('f0', '<f8')])
>>> dana.ones((2,2), dtype=int)
Group([[1, 1],
[1, 1]],
dtype=[('f0', '<f8')])
**See also**
* :meth:`dana.zeros` : Return a new group setting values to zero.
* :meth:`dana.empty` : Return a new uninitialized group.
* :meth:`dana.zeros_like` : Return an group of zeros with shape and type of input.
* :meth:`dana.ones_like` : Return an group of ones with shape and type of input.
* :meth:`dana.empty_like` : Return an empty group with shape and type of input.
"""
return Group(shape=shape, dtype=dtype, fill=1)
def empty_like(other):
"""
Create a new group with the same shape and type as another.
:param array other:
The shape and data-type of `other` defines the parameters of the
returned group.
:return:
Uninitialized group with same shape and type as `other`.
**Examples**
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
**See also**
* :meth:`dana.zeros` : Return a new group setting values to zero.
* :meth:`dana.ones` : Return a new group setting values to one.
* :meth:`dana.empty` : Return a new uninitialized group.
* :meth:`dana.ones_like` : Return a group of ones with shape and type of input.
* :meth:`dana.zeros_like` : Return a group of zeros with shape and type of input.
"""
return Group(shape=other.shape, dtype=other.dtype, fill=None)
def zeros_like(other):
"""
Create a new group of zeros with the same shape and type as another.
:param array other:
The shape and data-type of `other` defines the parameters of the
returned group.
:return:
Group of zeros with same shape and type as `other`.
**Examples**
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
**See also**
* :meth:`dana.zeros` : Return a new group setting values to zero.
* :meth:`dana.ones` : Return a new group setting values to one.
* :meth:`dana.empty` : Return a new uninitialized group.
* :meth:`dana.empty_like` : Return an uninitialized group shape and type of input.
* :meth:`dana.ones_like` : Return a group of ones with shape and type of input.
"""
return Group(shape=other.shape, dtype=other.dtype, fill=0)
def ones_like(other):
"""
Returns a group of ones with the same shape and type as a given array.
:param array other:
The shape and data-type of `other` defines the parameters of the
returned group.
:return:
Group of ones with same shape and type as `other`.
**Examples**
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> ones_like(x)
group([[1, 1, 1],
[1, 1, 1]])
**See also**
* :meth:`dana.zeros` : Return a new group setting values to zero.
* :meth:`dana.ones` : Return a new group setting values to one.
* :meth:`dana.empty` : Return a new uninitialized group.
* :meth:`dana.empty_like` : Return an empty group with shape and type of input.
* :meth:`dana.zeros_like` : Return a group of zeros with shape and type of input.
"""
return Group(shape=other.shape, dtype=other.dtype, fill=1)
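# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original DANA sources), assuming
# the imports at the top of this module resolve. It only exercises the pure
# numpy/scipy helpers defined above and does not touch the Group constructors.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    K = gaussian(shape=(5, 5), width=0.5)    # small separable smoothing kernel
    Z = np.random.uniform(0, 1, (32, 32))    # noisy input field
    S = convolve2d(Z, K, toric=True)         # SVD-based separable convolution
    print(Z.shape, S.shape)                  # (32, 32) (32, 32)
    print(best_fft_shape((509, 240)))        # [512 240]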
|
rougier/dana
|
dana/functions.py
|
Python
|
bsd-3-clause
| 21,860
|
[
"Gaussian"
] |
91e1038b0f757507c182be0d77c6ba8401ca156078a5bc4ed9903d42aac00080
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
from bpy.types import NodeSocket, Node
from ...util import asUpdate
from ..materials import AppleseedMatLayerProps
from . import AppleseedNode, AppleseedSocket
class AppleseedDiffuseBTDFTransmittanceSocket(NodeSocket, AppleseedSocket):
bl_idname = "AppleseedDiffuseBTDFTransmittance"
bl_label = "Transmittance"
socket_value = AppleseedMatLayerProps.transmittance_color
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text)
else:
layout.prop(self, "socket_value", text=text)
def draw_color(self, context, node):
return 0.8, 0.8, 0.5, 1.0
class AppleseedDiffuseBTDFMultiplierSocket(NodeSocket, AppleseedSocket):
bl_idname = "AppleseedDiffuseBTDFMultiplier"
bl_label = "Multiplier"
socket_value = AppleseedMatLayerProps.transmittance_multiplier
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text)
else:
layout.prop(self, "socket_value", text=text)
def draw_color(self, context, node):
return 0.5, 0.5, 0.5, 1.0
class AppleseedDiffuseBTDFNode(Node, AppleseedNode):
bl_idname = "AppleseedDiffuseBTDFNode"
bl_label = "Diffuse BTDF"
bl_icon = 'SMOOTH'
node_type = 'diffuse_btdf'
def init(self, context):
self.inputs.new('AppleseedDiffuseBTDFTransmittance', "Transmittance")
self.inputs.new('AppleseedDiffuseBTDFMultiplier', "Multiplier")
self.outputs.new('NodeSocketShader', "BTDF")
def draw_buttons(self, context, layout):
pass
def draw_buttons_ext(self, context, layout):
pass
def copy(self, node):
pass
def free(self):
asUpdate("Removing node ", self)
def draw_label(self):
return self.bl_label
def register():
bpy.utils.register_class(AppleseedDiffuseBTDFMultiplierSocket)
bpy.utils.register_class(AppleseedDiffuseBTDFTransmittanceSocket)
bpy.utils.register_class(AppleseedDiffuseBTDFNode)
def unregister():
bpy.utils.unregister_class(AppleseedDiffuseBTDFNode)
bpy.utils.unregister_class(AppleseedDiffuseBTDFMultiplierSocket)
bpy.utils.unregister_class(AppleseedDiffuseBTDFTransmittanceSocket)
|
jasperges/blenderseed
|
properties/nodes/diffuse_btdf.py
|
Python
|
mit
| 3,573
|
[
"VisIt"
] |
5dee8b6df6885061e70c2391e82da9be12410d5cba758973c45e815ae589252c
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
'''
Return the red, green and blue components for a
color as doubles.
'''
rgb = [0.0, 0.0, 0.0] # black
vtk.vtkNamedColors().GetColorRGB(colorName, rgb)
return rgb
# Define loop to clip with
#
selectionPoints = vtk.vtkPoints()
selectionPoints.InsertPoint(0, -0.16553, 0.135971, 0.451972)
selectionPoints.InsertPoint(1, -0.0880123, -0.134952, 0.4747)
selectionPoints.InsertPoint(2, 0.00292618, -0.134604, 0.482459)
selectionPoints.InsertPoint(3, 0.0641941, 0.067112, 0.490947)
selectionPoints.InsertPoint(4, 0.15577, 0.0734765, 0.469245)
selectionPoints.InsertPoint(5, 0.166667, -0.129217, 0.454622)
selectionPoints.InsertPoint(6, 0.241259, -0.123363, 0.420581)
selectionPoints.InsertPoint(7, 0.240334, 0.0727106, 0.432555)
selectionPoints.InsertPoint(8, 0.308529, 0.0844311, 0.384357)
selectionPoints.InsertPoint(9, 0.32672, -0.121674, 0.359187)
selectionPoints.InsertPoint(10, 0.380721, -0.117342, 0.302527)
selectionPoints.InsertPoint(11, 0.387804, 0.0455074, 0.312375)
selectionPoints.InsertPoint(12, 0.43943, -0.111673, 0.211707)
selectionPoints.InsertPoint(13, 0.470984, -0.0801913, 0.147919)
selectionPoints.InsertPoint(14, 0.436777, 0.0688872, 0.233021)
selectionPoints.InsertPoint(15, 0.44874, 0.188852, 0.109882)
selectionPoints.InsertPoint(16, 0.391352, 0.254285, 0.176943)
selectionPoints.InsertPoint(17, 0.373274, 0.154162, 0.294296)
selectionPoints.InsertPoint(18, 0.274659, 0.311654, 0.276609)
selectionPoints.InsertPoint(19, 0.206068, 0.31396, 0.329702)
selectionPoints.InsertPoint(20, 0.263789, 0.174982, 0.387308)
selectionPoints.InsertPoint(21, 0.213034, 0.175485, 0.417142)
selectionPoints.InsertPoint(22, 0.169113, 0.261974, 0.390286)
selectionPoints.InsertPoint(23, 0.102552, 0.25997, 0.414814)
selectionPoints.InsertPoint(24, 0.131512, 0.161254, 0.454705)
selectionPoints.InsertPoint(25, 0.000192443, 0.156264, 0.475307)
selectionPoints.InsertPoint(26, -0.0392091, 0.000251724, 0.499943)
selectionPoints.InsertPoint(27, -0.096161, 0.159646, 0.46438)
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(50)
sphere.SetThetaResolution(100)
sphere.SetStartPhi(0)
sphere.SetEndPhi(90)
loop = vtk.vtkSelectPolyData()
loop.SetInputConnection(sphere.GetOutputPort())
loop.SetLoop(selectionPoints)
loop.GenerateSelectionScalarsOn()
# negative scalars inside
loop.SetSelectionModeToSmallestRegion()
# clips out positive region
clip = vtk.vtkClipPolyData()
clip.SetInputConnection(loop.GetOutputPort())
clipMapper = vtk.vtkPolyDataMapper()
clipMapper.SetInputConnection(clip.GetOutputPort())
clipActor = vtk.vtkLODActor()
clipActor.SetMapper(clipMapper)
loop2 = vtk.vtkSelectPolyData()
loop2.SetInputConnection(sphere.GetOutputPort())
loop2.SetLoop(selectionPoints)
loop2.SetSelectionModeToSmallestRegion()
selectMapper = vtk.vtkPolyDataMapper()
selectMapper.SetInputConnection(loop2.GetOutputPort())
selectActor = vtk.vtkLODActor()
selectActor.SetMapper(selectMapper)
selectActor.AddPosition(1, 0, 0)
selectActor.GetProperty().SetColor(GetRGBColor('peacock'))
# Create graphics stuff
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(clipActor)
ren1.AddActor(selectActor)
ren1.SetBackground(.1, .2, .4)
renWin.SetSize(500, 250)
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(0.236644, 11.8322)
cam1.SetFocalPoint(0.542809, -0.0166201, 0.183931)
cam1.SetPosition(1.65945, 0.364443, 2.29141)
cam1.SetViewUp(-0.0746604, 0.986933, -0.14279)
iren.Initialize()
# render the image
#iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Core/Testing/Python/cutLoop.py
|
Python
|
gpl-3.0
| 3,804
|
[
"VTK"
] |
fab72eb57fe8d6b68ea655659d6a741f9e5cacd8388a8b2c72dc281d45109360
|
#!/usr/bin/env python3
import argparse
import os
import re
from lib.coordreaders import PDBReader, GROReader
from lib.moldatabase import MolDatabase
from lib.atomdatabase import AtomDatabase
from lib.bonddatabase import BondDatabase
class NonMatchingAtomException(Exception):
def __init__(self, num, name1, name2):
line = "Atom {0} in coordinate file ({1}) does not match atom in force field ({2})."
super(NonMatchingAtomException, self).__init__(line.format(num, name1, name2))
class PolymerError(Exception):
def __init__(self, name1, name2):
line = "Molecules {0} and {1} do not have matching polymer types"
super(PolymerError, self).__init__(line.format(name1, name2))
class Counter:
__slots__ = ["total", "types"]
def __init__(self, total=0, types=0):
self.total = total
self.types = types
def __repr__(self):
return "<Counter: (total={0}, types={1})>".format(self.total, self.types)
class PDB2LMP:
def __init__(self, infile, moldb=None, atomdb=None, bonddb=None):
formats = {"pdb": PDBReader,
"gro": GROReader}
try:
ext = os.path.splitext(infile)[1][1:]
coords = formats[ext](infile)
except KeyError as e:
e.args = ("File extension '{0}' not recognised".format(ext),)
raise
self.coords = coords
self.moldb = MolDatabase() if moldb is None else moldb
self.atomdb = AtomDatabase() if atomdb is None else atomdb
self.bonddb = BondDatabase() if bonddb is None else bonddb
self.moltypes = []
self.atomtypes = []
self.lentypes = []
self.angtypes = []
self.dihtypes = []
self.imptypes = []
self.lenstyles = []
self.angstyles = []
self.dihstyles = []
self.impstyles = []
self.natoms = Counter()
self.nlengths = Counter()
self.nangles = Counter()
self.ndihedrals = Counter()
self.nimpropers = Counter()
def collect_types(self, add_water=True, allow_atom_subset=False):
"""
Collect all bead and bond types used in simulation.
Args:
add_water: Add water bead type even if not present in input coordinates?
allow_atom_subset: Allow converting only a subset of the atoms in coordinate file
"""
def collect_type(values, counter, db_vals, typelist, stylelist, nextmol_name):
for val in values:
try:
if re.fullmatch(val.ifnext, nextmol_name) is None:
continue
except AttributeError:
pass
except TypeError:
continue
counter.total += 1
if val.type not in typelist:
typelist.append(val.type)
counter.types += 1
if db_vals[val.type].style not in stylelist:
stylelist.append(db_vals[val.type].style)
atnum = 0
for i, mol in enumerate(self.coords.molecules):
dbmol = self.moldb.molecules[mol.name]
try:
nextmol_name = self.coords.molecules[i+1].name
except IndexError:
nextmol_name = None
if mol.name not in self.moltypes:
self.moltypes.append(mol.name)
collect_type(dbmol.lengths, self.nlengths, self.bonddb.length,
self.lentypes, self.lenstyles, nextmol_name)
collect_type(dbmol.angles, self.nangles, self.bonddb.angle,
self.angtypes, self.angstyles, nextmol_name)
collect_type(dbmol.dihedrals, self.ndihedrals, self.bonddb.dihedral,
self.dihtypes, self.dihstyles, nextmol_name)
collect_type(dbmol.impropers, self.nimpropers, self.bonddb.improper,
self.imptypes, self.impstyles, nextmol_name)
coordfile_atoms = [self.coords.atoms[x] for x in mol.atoms]
if allow_atom_subset:
if len(coordfile_atoms) < len(dbmol.atoms):
raise ValueError("Number of atoms is greater in coordinate file ({0}) than force field ({1}) for molecule {2}.".format(len(coordfile_atoms), len(dbmol.atoms), mol.name))
else:
if len(coordfile_atoms) != len(dbmol.atoms):
raise ValueError("Number of atoms does not match between coordinate file ({0}) and force field ({1}) for molecule {2}.".format(len(coordfile_atoms), len(dbmol.atoms), mol.name))
# Convert atoms from coordinate file that are present in database
for coordfile_atom in coordfile_atoms:
try:
dbmol_atom = dbmol.atoms[coordfile_atom.name]
except KeyError:
if allow_atom_subset:
continue
raise
if dbmol_atom.type not in self.atomtypes:
self.atomtypes.append(dbmol_atom.type)
self.natoms.types += 1
self.natoms.total += 1
atnum += 1
if add_water and "WAT" not in self.atomtypes:
self.atomtypes.append("WAT")
self.natoms.types += 1
def populate_pdb_data(self):
for mol in self.moldb.molecules.values():
for atom in mol.atoms.values():
atom.populate(self.atomdb.atoms[atom.type])
for atom in self.coords.atoms:
atom.populate(self.moldb.molecules[atom.resname].atoms[atom.name])
def write_data(self, filename):
with open(filename, "w") as data:
print("LAMMPS 'data.' input file created by PDB2LMP", file=data)
print(file=data)
print("{0:8d} atoms".format(self.natoms.total), file=data)
print("{0:8d} bonds".format(self.nlengths.total), file=data)
print("{0:8d} angles".format(self.nangles.total), file=data)
print("{0:8d} dihedrals".format(self.ndihedrals.total), file=data)
print("{0:8d} impropers".format(self.nimpropers.total), file=data)
print(file=data)
print("{0:8d} atom types".format(self.natoms.types), file=data)
print("{0:8d} bond types".format(self.nlengths.types), file=data)
print("{0:8d} angle types".format(self.nangles.types), file=data)
print("{0:8d} dihedral types".format(self.ndihedrals.types), file=data)
print("{0:8d} improper types".format(self.nimpropers.types), file=data)
print(file=data)
cell = [val / 2 for val in self.coords.cell]
if cell == [0, 0, 0]:
print("WARNING: The simulation box/unit cell size is zero.")
print(" If this is not intentional, please check your input files.")
print("{0:8.3f} {1:8.3f} xlo xhi".format(-cell[0], cell[0]), file=data)
print("{0:8.3f} {1:8.3f} ylo yhi".format(-cell[1], cell[1]), file=data)
print("{0:8.3f} {1:8.3f} zlo zhi".format(-cell[2], cell[2]), file=data)
print(file=data)
print("Atoms", file=data)
print(file=data)
atomline = "{0:6d} {1:4d} {2:8.3f} {3:8.3f} {4:8.3f} {5:4d} {6:5.2f} {7:8.3f} {8:8.3f} {9:8.3f} {10:5.2f} {11:5.2f}"
for i, atom in enumerate(self.coords.atoms, start=1):
# Write atom line
# Dipoles are all oriented up - this should equilibrate out quickly
print(atomline.format(i, self.atomtypes.index(atom.type)+1,
atom.x, atom.y, atom.z, atom.resid, atom.charge,
atom.dipole, 0, 0, atom.diameter, atom.rotmass), file=data)
            def write_bonds(n, types, header):
                if n <= 0:
                    return
                print("\n" + header + "\n", file=data)
                i = 1
                for ii, mol in enumerate(self.coords.molecules):
                    mol_db = self.moldb.molecules[mol.name]
                    try:
                        nextmol_name = self.coords.molecules[ii+1].name
                    except IndexError:
                        nextmol_name = None
                    atom_list = list(mol_db.atoms.keys())
                    for bond in getattr(mol_db, header.lower()):
                        try:
                            if re.fullmatch(bond.ifnext, nextmol_name) is None:
                                continue
                        except AttributeError:
                            pass
                        except TypeError:
                            continue
                        print("{0:6d} {1:4d}".format(i, types.index(bond.type) + 1), file=data, end="")
                        for atom in bond.atoms:
                            try:
                                atom_num = mol.atoms[atom_list.index(atom)]
                            except ValueError:
                                if atom.startswith("+"):
                                    other_mol = self.coords.molecules[ii + 1]
                                elif atom.startswith("-"):
                                    other_mol = self.coords.molecules[ii - 1]
                                else:
                                    raise
                                other_mol_db = self.moldb.molecules[other_mol.name]
                                try:
                                    if not mol_db.polymer_type.intersection(other_mol_db.polymer_type):
                                        raise PolymerError(mol.name, other_mol.name) from None
                                except AttributeError:
                                    raise PolymerError(mol.name, other_mol.name) from None
                                other_atom_list = list(other_mol_db.atoms.keys())
                                atom_num = other_mol.atoms[other_atom_list.index(atom[1:])]
                            print(" {0:6d}".format(atom_num + 1), file=data, end="")
                        print(file=data)
                        i += 1
            write_bonds(self.nlengths.total, self.lentypes, "Bonds")
            write_bonds(self.nangles.total, self.angtypes, "Angles")
            write_bonds(self.ndihedrals.total, self.dihtypes, "Dihedrals")
            write_bonds(self.nimpropers.total, self.imptypes, "Impropers")
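    # Write the companion LAMMPS input fragment: pair style, special_bonds, hybrid
    # bonded styles, per-type masses and diameters, Lennard-Jones pair coefficients
    # and bonded coefficients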
def write_forcefield(self, filename):
with open(filename, "w") as ff:
print("# Forcefield prepared by PDB2LMP", file=ff)
print(file=ff)
# TODO change these to "0.0 1.0 1.0 12.0" - ELBA standard
print("pair_style lj/sf/dipole/sf 12.0", file=ff)
print("special_bonds lj/coul 0.0 0.0 0.0", file=ff)
print(file=ff)
def write_styles(styles, header):
if styles:
print(header, file=ff, end="")
for style in styles:
print(" " + style, file=ff, end="")
print(file=ff)
write_styles(self.lenstyles, "bond_style hybrid")
write_styles(self.angstyles, "angle_style hybrid")
write_styles(self.dihstyles, "dihedral_style hybrid")
write_styles(self.impstyles, "improper_style hybrid")
print(file=ff)
line = "mass {0:4d} {1:8.3f} # {2}"
for i, atomtype in enumerate(self.atomtypes, start=1):
print(line.format(i, self.atomdb.atoms[atomtype].mass, atomtype), file=ff)
print(file=ff)
line = "set type{0:4d} diameter {1:8.3f} # {2}"
for i, atomtype in enumerate(self.atomtypes, start=1):
print(line.format(i, self.atomdb.atoms[atomtype].diameter, atomtype), file=ff)
print(file=ff)
line = "pair_coeff {0:4d} {1:4d} {2:6.3f} {3:6.3f} # {4}-{5}"
for i, atomtype in enumerate(self.atomtypes, start=1):
for j, atomtype2 in enumerate(self.atomtypes, start=1):
if i > j:
continue
sig, eps = self.atomdb.lj(atomtype, atomtype2)
print(line.format(i, j, eps, sig, atomtype, atomtype2), file=ff)
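            # Emit one coefficient line per collected bonded type, using the style
            # and parameters stored in the bond database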
def write_types(types, db_vals, line_prefix):
if types:
print(file=ff)
line = line_prefix + " {0:4d} {1} {2} # {3}"
for i, tipe in enumerate(types, start=1):
db_type = db_vals[tipe]
print(line.format(i, db_type.style, db_type.params, tipe), file=ff)
write_types(self.lentypes, self.bonddb.length, "bond_coeff")
write_types(self.angtypes, self.bonddb.angle, "angle_coeff")
write_types(self.dihtypes, self.bonddb.dihedral, "dihedral_coeff")
write_types(self.imptypes, self.bonddb.improper, "improper_coeff")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert PDB/GRO into LAMMPS input files.")
parser.add_argument("infile", type=str,
help="PDB/GRO to convert")
parser.add_argument("outfiles", type=str, default="out",
help="output filenames")
args = parser.parse_args()
conv = PDB2LMP(args.infile)
conv.collect_types()
conv.populate_pdb_data()
conv.write_data(args.outfiles + ".data")
conv.write_forcefield(args.outfiles + ".ff")
|
jag1g13/pdb2lmp
|
pdb2lmp.py
|
Python
|
mit
| 13,610
|
[
"LAMMPS"
] |
ea8b54ca0fff55f736198d1d466db55a14476729d5d2ee120208a63d411eae0e
|
# -*- coding: utf-8 -*-
"""
sphinx.cmdline
~~~~~~~~~~~~~~
sphinx-build command-line handling.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import sys
import optparse
import traceback
from os import path
from six import text_type, binary_type
from docutils.utils import SystemMessage
from sphinx import __display_version__
from sphinx.errors import SphinxError
from sphinx.application import Sphinx
from sphinx.util import Tee, format_exception_cut_frames, save_traceback
from sphinx.util.console import red, nocolor, color_terminal
from sphinx.util.docutils import docutils_namespace
from sphinx.util.osutil import abspath, fs_encoding
from sphinx.util.pycompat import terminal_safe
USAGE = """\
Sphinx v%s
Usage: %%prog [options] sourcedir outdir [filenames...]
Filename arguments:
without -a and without filenames, write new and changed files.
with -a, write all files.
with filenames, write these.
""" % __display_version__
EPILOG = """\
For more information, visit <http://sphinx-doc.org/>.
"""
class MyFormatter(optparse.IndentedHelpFormatter):
def format_usage(self, usage):
return usage
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
def handle_exception(app, opts, exception, stderr=sys.stderr):
if opts.pdb:
import pdb
print(red('Exception occurred while building, starting debugger:'),
file=stderr)
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
else:
print(file=stderr)
if opts.verbosity or opts.traceback:
traceback.print_exc(None, stderr)
print(file=stderr)
if isinstance(exception, KeyboardInterrupt):
print('interrupted!', file=stderr)
elif isinstance(exception, SystemMessage):
print(red('reST markup error:'), file=stderr)
print(terminal_safe(exception.args[0]), file=stderr)
elif isinstance(exception, SphinxError):
print(red('%s:' % exception.category), file=stderr)
print(terminal_safe(text_type(exception)), file=stderr)
elif isinstance(exception, UnicodeError):
print(red('Encoding error:'), file=stderr)
print(terminal_safe(text_type(exception)), file=stderr)
tbpath = save_traceback(app)
print(red('The full traceback has been saved in %s, if you want '
'to report the issue to the developers.' % tbpath),
file=stderr)
elif isinstance(exception, RuntimeError) and 'recursion depth' in str(exception):
print(red('Recursion error:'), file=stderr)
print(terminal_safe(text_type(exception)), file=stderr)
print(file=stderr)
print('This can happen with very large or deeply nested source '
'files. You can carefully increase the default Python '
'recursion limit of 1000 in conf.py with e.g.:', file=stderr)
print(' import sys; sys.setrecursionlimit(1500)', file=stderr)
else:
print(red('Exception occurred:'), file=stderr)
print(format_exception_cut_frames().rstrip(), file=stderr)
tbpath = save_traceback(app)
print(red('The full traceback has been saved in %s, if you '
'want to report the issue to the developers.' % tbpath),
file=stderr)
print('Please also report this if it was a user error, so '
'that a better error message can be provided next time.',
file=stderr)
print('A bug report can be filed in the tracker at '
'<https://github.com/sphinx-doc/sphinx/issues>. Thanks!',
file=stderr)
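# Entry point for sphinx-build: parse options, validate the source and output
# directories and the configuration, then run the selected builder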
def main(argv):
if not color_terminal():
nocolor()
parser = optparse.OptionParser(USAGE, epilog=EPILOG, formatter=MyFormatter())
parser.add_option('--version', action='store_true', dest='version',
help='show version information and exit')
group = parser.add_option_group('General options')
group.add_option('-b', metavar='BUILDER', dest='builder', default='html',
help='builder to use; default is html')
group.add_option('-a', action='store_true', dest='force_all',
help='write all files; default is to only write new and '
'changed files')
group.add_option('-E', action='store_true', dest='freshenv',
help='don\'t use a saved environment, always read '
'all files')
group.add_option('-d', metavar='PATH', default=None, dest='doctreedir',
help='path for the cached environment and doctree files '
'(default: outdir/.doctrees)')
group.add_option('-j', metavar='N', default=1, type='int', dest='jobs',
help='build in parallel with N processes where possible')
# this option never gets through to this point (it is intercepted earlier)
# group.add_option('-M', metavar='BUILDER', dest='make_mode',
# help='"make" mode -- as used by Makefile, like '
# '"sphinx-build -M html"')
group = parser.add_option_group('Build configuration options')
group.add_option('-c', metavar='PATH', dest='confdir',
help='path where configuration file (conf.py) is located '
'(default: same as sourcedir)')
group.add_option('-C', action='store_true', dest='noconfig',
help='use no config file at all, only -D options')
group.add_option('-D', metavar='setting=value', action='append',
dest='define', default=[],
help='override a setting in configuration file')
group.add_option('-A', metavar='name=value', action='append',
dest='htmldefine', default=[],
help='pass a value into HTML templates')
group.add_option('-t', metavar='TAG', action='append',
dest='tags', default=[],
help='define tag: include "only" blocks with TAG')
group.add_option('-n', action='store_true', dest='nitpicky',
help='nit-picky mode, warn about all missing references')
group = parser.add_option_group('Console output options')
group.add_option('-v', action='count', dest='verbosity', default=0,
help='increase verbosity (can be repeated)')
group.add_option('-q', action='store_true', dest='quiet',
help='no output on stdout, just warnings on stderr')
group.add_option('-Q', action='store_true', dest='really_quiet',
help='no output at all, not even warnings')
group.add_option('-N', action='store_true', dest='nocolor',
help='do not emit colored output')
group.add_option('-w', metavar='FILE', dest='warnfile',
help='write warnings (and errors) to given file')
group.add_option('-W', action='store_true', dest='warningiserror',
help='turn warnings into errors')
group.add_option('-T', action='store_true', dest='traceback',
help='show full traceback on exception')
group.add_option('-P', action='store_true', dest='pdb',
help='run Pdb on exception')
# parse options
try:
opts, args = parser.parse_args(list(argv[1:]))
except SystemExit as err:
return err.code
# handle basic options
if opts.version:
print('Sphinx (sphinx-build) %s' % __display_version__)
return 0
# get paths (first and second positional argument)
try:
srcdir = abspath(args[0])
confdir = abspath(opts.confdir or srcdir)
if opts.noconfig:
confdir = None
if not path.isdir(srcdir):
print('Error: Cannot find source directory `%s\'.' % srcdir,
file=sys.stderr)
return 1
if not opts.noconfig and not path.isfile(path.join(confdir, 'conf.py')):
print('Error: Config directory doesn\'t contain a conf.py file.',
file=sys.stderr)
return 1
outdir = abspath(args[1])
if srcdir == outdir:
print('Error: source directory and destination directory are same.',
file=sys.stderr)
return 1
except IndexError:
parser.print_help()
return 1
except UnicodeError:
print(
'Error: Multibyte filename not supported on this filesystem '
'encoding (%r).' % fs_encoding, file=sys.stderr)
return 1
# handle remaining filename arguments
filenames = args[2:]
err = 0
for filename in filenames:
if not path.isfile(filename):
print('Error: Cannot find file %r.' % filename, file=sys.stderr)
err = 1
if err:
return 1
# likely encoding used for command-line arguments
try:
locale = __import__('locale') # due to submodule of the same name
likely_encoding = locale.getpreferredencoding()
except Exception:
likely_encoding = None
if opts.force_all and filenames:
print('Error: Cannot combine -a option and filenames.', file=sys.stderr)
return 1
if opts.nocolor:
nocolor()
doctreedir = abspath(opts.doctreedir or path.join(outdir, '.doctrees'))
status = sys.stdout
warning = sys.stderr
error = sys.stderr
if opts.quiet:
status = None
if opts.really_quiet:
status = warning = None
if warning and opts.warnfile:
try:
warnfp = open(opts.warnfile, 'w')
except Exception as exc:
print('Error: Cannot open warning file %r: %s' %
(opts.warnfile, exc), file=sys.stderr)
sys.exit(1)
warning = Tee(warning, warnfp)
error = warning
confoverrides = {}
for val in opts.define:
try:
key, val = val.split('=', 1)
except ValueError:
print('Error: -D option argument must be in the form name=value.',
file=sys.stderr)
return 1
if likely_encoding and isinstance(val, binary_type):
try:
val = val.decode(likely_encoding)
except UnicodeError:
pass
confoverrides[key] = val
for val in opts.htmldefine:
try:
key, val = val.split('=')
except ValueError:
print('Error: -A option argument must be in the form name=value.',
file=sys.stderr)
return 1
try:
val = int(val)
except ValueError:
if likely_encoding and isinstance(val, binary_type):
try:
val = val.decode(likely_encoding)
except UnicodeError:
pass
confoverrides['html_context.%s' % key] = val
if opts.nitpicky:
confoverrides['nitpicky'] = True
app = None
try:
with docutils_namespace():
app = Sphinx(srcdir, confdir, outdir, doctreedir, opts.builder,
confoverrides, status, warning, opts.freshenv,
opts.warningiserror, opts.tags, opts.verbosity, opts.jobs)
app.build(opts.force_all, filenames)
return app.statuscode
except (Exception, KeyboardInterrupt) as exc:
handle_exception(app, opts, exc, error)
return 1
|
axbaretto/beam
|
sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/cmdline.py
|
Python
|
apache-2.0
| 11,907
|
[
"VisIt"
] |
7a4ca263a91acbe6e0d19deeb6f80a74151f6013bd600fc6dbbb2f01077daa2a
|
#!/usr/bin/env python
"""
Install.py tool to build the CSlib library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, os, subprocess, shutil
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import fullpath
parser = ArgumentParser(prog='Install.py',
description="LAMMPS library build wrapper script")
# help message
HELP = """
Syntax from src dir: make lib-message args="-m"
or: make lib-message args="-s -z"
Syntax from lib dir: python Install.py -m
or: python Install.py -s -z
Example:
make lib-message args="-m -z" # build parallel CSlib with ZMQ support
make lib-message args="-s" # build serial CSlib with no ZMQ support
"""
pgroup = parser.add_mutually_exclusive_group()
pgroup.add_argument("-m", "--mpi", action="store_true",
help="parallel build of CSlib with MPI")
pgroup.add_argument("-s", "--serial", action="store_true",
help="serial build of CSlib")
parser.add_argument("-z", "--zmq", default=False, action="store_true",
help="build CSlib with ZMQ socket support, default ()")
args = parser.parse_args()
# print help message and exit if no build option is given
if not args.mpi and not args.serial:
parser.print_help()
sys.exit(HELP)
mpiflag = args.mpi
serialflag = args.serial
zmqflag = args.zmq
# build CSlib
# copy resulting lib to cslib/src/libmessage.a
# copy appropriate Makefile.lammps.* to Makefile.lammps
print("Building CSlib ...")
srcdir = fullpath(os.path.join("cslib", "src"))
if mpiflag and zmqflag:
cmd = "make -C %s lib_parallel" % srcdir
elif mpiflag and not zmqflag:
cmd = "make -C %s lib_parallel zmq=no" % srcdir
elif not mpiflag and zmqflag:
cmd = "make -C %s lib_serial" % srcdir
elif not mpiflag and not zmqflag:
cmd = "make -C %s lib_serial zmq=no" % srcdir
print(cmd)
try:
txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print(txt.decode('UTF-8'))
except subprocess.CalledProcessError as e:
print("Make failed with:\n %s" % e.output.decode('UTF-8'))
sys.exit(1)
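# pick the static library produced by the build (serial or MPI flavour) and
# install it under the generic name libmessage.a used by the LAMMPS build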
slb = os.path.join(srcdir, "libcsnompi.a")
if mpiflag:
slb = os.path.join(srcdir, "libcsmpi.a")
shutil.copyfile(slb, os.path.join(srcdir, "libmessage.a"))
smk = "Makefile.lammps.nozmq"
if zmqflag:
smk = "Makefile.lammps.zmq"
shutil.copyfile(smk, "Makefile.lammps")
print("Using %s for Makefile.lammps" % smk)
|
akohlmey/lammps
|
lib/message/Install.py
|
Python
|
gpl-2.0
| 2,521
|
[
"LAMMPS"
] |
dfb884a050e051b299b018cd5f85096847cdb6d389a1dc7b5e4ab8c64c032240
|