text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# encoding:utf8
"""
DragonPy - Dragon 32 emulator in Python
=======================================
http://www.6809.org.uk/dragon/hardware.shtml#sam
$ffc0-ffdf SAM (Synchronous Address Multiplexer) register bits - use
even address to clear, odd address to set
$ffc0-ffc5 SAM VDG Mode registers V0-V2
$ffc0/ffc1 SAM VDG Reg V0
$ffc2/ffc3 SAM VDG Reg V1
$ffc4/ffc5 SAM VDG Reg V2
$ffc6-ffd3 SAM Display offset in 512 byte pages F0-F6
$ffc6/ffc7 SAM Display Offset bit F0
$ffc8/ffc9 SAM Display Offset bit F1
$ffca/ffcb SAM Display Offset bit F2
$ffcc/ffcd SAM Display Offset bit F3
$ffce/ffcf SAM Display Offset bit F4
$ffd0/ffd1 SAM Display Offset bit F5
$ffd2/ffd3 SAM Display Offset bit F6
$ffd4/ffd5 SAM Page #1 bit - in D64 maps upper 32K Ram to $0000 to $7fff
$ffd6-ffd9 SAM MPU Rate R0-R1
$ffd6/ffd7 SAM MPU Rate bit R0
$ffd8/ffd9 SAM MPU Rate bit R1
$ffda-ffdd SAM Memory Size select M0-M1
$ffda/ffdb SAM Memory Size select bit M0
$ffdc/ffdd SAM Memory Size select bit M1
$ffde/ffdf SAM Map Type - in D64 switches in upper 32K RAM $8000-$feff
from http://archive.worldofdragon.org/index.php?title=Dragon_32_-_64K_Upgrade#APPENDICES. :
Most well—known of these operations is the so-called 'speed-up poke'
(POKE&HFFD7,0 and its reverse, POKE&HFFD6,0); however, of more concern to us
here is the Map Type Bit (TY), set by FFDF, cleared by FFDE; the Page Bit (Pl),
set by FFD5, cleared by FFD4; and the Memory Size Bits (M0 A Ml) set/cleared by
FFDB/FFDA & FFDD/FFDC respectively. Of the remaining addresses, FFD6 to FFD9
control the 2 clockrate bits (R0 & Rl); FFC6 to FFD3 control 7 bits (F0 to F6)
giving the base address of the current Video-RAM (in units of 512 bytes); and
FFC0 to FFC5 control 3 VDG Mode bits (V0 to V2).
:created: 2013 by Jens Diemer - www.jensdiemer.de
:copyleft: 2013-2014 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
Based on: XRoar emulator by Ciaran Anscomb (GPL license) more info, see README
"""
import logging

log = logging.getLogger(__name__)


class SAM(object):
    """
    MC6883 (74LS783) Synchronous Address Multiplexer (SAM)

    Registers read/write byte callbacks for the SAM control register area
    ($ffc0-$ffdf), sets up the periodic IRQ trigger and redirects reads of
    the interrupt vector area ($fff0-$ffff) into ROM.
    """
    # IRQ period in CPU cycles, see:
    # http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4894&p=11730#p11726
    IRQ_CYCLES = 17784

    def __init__(self, cfg, cpu, memory):
        self.cfg = cfg
        self.cpu = cpu
        self.memory = memory

        # Let the CPU call irq_trigger() every IRQ_CYCLES cycles.
        self.cpu.add_sync_callback(callback_cycles=self.IRQ_CYCLES, callback=self.irq_trigger)

        self.memory.add_read_byte_callback(self.read_VDG_mode_register_v1, 0xffc2)

        # Data-driven registration of the write handlers.  Replaces the old
        # copy & paste list (and resolves the former TODO about collecting
        # this information via a decorator similar to op codes in the CPU).
        for address, callback in (
            (0xffc0, self.write_VDG_mode_register_v0),
            (0xffc2, self.write_VDG_mode_register_v1),
            (0xffc4, self.write_VDG_mode_register_v2),
            (0xffc6, self.write_display_offset_F0),
            (0xffc8, self.write_display_offset_F1),
            (0xffca, self.write_display_offset_F2),
            (0xffcc, self.write_display_offset_F3),
            (0xffce, self.write_display_offset_F4),
            (0xffd0, self.write_display_offset_F5),
            (0xffd2, self.write_display_offset_F6),
            (0xffd4, self.write_page_bit),
            (0xffd6, self.write_MPU_rate_bit0),
            (0xffd8, self.write_MPU_rate_bit1),
            (0xffda, self.write_size_select_bit0),
            (0xffdc, self.write_size_select_bit1),
            (0xffde, self.write_map_type),
            (0xffdd, self.write_map0),
            # Dragon 64 only:
            (0xffc9, self.write_D64_dynamic_memory),
        ):
            self.memory.add_write_byte_callback(callback, address)

        # The CPU fetches its interrupt vectors from $fff0-$ffff; the SAM
        # redirects those reads (see interrupt_vectors()).
        self.memory.add_read_byte_callback(self.interrupt_vectors, 0xfff0, 0xffff)

    def reset(self):
        log.critical("TODO: VDG reset")

    def irq_trigger(self, call_cycles):
        """Periodic IRQ, invoked by the CPU sync callback every IRQ_CYCLES cycles."""
        # log.critical("%04x| SAM irq trigger called %i cycles to late",
        #     self.cpu.last_op_address, call_cycles - self.IRQ_CYCLES
        # )
        self.cpu.irq()

    def interrupt_vectors(self, cpu_cycles, op_address, address):
        """
        Redirect a read of the interrupt vector area $fff0-$ffff to the
        copy located 0x4000 lower ($bff0-$bfff) and return that byte.
        """
        new_address = address - 0x4000
        value = self.memory.read_byte(new_address)
        # log.critical("read interrupt vector $%04x redirect in SAM to $%04x use value $%02x",
        #     address, new_address, value
        # )
        return value

    # def read_VDG_mode_register_v0(self, cpu_cycles, op_address, address):
    #     log.debug("TODO: read VDG mode register V0 $%04x", address)
    #     return 0x00

    def read_VDG_mode_register_v1(self, cpu_cycles, op_address, address):
        # Placeholder: register reads are not emulated yet, always 0x00.
        log.debug("TODO: read VDG mode register V1 $%04x", address)
        return 0x00

    # --------------------------------------------------------------------------
    # The write handlers below are stubs: they only log the access until the
    # corresponding SAM behaviour is emulated.

    def write_VDG_mode_register_v0(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write VDG mode register V0 $%02x to $%04x", value, address)

    def write_VDG_mode_register_v1(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write VDG mode register V1 $%02x to $%04x", value, address)

    def write_VDG_mode_register_v2(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write VDG mode register V2 $%02x to $%04x", value, address)

    def write_display_offset_F0(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write display_offset_F0 $%02x to $%04x", value, address)

    def write_display_offset_F1(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write display_offset_F1 $%02x to $%04x", value, address)

    def write_display_offset_F2(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write display_offset_F2 $%02x to $%04x", value, address)

    def write_display_offset_F3(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write display_offset_F3 $%02x to $%04x", value, address)

    def write_display_offset_F4(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write display_offset_F4 $%02x to $%04x", value, address)

    def write_display_offset_F5(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write display_offset_F5 $%02x to $%04x", value, address)

    def write_display_offset_F6(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write display_offset_F6 $%02x to $%04x", value, address)

    def write_page_bit(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write page_bit $%02x to $%04x", value, address)

    def write_MPU_rate_bit0(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write MPU_rate_bit0 $%02x to $%04x", value, address)

    def write_MPU_rate_bit1(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write MPU_rate_bit1 $%02x to $%04x", value, address)

    def write_size_select_bit0(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write size_select_bit0 $%02x to $%04x", value, address)

    def write_size_select_bit1(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write size_select_bit1 $%02x to $%04x", value, address)

    def write_map_type(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write map_type $%02x to $%04x", value, address)

    def write_map0(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write map0 $%02x to $%04x", value, address)

    def write_D64_dynamic_memory(self, cpu_cycles, op_address, address, value):
        log.debug("TODO: write D64_dynamic_memory $%02x to $%04x", value, address)
#------------------------------------------------------------------------------
| JuhaniImberg/DragonPy | dragonpy/Dragon32/MC6883_SAM.py | Python | gpl-3.0 | 8,433 | [
"FEFF"
] | ad7dbeb439d52f692b6aa4a9c7615ede9aa26727dac4023292e71658a87322ad |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Rendering window, renderer and interactor.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)


def _make_cone_actor(resolution):
    """Build a cone source of the given resolution and return its actor."""
    source = vtk.vtkConeSource()
    source.SetResolution(resolution)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    return actor


# Cones of varying resolution (0, 1, 2 and 8 facets).
cone0Actor = _make_cone_actor(0)
cone1Actor = _make_cone_actor(1)
cone2Actor = _make_cone_actor(2)
cone8Actor = _make_cone_actor(8)

# Assign all actors to the renderer.
for actor in (cone0Actor, cone1Actor, cone2Actor, cone8Actor):
    ren1.AddActor(actor)
ren1.SetBackground(.5, .5, .5)
ren1.ResetCamera()
ren1.GetActiveCamera().Elevation(30)
ren1.GetActiveCamera().Dolly(1.3)
ren1.ResetCameraClippingRange()
renWin.SetSize(301, 91)

# Spread the cones along the x axis and give them distinct colours.
cone0Actor.SetPosition(-1.5, 0, 0)
cone1Actor.SetPosition(-.5, 0, 0)
cone2Actor.SetPosition(.5, 0, 0)
cone8Actor.SetPosition(1.5, 0, 0)
cone0Actor.GetProperty().SetDiffuseColor(1, 0, 0)
cone1Actor.GetProperty().SetDiffuseColor(0, 1, 0)
cone8Actor.GetProperty().BackfaceCullingOn()
cone8Actor.GetProperty().SetDiffuseColor(0, 0, 1)

# enable user interface interactor
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| hlzz/dotfiles | graphics/VTK-7.0.0/Filters/Sources/Testing/Python/coneResolution.py | Python | bsd-3-clause | 2,016 | [
"VTK"
] | 422fa859e27202fbbac2367fda538cda2164812e10af45a5357acb19569c525f |
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# Please contact Eucalyptus Systems, Inc., 6755 Hollister Ave., Goleta
# CA 93117, USA or visit http://www.eucalyptus.com/licenses/ if you need
# additional information or have any questions.
#
#
# Order matters here. We want to make sure we initialize logging before anything
# else happens. We need to initialize the logger that boto will be using.
#
# Intentionally an empty package initializer: the logging setup described
# above is expected to happen at import time, before boto is first used.
pass
| eucalyptus/eucalyptus-database-server | eucalib/__init__.py | Python | bsd-2-clause | 993 | [
"VisIt"
] | 8954a12711c889813ff7bdc8f4afaf4971ecf2f943033606b252f746bc79d6e6 |
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Function/Three
"""
# import general python modules
import os
import gdal
import pandas as pd
import numpy as np
import netCDF4
def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, ETref_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation):
    """
    This functions is the main framework for calculating sheet 3.

    Parameters
    ----------
    WA_HOME_folder : str
        Path to the WA home folder; when empty the ``WA_HOME`` environment
        variable is used instead
    Basin : str
        Name of the basin
    P_Product : str
        Name of the rainfall product that will be used
    ET_Product : str
        Name of the evapotranspiration product that will be used
    LAI_Product : str
        Name of the LAI product that will be used
    NDM_Product : str
        Name of the NDM product that will be used
    NDVI_Product : str
        Name of the NDVI product that will be used
    ETref_Product : str
        Name of the reference evapotranspiration product that will be used
    dict_crops : dict
        Definitions of the crop classes for sheet 3
    dict_non_crops : dict
        Definitions of the non-crop classes for sheet 3
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    Simulation : int
        Defines the simulation
    """
    ######################### Import WA modules ###################################

    from wa.General import raster_conversions as RC
    from wa.General import data_conversions as DC
    import wa.Functions.Three as Three
    import wa.Functions.Two as Two
    import wa.Functions.Start as Start
    import wa.Functions.Four as Four
    import wa.Generator.Sheet3 as Generate
    import wa.Functions.Start.Get_Dictionaries as GD

    ######################### Set General Parameters ##############################

    # Check if there is a full year selected between Startdate and Enddate,
    # otherwise Sheet 3 cannot be produced.
    # BUGFIX: the original code used a bare ``quit`` (the builtin object,
    # never called), so the abort was a no-op and execution continued with
    # an invalid period. Return from the function instead.
    try:
        years_end = pd.date_range(Startdate, Enddate, freq="A").year
        years_start = pd.date_range(Startdate, Enddate, freq="AS").year
        if (len(years_start) == 0 or len(years_end) == 0):
            print("Calculation period is less than a year, which is not possible for sheet 3")
            return()
        years = np.unique(np.append(years_end, years_start))
    except:
        # pd.date_range raises for malformed/reversed date strings as well.
        print("Calculation period is less than a year, which is not possible for sheet 3")
        return()

    # Get environmental variable for the Home folder
    if WA_HOME_folder == '':
        WA_env_paths = os.environ["WA_HOME"].split(';')
        Dir_Home = WA_env_paths[0]
    else:
        Dir_Home = WA_HOME_folder

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    output_dir = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" % Simulation)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Get the boundaries of the basin based on the shapefile of the watershed
    # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
    Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)

    ############################# Download Data ###################################

    # Check the years that needs to be calculated
    # NOTE: this intentionally replaces the ``years`` computed above with a
    # plain per-calendar-year range.
    years = range(int(Startdate.split('-')[0]), int(Enddate.split('-')[0]) + 1)

    # Find the maximum moving window value (in months) over all classes.
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version='1.0')
    Additional_Months_tail = np.max(list(Moving_Window_Per_Class_dict.values()))

    for year in years:

        # Create Start and End date for time chunk
        Startdate_part = '%d-01-01' % int(year)
        Enddate_part = '%s-12-31' % year

        # Create .nc file if not exists
        nc_outname = os.path.join(output_dir, "%d.nc" % year)
        if not os.path.exists(nc_outname):
            DC.Create_new_NC_file(nc_outname, Example_dataset, Basin)

        # Set Startdate for moving average: the first year needs extra
        # months of data before Startdate to fill the moving window.
        if int(year) == int(years[0]):
            Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months=Additional_Months_tail)
            Startdate_Moving_Average_String = Startdate_Moving_Average.strftime('%Y-%m-%d')
        else:
            Startdate_Moving_Average_String = Startdate_part

        # Open variables in netcdf to see what was already computed.
        fh = netCDF4.Dataset(nc_outname)
        Variables_NC = [var for var in fh.variables]
        fh.close()

        # Download data (only the datasets that are still missing)
        if not "Precipitation" in Variables_NC:
            Data_Path_P_Monthly = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']], [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part, Enddate_part, P_Product)

        if not "Actual_Evapotransporation" in Variables_NC:
            Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']], [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part, Enddate_part, ET_Product)

        if not "Reference_Evapotranspiration" in Variables_NC:
            Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']], [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_part, ETref_Product)

        if not "NDVI" in Variables_NC:
            Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']], [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part, Enddate_part)

        if not "Normalized_Dry_Matter" in Variables_NC:
            Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']], [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part, Enddate_part, NDM_Product)
            Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']], [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part, Enddate_part, NDM_Product)

        ########################### Create input data #################################

        if not "Normalized_Dry_Matter" in Variables_NC:
            # Create NDM based on MOD17
            if NDM_Product == 'MOD17':
                # Create monthly GPP
                Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Data_Path_GPP, Startdate_part, Enddate_part)
                Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate_part, Enddate_part)

        if not "NDVI" in Variables_NC:
            # Create monthly NDVI based on MOD13
            if NDVI_Product == 'MOD13':
                Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Data_Path_NDVI, Startdate_part, Enddate_part)

        ###################### Save Data as netCDF files ##############################

        #______________________________Precipitation_______________________________
        # 1.) Precipitation data
        if not "Precipitation" in Variables_NC:
            # Get the data of Precipitation and save as nc
            DataCube_Prec = RC.Get3Darray_time_series_monthly(Data_Path_P_Monthly, Startdate_part, Enddate_part, Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Prec, "Precipitation", "mm/month", 0.01)
            del DataCube_Prec

        #_______________________________Evaporation________________________________
        # 2.) Evapotranspiration data
        if not "Actual_Evapotranspiration" in Variables_NC:
            # Get the data of Evaporation and save as nc
            DataCube_ET = RC.Get3Darray_time_series_monthly(Data_Path_ET, Startdate_part, Enddate_part, Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ET, "Actual_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ET

        #___________________________Normalized Dry Matter__________________________
        # 3.) Normalized Dry Matter
        if not "Normalized_Dry_Matter" in Variables_NC:
            # Get the data of Evaporation and save as nc
            DataCube_NDM = RC.Get3Darray_time_series_monthly(Data_Path_NDM, Startdate_part, Enddate_part, Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NDM, "Normalized_Dry_Matter", "kg_ha", 0.01)
            del DataCube_NDM

        #_______________________Reference Evaporation______________________________
        # 4.) Reference Evapotranspiration data
        if not "Reference_Evapotranspiration" in Variables_NC:
            # Get the data of Precipitation and save as nc
            DataCube_ETref = RC.Get3Darray_time_series_monthly(Data_Path_ETref, Startdate_part, Enddate_part, Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETref, "Reference_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETref

        #____________________________________NDVI__________________________________
        # 5.) NDVI data
        if not "NDVI" in Variables_NC:
            # Get the data of NDVI and save as nc
            DataCube_NDVI = RC.Get3Darray_time_series_monthly(Data_Path_NDVI, Startdate_part, Enddate_part, Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NDVI, "NDVI", "Fraction", 0.0001)
            del DataCube_NDVI

        ############################# Calculate Sheet 3 ###########################

        #____________ Evapotranspiration data split in ETblue and ETgreen ____________

        if not ("Blue_Evapotranspiration" in Variables_NC or "Green_Evapotranspiration" in Variables_NC):
            # Calculate Blue and Green ET
            DataCube_ETblue, DataCube_ETgreen = Four.SplitET.Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate, Enddate)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETblue, "Blue_Evapotranspiration", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETgreen, "Green_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETblue, DataCube_ETgreen

        #____________________________ Create the empty dictionaries ____________________________

        # Create the dictionaries that are required for sheet 3
        wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()

        #____________________________________ Fill in the dictionaries ________________________

        # Fill in the crops dictionaries
        wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, nc_outname, Dir_Basin)

        # Fill in the non crops dictionaries
        wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)

        ############################ Create CSV 3 #################################

        csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)

        ############################ Create Sheet 3 ###############################

        Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)

    return()
| wateraccounting/wa | Generator/Sheet3/main.py | Python | apache-2.0 | 11,082 | [
"NetCDF"
] | a429fc4cf5a8a21850d2c69bb38145959949a9e731beec646d07b3381a4b5081 |
# (c) 2017, Brian Coca <bcoca@ansible.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import optparse
from operator import attrgetter
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.inventory.host import Host
from ansible.plugins.loader import vars_loader
from ansible.parsing.dataloader import DataLoader
from ansible.utils.vars import combine_vars
from ansible.utils.display import Display
display = Display()

# Variables injected by Ansible itself rather than defined by the user's
# inventory; these are stripped from the output by _remove_internal().
INTERNAL_VARS = frozenset(['ansible_diff_mode',
                           'ansible_facts',
                           'ansible_forks',
                           'ansible_inventory_sources',
                           'ansible_limit',
                           'ansible_playbook_python',
                           'ansible_run_tags',
                           'ansible_skip_tags',
                           'ansible_verbosity',
                           'ansible_version',
                           'inventory_dir',
                           'inventory_file',
                           'inventory_hostname',
                           'inventory_hostname_short',
                           'groups',
                           'group_names',
                           'omit',
                           'playbook_dir', ])
class InventoryCLI(CLI):
''' used to display or dump the configured inventory as Ansible sees it '''
ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
'group': 'The name of a group in the inventory, relevant when using --graph', }
def __init__(self, args):
super(InventoryCLI, self).__init__(args)
self.vm = None
self.loader = None
self.inventory = None
self._new_api = True
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [host|group]',
epilog='Show Ansible inventory information, by default it uses the inventory script JSON format',
inventory_opts=True,
vault_opts=True,
basedir_opts=True,
)
# remove unused default options
self.parser.remove_option('--limit')
self.parser.remove_option('--list-hosts')
# Actions
action_group = optparse.OptionGroup(self.parser, "Actions", "One of following must be used on invocation, ONLY ONE!")
action_group.add_option("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
action_group.add_option("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
action_group.add_option("--graph", action="store_true", default=False, dest='graph',
help='create inventory graph, if supplying pattern it must be a valid group name')
self.parser.add_option_group(action_group)
# graph
self.parser.add_option("-y", "--yaml", action="store_true", default=False, dest='yaml',
help='Use YAML format instead of default JSON, ignored for --graph')
self.parser.add_option('--toml', action='store_true', default=False, dest='toml',
help='Use TOML format instead of default JSON, ignored for --graph')
self.parser.add_option("--vars", action="store_true", default=False, dest='show_vars',
help='Add vars to graph display, ignored unless used with --graph')
# list
self.parser.add_option("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
help="When doing an --list, represent in a way that is optimized for export,"
"not as an accurate representation of how Ansible has processed it")
# self.parser.add_option("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
# help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
super(InventoryCLI, self).parse()
display.verbosity = self.options.verbosity
self.validate_conflicts(vault_opts=True)
# there can be only one! and, at least, one!
used = 0
for opt in (self.options.list, self.options.host, self.options.graph):
if opt:
used += 1
if used == 0:
raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
elif used > 1:
raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
# set host pattern to default if not supplied
if len(self.args) > 0:
self.options.pattern = self.args[0]
else:
self.options.pattern = 'all'
def run(self):
results = None
super(InventoryCLI, self).run()
# Initialize needed objects
if getattr(self, '_play_prereqs', False):
self.loader, self.inventory, self.vm = self._play_prereqs(self.options)
else:
# fallback to pre 2.4 way of initialzing
from ansible.vars import VariableManager
from ansible.inventory import Inventory
self._new_api = False
self.loader = DataLoader()
self.vm = VariableManager()
# use vault if needed
if self.options.vault_password_file:
vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords()
else:
vault_pass = None
if vault_pass:
self.loader.set_vault_password(vault_pass)
# actually get inventory and vars
self.inventory = Inventory(loader=self.loader, variable_manager=self.vm, host_list=self.options.inventory)
self.vm.set_inventory(self.inventory)
if self.options.host:
hosts = self.inventory.get_hosts(self.options.host)
if len(hosts) != 1:
raise AnsibleOptionsError("You must pass a single valid host to --host parameter")
myvars = self._get_host_variables(host=hosts[0])
self._remove_internal(myvars)
# FIXME: should we template first?
results = self.dump(myvars)
elif self.options.graph:
results = self.inventory_graph()
elif self.options.list:
top = self._get_group('all')
if self.options.yaml:
results = self.yaml_inventory(top)
elif self.options.toml:
results = self.toml_inventory(top)
else:
results = self.json_inventory(top)
results = self.dump(results)
if results:
# FIXME: pager?
display.display(results)
exit(0)
exit(1)
def dump(self, stuff):
if self.options.yaml:
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False)
elif self.options.toml:
from ansible.plugins.inventory.toml import toml_dumps, HAS_TOML
if not HAS_TOML:
raise AnsibleError(
'The python "toml" library is required when using the TOML output format'
)
results = toml_dumps(stuff)
else:
import json
from ansible.parsing.ajson import AnsibleJSONEncoder
results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
return results
# FIXME: refactor to use same for VM
def get_plugin_vars(self, path, entity):
data = {}
def _get_plugin_vars(plugin, path, entities):
data = {}
try:
data = plugin.get_vars(self.loader, path, entity)
except AttributeError:
try:
if isinstance(entity, Host):
data = combine_vars(data, plugin.get_host_vars(entity.name))
else:
data = combine_vars(data, plugin.get_group_vars(entity.name))
except AttributeError:
if hasattr(plugin, 'run'):
raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
else:
raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
return data
for plugin in vars_loader.all():
data = combine_vars(data, _get_plugin_vars(plugin, path, entity))
return data
def _get_group_variables(self, group):
# get info from inventory source
res = group.get_vars()
# FIXME: add switch to skip vars plugins, add vars plugin info
for inventory_dir in self.inventory._sources:
res = combine_vars(res, self.get_plugin_vars(inventory_dir, group))
if group.priority != 1:
res['ansible_group_priority'] = group.priority
return res
def _get_host_variables(self, host):
if self.options.export:
hostvars = host.get_vars()
# FIXME: add switch to skip vars plugins
# add vars plugin info
for inventory_dir in self.inventory._sources:
hostvars = combine_vars(hostvars, self.get_plugin_vars(inventory_dir, host))
else:
if self._new_api:
hostvars = self.vm.get_vars(host=host, include_hostvars=False)
else:
hostvars = self.vm.get_vars(self.loader, host=host, include_hostvars=False)
return hostvars
def _get_group(self, gname):
if self._new_api:
group = self.inventory.groups.get(gname)
else:
group = self.inventory.get_group(gname)
return group
def _remove_internal(self, dump):
for internal in INTERNAL_VARS:
if internal in dump:
del dump[internal]
def _remove_empty(self, dump):
# remove empty keys
for x in ('hosts', 'vars', 'children'):
if x in dump and not dump[x]:
del dump[x]
def _show_vars(self, dump, depth):
result = []
self._remove_internal(dump)
if self.options.show_vars:
for (name, val) in sorted(dump.items()):
result.append(self._graph_name('{%s = %s}' % (name, val), depth))
return result
def _graph_name(self, name, depth=0):
if depth:
name = " |" * (depth) + "--%s" % name
return name
def _graph_group(self, group, depth=0):
result = [self._graph_name('@%s:' % group.name, depth)]
depth = depth + 1
for kid in sorted(group.child_groups, key=attrgetter('name')):
result.extend(self._graph_group(kid, depth))
if group.name != 'all':
for host in sorted(group.hosts, key=attrgetter('name')):
result.append(self._graph_name(host.name, depth))
result.extend(self._show_vars(host.get_vars(), depth + 1))
result.extend(self._show_vars(self._get_group_variables(group), depth))
return result
def inventory_graph(self):
start_at = self._get_group(self.options.pattern)
if start_at:
return '\n'.join(self._graph_group(start_at))
else:
raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
def json_inventory(self, top):
seen = set()
def format_group(group):
results = {}
results[group.name] = {}
if group.name != 'all':
results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
results[group.name]['children'] = []
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
results[group.name]['children'].append(subgroup.name)
if subgroup.name not in seen:
results.update(format_group(subgroup))
seen.add(subgroup.name)
if self.options.export:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty(results[group.name])
if not results[group.name]:
del results[group.name]
return results
results = format_group(top)
# populate meta
results['_meta'] = {'hostvars': {}}
hosts = self.inventory.get_hosts()
for host in hosts:
hvars = self._get_host_variables(host)
if hvars:
self._remove_internal(hvars)
results['_meta']['hostvars'][host.name] = hvars
return results
def yaml_inventory(self, top):
    """Return the inventory rooted at ``top`` in a YAML-friendly layout.

    A host that belongs to several groups only carries its variables the
    first time it is emitted.
    """
    # use a set for O(1) membership tests, consistent with
    # json_inventory/toml_inventory (was a list: O(n) per host)
    seen = set()

    def format_group(group):
        results = {}
        # initialize group + vars
        results[group.name] = {}
        # subgroups
        results[group.name]['children'] = {}
        for subgroup in sorted(group.child_groups, key=attrgetter('name')):
            if subgroup.name != 'all':
                results[group.name]['children'].update(format_group(subgroup))
        # hosts for group
        results[group.name]['hosts'] = {}
        if group.name != 'all':
            for h in sorted(group.hosts, key=attrgetter('name')):
                myvars = {}
                if h.name not in seen:  # avoid defining host vars more than once
                    seen.add(h.name)
                    myvars = self._get_host_variables(host=h)
                    self._remove_internal(myvars)
                results[group.name]['hosts'][h.name] = myvars
        if self.options.export:
            gvars = self._get_group_variables(group)
            if gvars:
                results[group.name]['vars'] = gvars
        self._remove_empty(results[group.name])
        return results

    return format_group(top)
def toml_inventory(self, top):
    """Return the inventory rooted at ``top`` in a TOML-friendly layout."""
    seen = set()
    # supply a default so a missing 'ungrouped' child cannot raise
    # StopIteration out of the generator expression
    has_ungrouped = bool(next((g.hosts for g in top.child_groups if g.name == 'ungrouped'), False))

    def format_group(group):
        results = {group.name: {}}
        results[group.name]['children'] = []
        for subgroup in sorted(group.child_groups, key=attrgetter('name')):
            if subgroup.name == 'ungrouped' and not has_ungrouped:
                # skip an empty implicit 'ungrouped' group entirely
                continue
            if group.name != 'all':
                results[group.name]['children'].append(subgroup.name)
            results.update(format_group(subgroup))
        if group.name != 'all':
            for host in sorted(group.hosts, key=attrgetter('name')):
                if host.name not in seen:
                    seen.add(host.name)
                    host_vars = self._get_host_variables(host=host)
                    self._remove_internal(host_vars)
                else:
                    # variables already emitted under another group
                    host_vars = {}
                # create the 'hosts' mapping lazily, on the first host
                results[group.name].setdefault('hosts', {})[host.name] = host_vars
        if self.options.export:
            results[group.name]['vars'] = self._get_group_variables(group)
        self._remove_empty(results[group.name])
        if not results[group.name]:
            del results[group.name]
        return results

    return format_group(top)
| veger/ansible | lib/ansible/cli/inventory.py | Python | gpl-3.0 | 16,843 | [
"Brian"
] | e480599ee7aca284ed913d2e1d24aada70b4b437ef3d2d3ae1255db99090adff |
from __future__ import print_function

import copy

import numpy as np

import _gpaw
from gpaw.mpi import world, rank
from gpaw.transport.tools import get_matrix_index, collect_lead_mat, dot
from gpaw.utilities import unpack
from gpaw.utilities.blas import gemm
from gpaw.utilities.lapack import inverse_general
from gpaw.utilities.timing import Timer
class Banded_Sparse_HSD:
    """Container for a lead's Hamiltonian (H), overlap (S) and density
    matrix (D), each stored as a Banded_Sparse_Matrix per spin/k-point.

    H and D are indexed as [spin][kpoint]; S (spin independent) as
    [kpoint].  ``s`` and ``pk`` track the currently selected spin and
    k-point.
    """

    def __init__(self, dtype, ns, npk, index=None):
        # index: shared banded storage layout, reused by every matrix
        self.band_index = index
        self.dtype = dtype
        self.ns = ns    # number of spins
        self.npk = npk  # number of k-points
        self.s = 0      # current spin
        self.pk = 0     # current k-point
        self.H = [[[] for k in range(npk)] for s in range(ns)]
        self.D = [[[] for k in range(npk)] for s in range(ns)]
        self.S = [[] for k in range(npk)]

    def reset(self, s, pk, mat, flag='S', init=False):
        """Store or refill matrix ``mat`` for spin ``s`` / k-point ``pk``.

        ``flag`` selects the target ('S', 'H' or 'D').  With init=True a
        new sparse wrapper is created, otherwise the existing banded
        layout is refilled in place.
        """
        assert mat.dtype == self.dtype
        if flag == 'S':
            spar = self.S
        elif flag == 'H':
            spar = self.H[s]
        elif flag == 'D':
            spar = self.D[s]
        if not init:
            spar[pk].reset(mat)
        elif self.band_index is not None:  # was '!= None'
            spar[pk] = Banded_Sparse_Matrix(self.dtype, mat, self.band_index)
        else:
            # the first initialization determines the banded layout
            spar[pk] = Banded_Sparse_Matrix(self.dtype, mat)
            self.band_index = spar[pk].band_index
class Banded_Sparse_Matrix:
    """Banded storage for a (nearly) band-diagonal matrix.

    band_index = (kl, ku, index0, index1, index2) where kl/ku are the
    sub/superdiagonal counts, (index1, index2) are the (row, column)
    coordinates of the stored elements, and index0 maps them into the
    LAPACK banded layout (column major, ldab = 2*kl + ku + 1) expected
    by zgbsv-style solvers.
    """

    def __init__(self, dtype, mat=None, band_index=None, tol=1e-9):
        self.tol = tol
        self.dtype = dtype
        self.band_index = band_index
        # 'is not None': 'mat != None' is an elementwise comparison for
        # numpy arrays and raises in recent numpy versions
        if mat is not None:
            if band_index is None:
                self.initialize(mat)
            else:
                self.reset(mat)

    def initialize(self, mat):
        """Determine the banded layout from ``mat`` and store its elements.

        The indexing assumes mat[0][-1] == 0, otherwise recover() would
        rebuild an unsymmetric full matrix.
        """
        assert self.dtype == mat.dtype
        dim = mat.shape[-1]
        mat_sum = np.sum(abs(mat))
        spar_sum = 0
        while abs(mat_sum - spar_sum) > self.tol * 10:
            # the adaptive bandwidth search was abandoned; use the full
            # bandwidth so no element can be lost (loop runs once)
            ku = dim
            kl = dim
            # storage follows the transpose, because of the column-major
            # ordering required by the zgbsv_ function
            index1 = []
            index2 = []
            index0 = np.zeros((dim, 2 * kl + ku + 1), int)
            n = 0
            # subdiagonals (and the main diagonal at i == 0)
            for i in range(kl, -1, -1):
                for j in range(dim - i):
                    index1.append(i + j)
                    index2.append(j)
                    index0[i + j, 2 * kl - i] = n
                    n += 1
            # superdiagonals
            for i in range(1, ku + 1):
                for j in range(dim - i):
                    index1.append(j)
                    index2.append(j + i)
                    index0[j, 2 * kl + i] = n
                    n += 1
            index1 = np.array(index1)
            index2 = np.array(index2)
            self.band_index = (kl, ku, index0, index1, index2)
            # (the former pre-allocation of self.spar with a float-valued
            # length was dead code and has been removed)
            self.spar = mat[index1, index2]
            spar_sum = np.sum(abs(self.recover()))

    def test1(self, n1, n2):
        """Print the flat storage position of element (n1, n2) (debug)."""
        index1, index2 = self.band_index[-2:]
        for i in range(len(index1)):
            if index1[i] == n1 and index2[i] == n2:
                print(i)

    def recover(self):
        """Rebuild and return the dense matrix."""
        index0, index1, index2 = self.band_index[-3:]
        dim = index0.shape[0]
        mat = np.zeros([dim, dim], self.dtype)
        mat[index1, index2] = self.spar
        return mat

    def reset(self, mat):
        """Refill the banded storage from dense ``mat`` (same layout)."""
        index1, index2 = self.band_index[-2:]
        assert self.dtype == mat.dtype
        self.spar = mat[index1, index2]

    def reset_from_others(self, bds_mm1, bds_mm2, c1, c2):
        """Set self to c1 * bds_mm1 + c2 * bds_mm2 (shared layout)."""
        assert self.dtype == complex
        self.spar = c1 * bds_mm1.spar + c2 * bds_mm2.spar

    def reset_minus(self, mat, full=False):
        """Subtract ``mat`` (dense if full, otherwise sparse) in place."""
        assert self.dtype == complex
        index1, index2 = self.band_index[-2:]
        if full:
            self.spar -= mat[index1, index2]
        else:
            self.spar -= mat.recover()[index1, index2]

    def reset_plus(self, mat, full=False):
        """Add ``mat`` (dense if full, otherwise sparse) in place."""
        assert self.dtype == complex
        index1, index2 = self.band_index[-2:]
        if full:
            self.spar += mat[index1, index2]
        else:
            self.spar += mat.recover()[index1, index2]

    def test_inv_speed(self):
        """Benchmark the available inversion paths and pick the fastest."""
        full_mat = self.recover()
        timer = Timer()
        timer.start('full_numpy')
        tmp0 = np.linalg.inv(full_mat)
        timer.stop('full_numpy')
        timer.start('full_lapack')
        inverse_general(full_mat)
        timer.stop('full_lapack')
        timer.start('sparse_lapack')
        self.inv()
        timer.stop('sparse_lapack')
        times = []
        methods = ['full_numpy', 'full_lapack', 'sparse_lapack']
        for name in methods:
            time = timer.timers[name,]
            print(name, time)
            times.append(time)
        mintime = np.min(times)
        self.inv_method = methods[np.argmin(times)]
        print('mintime', mintime)

    def inv(self):
        """Return the dense inverse.

        The banded LAPACK path (_gpaw.linear_solve_band) is disabled;
        plain numpy inversion of the recovered matrix is used instead.
        """
        return np.linalg.inv(self.recover()).copy()
class Tp_Sparse_HSD:
    """Container for the scattering region's H, S and D matrices plus the
    Green function G, each stored as a Tp_Sparse_Matrix per spin/k-point.

    H and D are indexed as [spin][kpoint]; S (spin independent) as
    [kpoint].  ``s`` and ``pk`` select the spin/k-point used by the
    Green-function methods.
    """

    def __init__(self, dtype, ns, npk, ll_index, ex=True):
        self.dtype = dtype
        self.ll_index = ll_index   # lead_layer_index, see Tp_Sparse_Matrix
        self.extended = ex
        self.H = []
        self.S = []
        self.D = []
        self.ns = ns
        self.npk = npk
        self.s = 0
        self.pk = 0
        self.band_indices = None
        for s in range(ns):
            self.H.append([])
            self.D.append([])
            for k in range(npk):
                self.H[s].append([])
                self.D[s].append([])
        for k in range(npk):
            self.S.append([])
        # G shares the sparsity pattern of H/S; it is refilled per energy
        self.G = Tp_Sparse_Matrix(complex, self.ll_index,
                                  None, None, self.extended)

    def reset(self, s, pk, mat, flag='S', init=False):
        """Store or refill matrix ``mat`` for spin ``s`` / k-point ``pk``."""
        if flag == 'S':
            spar = self.S
        elif flag == 'H':
            spar = self.H[s]
        elif flag == 'D':
            spar = self.D[s]
        if not init:
            spar[pk].reset(mat)
        elif self.band_indices is None:  # was '== None'
            # first initialization determines the banded layouts
            spar[pk] = Tp_Sparse_Matrix(self.dtype, self.ll_index, mat,
                                        None, self.extended)
            self.band_indices = spar[pk].band_indices
        else:
            spar[pk] = Tp_Sparse_Matrix(self.dtype, self.ll_index, mat,
                                        self.band_indices, self.extended)

    def append_lead_as_buffer(self, lead_hsd, lead_couple_hsd, ex_index, tp=None):
        """Attach the leads' principal layers as extra buffer layers.

        ``tp`` is an optional transport object used only for logging.
        """
        assert self.extended  # was '== True'
        clm = collect_lead_mat
        if tp is not None:
            tp.log('append_lead_as_buffer(), npk : {0} ns : {1}'.format(self.npk, self.ns))
        for pk in range(self.npk):
            if tp is not None:
                tp.log('append_lead_as_buffer(), pk : {0}'.format(pk))
            diag_h, upc_h, dwnc_h = clm(lead_hsd, lead_couple_hsd, 0, pk)
            self.S[pk].append_ex_mat(diag_h, upc_h, dwnc_h, ex_index)
            for s in range(self.ns):
                if tp is not None:
                    tp.log('append_lead_as_buffer(), s : {0}'.format(s))
                    tp.log(' clm()')
                diag_h, upc_h, dwnc_h = clm(lead_hsd,
                                            lead_couple_hsd, s, pk, 'H')
                if tp is not None:
                    tp.log(' append_ex_mat()')
                self.H[s][pk].append_ex_mat(diag_h, upc_h, dwnc_h, ex_index)
                if tp is not None:
                    tp.log(' clm()')
                diag_h, upc_h, dwnc_h = clm(lead_hsd,
                                            lead_couple_hsd, s, pk, 'D')
                if tp is not None:
                    tp.log(' append_ex_mat()')
                self.D[s][pk].append_ex_mat(diag_h, upc_h, dwnc_h, ex_index, tp=tp)

    def calculate_eq_green_function(self, zp, sigma, ex=True, full=False):
        """Return G(zp) = (zp*S - H - sigma)^-1 for the current s/pk."""
        s, pk = self.s, self.pk
        self.G.reset_from_others(self.S[pk], self.H[s][pk], zp, -1, init=True)
        self.G.substract_sigma(sigma)
        if full:
            return np.linalg.inv(self.G.recover())
        else:
            self.G.inv_eq()
            return self.G.recover(ex)

    def calculate_ne_green_function(self, zp, sigma, ffocc, ffvir, ex=True):
        """Return the lesser and greater Green functions at energy zp."""
        s, pk = self.s, self.pk
        self.G.reset_from_others(self.S[pk], self.H[s][pk], zp, -1)
        self.G.substract_sigma(sigma)
        gammaocc = []
        gammavir = []
        for ff0, ff1, tgt in zip(ffocc, ffvir, sigma):
            full_tgt = tgt.recover()
            # broadening: Gamma = i * (Sigma - Sigma^dagger), weighted by
            # the occupied/virtual Fermi factors
            gammaocc.append(ff0 * 1.j * (full_tgt - full_tgt.T.conj()))
            gammavir.append(ff1 * 1.j * (full_tgt - full_tgt.T.conj()))
        glesser, ggreater = self.G.calculate_non_equilibrium_green(gammaocc,
                                                                   gammavir, ex)
        return glesser, ggreater

    def abstract_sub_green_matrix(self, zp, sigma, l1, l2, inv_mat=None):
        """Return the Green-function block coupling leads l1 and l2.

        When ``inv_mat`` is None the inverse columns are computed first
        and returned alongside the block so callers can reuse them.
        """
        if inv_mat is None:  # was '== None'
            s, pk = self.s, self.pk
            self.G.reset_from_others(self.S[pk], self.H[s][pk], zp, -1)
            self.G.substract_sigma(sigma)
            inv_mat = self.G.inv_ne()
            gr_sub = inv_mat[l2][l1][-1]
            return gr_sub, inv_mat
        else:
            gr_sub = inv_mat[l2][l1][-1]
            return gr_sub
class Tp_Sparse_Matrix:
    """Block tri-diagonal sparse matrix for a molecule + leads geometry.

    ll_index is the lead_layer_index: ll_index[i][j] holds the basis
    indices of layer j of lead i, layer 0 being the molecule region
    shared by all leads.  The blocks stored here are overwritten by
    their inverse in inv_eq().
    """

    def __init__(self, dtype, ll_index, mat=None, band_indices=None, ex=True):
        self.lead_num = len(ll_index)
        self.ll_index = ll_index
        self.ex_ll_index = copy.deepcopy(ll_index[:])
        self.extended = ex
        self.dtype = dtype
        self.initialize()
        self.band_indices = band_indices
        if self.band_indices is None:  # was '== None'
            self.initialize_band_indices()
        # 'is not None': 'mat != None' is elementwise for numpy arrays
        if mat is not None:
            self.reset(mat, True)

    def initialize_band_indices(self):
        # one banded layout for the molecule block plus one per lead layer
        self.band_indices = [None]
        for i in range(self.lead_num):
            self.band_indices.append([])
            for j in range(self.ex_lead_nlayer[i] - 1):
                self.band_indices[i + 1].append(None)

    def initialize(self):
        """Allocate the per-lead block structure and count dimensions.

        diag_h: diagonal lead blocks; upc_h: superdiagonal couplings;
        dwnc_h: subdiagonal couplings.
        """
        self.diag_h = []
        self.upc_h = []
        self.dwnc_h = []
        self.lead_nlayer = []
        self.ex_lead_nlayer = []
        self.mol_index = self.ll_index[0][0]
        self.nl = 1
        self.nb = len(self.mol_index)
        self.length = self.nb * self.nb
        self.mol_h = []
        for i in range(self.lead_num):
            self.diag_h.append([])
            self.upc_h.append([])
            self.dwnc_h.append([])
            self.lead_nlayer.append(len(self.ll_index[i]))
            if self.extended:
                # one extra layer will hold the lead principal layer
                self.ex_lead_nlayer.append(len(self.ll_index[i]) + 1)
            else:
                self.ex_lead_nlayer.append(len(self.ll_index[i]))
            assert (self.ll_index[i][0] == self.mol_index).all()
            self.nl += self.lead_nlayer[i] - 1
            for j in range(self.lead_nlayer[i] - 1):
                self.diag_h[i].append([])
                self.upc_h[i].append([])
                self.dwnc_h[i].append([])
                len1 = len(self.ll_index[i][j])
                len2 = len(self.ll_index[i][j + 1])
                self.length += 2 * len1 * len2 + len2 * len2
                self.nb += len2
            if self.extended:
                self.diag_h[i].append([])
                self.upc_h[i].append([])
                self.dwnc_h[i].append([])
        self.ex_nb = self.nb

    def append_ex_mat(self, diag_h, upc_h, dwnc_h, ex_index, tp=None):
        """Fill the extra (extended) layer of every lead with lead blocks.

        ``tp`` is an optional transport object used only for logging.
        """
        assert self.extended
        if tp is not None:
            tp.log(' append_ex_mat()')
        for i in range(self.lead_num):
            if tp is not None:
                tp.log(' append_ex_mat() i: {0}'.format(i))
            self.diag_h[i][-1] = diag_h[i]
            if tp is not None:
                tp.log(' append_ex_mat() diag_h')
            self.upc_h[i][-1] = upc_h[i]
            if tp is not None:
                tp.log(' append_ex_mat() upc_h')
            self.dwnc_h[i][-1] = dwnc_h[i]
            if tp is not None:
                tp.log(' append_ex_mat() dwnc_h')
            self.ex_ll_index[i].append(ex_index[i])
            if tp is not None:
                tp.log(' append_ex_mat() .append')
            self.ex_nb += len(ex_index[i])
            if tp is not None:
                tp.log(' append_ex_mat() += len()')

    def abstract_layer_info(self):
        """Build basis->layer and layer-neighbour lookup tables."""
        self.basis_to_layer = np.empty([self.nb], int)
        # -1 marks 'no neighbour in this slot'
        self.neighbour_layers = np.zeros([self.nl, self.lead_num], int) - 1
        for i in self.mol_index:
            self.basis_to_layer[i] = 0
        nl = 1
        for i in range(self.lead_num):
            for j in range(self.lead_nlayer[i] - 1):
                for k in self.ll_index[i][j]:
                    self.basis_to_layer[k] = nl
                nl += 1
        nl = 1
        for i in range(self.lead_num):
            self.neighbour_layers[0][i] = nl
            first = nl
            for j in range(self.lead_nlayer[i] - 1):
                if nl == first:
                    # first layer of the lead borders the molecule (layer 0)
                    self.neighbour_layers[nl][0] = 0
                    if j != self.lead_nlayer[i] - 2:
                        self.neighbour_layers[nl][1] = nl + 1
                else:
                    self.neighbour_layers[nl][0] = nl - 1
                    if j != self.lead_nlayer[i] - 2:
                        self.neighbour_layers[nl][1] = nl + 1
                nl += 1

    def reset(self, mat, init=False):
        """Scatter dense ``mat`` into the sparse block structure."""
        assert mat.dtype == self.dtype
        ind = get_matrix_index(self.mol_index)
        if init:
            self.mol_h = Banded_Sparse_Matrix(self.dtype, mat[ind.T, ind],
                                              self.band_indices[0])
            if self.band_indices[0] is None:
                self.band_indices[0] = self.mol_h.band_index
        else:
            self.mol_h.reset(mat[ind.T, ind])
        for i in range(self.lead_num):
            for j in range(self.lead_nlayer[i] - 1):
                ind = get_matrix_index(self.ll_index[i][j])
                ind1 = get_matrix_index(self.ll_index[i][j + 1])
                indr1, indc1 = get_matrix_index(self.ll_index[i][j],
                                                self.ll_index[i][j + 1])
                indr2, indc2 = get_matrix_index(self.ll_index[i][j + 1],
                                                self.ll_index[i][j])
                if init:
                    self.diag_h[i][j] = Banded_Sparse_Matrix(
                        self.dtype, mat[ind1.T, ind1],
                        self.band_indices[i + 1][j])
                    if self.band_indices[i + 1][j] is None:
                        self.band_indices[i + 1][j] = \
                            self.diag_h[i][j].band_index
                else:
                    self.diag_h[i][j].reset(mat[ind1.T, ind1])
                self.upc_h[i][j] = mat[indr1, indc1]
                self.dwnc_h[i][j] = mat[indr2, indc2]

    def reset_from_others(self, tps_mm1, tps_mm2, c1, c2, init=False):
        """Fill self block-wise with c1 * tps_mm1 + c2 * tps_mm2."""
        if init:
            self.mol_h = Banded_Sparse_Matrix(complex)
        self.mol_h.spar = c1 * tps_mm1.mol_h.spar + c2 * tps_mm2.mol_h.spar
        self.mol_h.band_index = tps_mm1.mol_h.band_index
        self.ex_lead_nlayer = tps_mm1.ex_lead_nlayer
        self.ex_ll_index = tps_mm1.ex_ll_index
        self.ex_nb = tps_mm1.ex_nb
        for i in range(self.lead_num):
            for j in range(self.ex_lead_nlayer[i] - 1):
                assert (tps_mm1.ex_ll_index[i][j] == tps_mm2.ex_ll_index[i][j]).all()
                if init:
                    self.diag_h[i][j] = Banded_Sparse_Matrix(complex)
                    self.diag_h[i][j].band_index = \
                        tps_mm1.diag_h[i][j].band_index
                self.diag_h[i][j].spar = c1 * tps_mm1.diag_h[i][j].spar + \
                    c2 * tps_mm2.diag_h[i][j].spar
                self.upc_h[i][j] = c1 * tps_mm1.upc_h[i][j] + \
                    c2 * tps_mm2.upc_h[i][j]
                self.dwnc_h[i][j] = c1 * tps_mm1.dwnc_h[i][j] + \
                    c2 * tps_mm2.dwnc_h[i][j]

    def substract_sigma(self, sigma):
        """Subtract the lead self-energies from the outermost real layers."""
        # with an extended buffer layer the self-energy acts on the
        # second-to-last diagonal block
        if self.extended:
            n = -2
        else:
            n = -1
        for i in range(self.lead_num):
            self.diag_h[i][n].reset_minus(sigma[i])

    def recover(self, ex=False):
        """Assemble and return the dense matrix (extended layout if ex)."""
        if ex:
            nb = self.ex_nb
            lead_nlayer = self.ex_lead_nlayer
            ll_index = self.ex_ll_index
        else:
            nb = self.nb
            lead_nlayer = self.lead_nlayer
            ll_index = self.ll_index
        mat = np.zeros([nb, nb], self.dtype)
        ind = get_matrix_index(ll_index[0][0])
        mat[ind.T, ind] = self.mol_h.recover()
        gmi = get_matrix_index
        for i in range(self.lead_num):
            for j in range(lead_nlayer[i] - 1):
                ind = gmi(ll_index[i][j])
                ind1 = gmi(ll_index[i][j + 1])
                indr1, indc1 = gmi(ll_index[i][j], ll_index[i][j + 1])
                indr2, indc2 = gmi(ll_index[i][j + 1], ll_index[i][j])
                mat[ind1.T, ind1] = self.diag_h[i][j].recover()
                mat[indr1, indc1] = self.upc_h[i][j]
                mat[indr2, indc2] = self.dwnc_h[i][j]
        return mat

    def test_inv_eq(self, tol=1e-9):
        """Check that inv_eq() really produced the inverse (debug helper)."""
        tp_mat = copy.deepcopy(self)
        tp_mat.inv_eq()
        mol_h = dot(tp_mat.mol_h.recover(), self.mol_h.recover())
        for i in range(self.lead_num):
            mol_h += dot(tp_mat.upc_h[i][0], self.dwnc_h[i][0])
        diff = np.max(abs(mol_h - np.eye(mol_h.shape[0])))
        if diff > tol:
            print('warning, mol_diff', diff)
        for i in range(self.lead_num):
            for j in range(self.lead_nlayer[i] - 2):
                diag_h = dot(tp_mat.diag_h[i][j].recover(),
                             self.diag_h[i][j].recover())
                # fixed: the attribute is dwnc_h ('dwn_h' never existed and
                # raised AttributeError here)
                diag_h += dot(tp_mat.dwnc_h[i][j], self.upc_h[i][j])
                diag_h += dot(tp_mat.upc_h[i][j + 1], self.dwnc_h[i][j + 1])
                diff = np.max(abs(diag_h - np.eye(diag_h.shape[0])))
                if diff > tol:
                    print('warning, diag_diff', i, j, diff)
            j = self.lead_nlayer[i] - 2
            diag_h = dot(tp_mat.diag_h[i][j].recover(),
                         self.diag_h[i][j].recover())
            diag_h += dot(tp_mat.dwnc_h[i][j], self.upc_h[i][j])
            diff = np.max(abs(diag_h - np.eye(diag_h.shape[0])))
            if diff > tol:
                print('warning, diag_diff', i, j, diff)

    def inv_eq(self):
        """Invert the block tri-diagonal matrix in place (equilibrium).

        Downward sweep: eliminate each lead towards the molecule block;
        upward sweep: back-substitute to get all stored blocks of the
        inverse.
        """
        q_mat = []
        for i in range(self.lead_num):
            q_mat.append([])
            nll = self.lead_nlayer[i]
            for j in range(nll - 1):
                q_mat[i].append([])
            end = nll - 2
            q_mat[i][end] = self.diag_h[i][end].inv()
            for j in range(end - 1, -1, -1):
                self.diag_h[i][j].reset_minus(self.dotdot(
                    self.upc_h[i][j + 1],
                    q_mat[i][j + 1],
                    self.dwnc_h[i][j + 1]), full=True)
                q_mat[i][j] = self.diag_h[i][j].inv()
        h_mm = self.mol_h
        for i in range(self.lead_num):
            h_mm.reset_minus(self.dotdot(self.upc_h[i][0], q_mat[i][0],
                                         self.dwnc_h[i][0]), full=True)
        inv_h_mm = h_mm.inv()
        h_mm.reset(inv_h_mm)
        for i in range(self.lead_num):
            tmp_dc = self.dwnc_h[i][0].copy()
            self.dwnc_h[i][0] = -self.dotdot(q_mat[i][0], tmp_dc, inv_h_mm)
            self.upc_h[i][0] = -self.dotdot(inv_h_mm, self.upc_h[i][0],
                                            q_mat[i][0])
            dim = len(self.ll_index[i][1])
            self.diag_h[i][0].reset(dot(q_mat[i][0], np.eye(dim) -
                                        dot(tmp_dc, self.upc_h[i][0])))
            for j in range(1, self.lead_nlayer[i] - 1):
                tmp_dc = self.dwnc_h[i][j].copy()
                self.dwnc_h[i][j] = -self.dotdot(q_mat[i][j], tmp_dc,
                                                 self.diag_h[i][j - 1].recover())
                self.upc_h[i][j] = -self.dotdot(self.diag_h[i][j - 1].recover(),
                                                self.upc_h[i][j],
                                                q_mat[i][j])
                dim = len(self.ll_index[i][j + 1])
                self.diag_h[i][j].reset(dot(q_mat[i][j], np.eye(dim) -
                                            dot(tmp_dc, self.upc_h[i][j])))

    def inv_ne(self):
        """Return the inverse's columns needed for non-equilibrium density.

        Structure of inv_mat: inv_cols_1, ..., inv_cols_n (n: lead_num);
        inv_cols_i = [inv_cols_l1, ..., inv_cols_ln, inv_cols_mm];
        inv_cols_li = [inv_cols_ll1, inv_cols_ll2, ...].
        """
        q_mat = []
        qi_mat = []
        inv_mat = []
        for i in range(self.lead_num):
            q_mat.append([])
            qi_mat.append([])
            inv_mat.append([])
            nll = self.lead_nlayer[i]
            for j in range(nll - 1):
                q_mat[i].append([])
                qi_mat[i].append([])
            for j in range(self.lead_num):
                inv_mat[i].append([])
                nll_j = self.lead_nlayer[j]
                for k in range(nll_j - 1):
                    inv_mat[i][j].append([])
            inv_mat[i].append([])
            end = nll - 2
            q_mat[i][end] = self.diag_h[i][end].inv()
            for j in range(end - 1, -1, -1):
                tmp_diag_h = copy.deepcopy(self.diag_h[i][j])
                tmp_diag_h.reset_minus(self.dotdot(self.upc_h[i][j + 1],
                                                   q_mat[i][j + 1],
                                                   self.dwnc_h[i][j + 1]),
                                       full=True)
                q_mat[i][j] = tmp_diag_h.inv()
        # above get all the q matrices; when solving the columns
        # corresponding to lead i, q_mat[i] itself is not used
        q_mm = copy.deepcopy(self.mol_h)
        for i in range(self.lead_num):
            q_mm.reset_minus(self.dotdot(self.upc_h[i][0],
                                         q_mat[i][0], self.dwnc_h[i][0]), full=True)
        for i in range(self.lead_num):
            # solve the columns corresponding to lead i
            nll = self.lead_nlayer[i]
            q_mm_tmp = copy.deepcopy(q_mm)
            q_mm_tmp.reset_plus(self.dotdot(self.upc_h[i][0], q_mat[i][0],
                                            self.dwnc_h[i][0]), full=True)
            qi_mat[i][0] = q_mm_tmp.inv()
            for j in range(1, nll - 1):
                tmp_diag_h = copy.deepcopy(self.diag_h[i][j - 1])
                tmp_diag_h.reset_minus(self.dotdot(self.dwnc_h[i][j - 1],
                                                   qi_mat[i][j - 1],
                                                   self.upc_h[i][j - 1]),
                                       full=True)
                qi_mat[i][j] = tmp_diag_h.inv()
            tmp_diag_h = copy.deepcopy(self.diag_h[i][nll - 2])
            tmp_diag_h.reset_minus(self.dotdot(self.dwnc_h[i][nll - 2],
                                               qi_mat[i][nll - 2],
                                               self.upc_h[i][nll - 2]),
                                   full=True)
            inv_mat[i][i][nll - 2] = tmp_diag_h.inv()
            for j in range(nll - 3, -1, -1):
                inv_mat[i][i][j] = -self.dotdot(qi_mat[i][j + 1],
                                                self.upc_h[i][j + 1],
                                                inv_mat[i][i][j + 1])
            inv_mat[i][self.lead_num] = -self.dotdot(qi_mat[i][0],
                                                     self.upc_h[i][0],
                                                     inv_mat[i][i][0])
            for j in range(self.lead_num):
                if j != i:
                    nlj = self.lead_nlayer[j]
                    inv_mat[i][j][0] = -self.dotdot(q_mat[j][0], self.dwnc_h[j][0],
                                                    inv_mat[i][self.lead_num])
                    for k in range(1, nlj - 1):
                        inv_mat[i][j][k] = -self.dotdot(q_mat[j][k], self.dwnc_h[j][k],
                                                        inv_mat[i][j][k - 1])
        return inv_mat

    def combine_inv_mat(self, inv_mat):
        """Assemble the inv_ne() columns into one dense nb x nb matrix."""
        nb = self.nb
        mat = np.zeros([nb, nb], complex)
        for i in range(self.lead_num):
            indr, indc = get_matrix_index(self.ll_index[i][0],
                                          self.ll_index[i][-1])
            mat[indr, indc] = inv_mat[i][self.lead_num]
            for j in range(self.lead_num):
                for k in range(1, self.lead_nlayer[j]):
                    indr, indc = get_matrix_index(self.ll_index[j][k],
                                                  self.ll_index[i][-1])
                    mat[indr, indc] = inv_mat[i][j][k - 1]
        return mat

    def dotdot(self, mat1, mat2, mat3):
        """Return mat1 . mat2 . mat3."""
        return dot(mat1, dot(mat2, mat3))

    def calculate_non_equilibrium_green(self, se_less, se_great, ex=True):
        """Return (G^<, G^>) built from the lesser/greater self-energies."""
        inv_mat = self.inv_ne()
        glesser = self.calculate_keldysh_green(inv_mat, se_less, ex)
        ggreater = self.calculate_keldysh_green(inv_mat, se_great, ex)
        return glesser, ggreater

    def calculate_keldysh_green(self, inv_mat, keldysh_se, ex=True):
        """Accumulate G . Sigma . G^dagger into the block storage.

        keldysh_se holds one self-energy per lead, ordered like
        self.ll_index.  The block storage is zeroed and reused as the
        accumulator, then recovered as a dense matrix.
        """
        self.mol_h.spar.fill(0.0)
        for i in range(self.lead_num):
            nll = self.lead_nlayer[i]
            for j in range(nll - 1):
                self.diag_h[i][j].spar.fill(0.0)
                self.upc_h[i][j].fill(0.0)
                self.dwnc_h[i][j].fill(0.0)
        for i in range(self.lead_num):
            # self-energy loop
            self.mol_h.reset_plus(self.dotdot(inv_mat[i][self.lead_num],
                                              keldysh_se[i],
                                              inv_mat[i][self.lead_num].T.conj()),
                                  full=True)
            for j in range(self.lead_num):
                # matrix operation loop
                nlj = self.lead_nlayer[j]
                self.diag_h[j][0].reset_plus(self.dotdot(inv_mat[i][j][0],
                                                         keldysh_se[i],
                                                         inv_mat[i][j][0].T.conj()),
                                             full=True)
                self.dwnc_h[j][0] += self.dotdot(inv_mat[i][j][0], keldysh_se[i],
                                                 inv_mat[i][self.lead_num].T.conj())
                self.upc_h[j][0] += self.dotdot(inv_mat[i][self.lead_num],
                                                keldysh_se[i],
                                                inv_mat[i][j][0].T.conj())
                for k in range(1, nlj - 1):
                    self.diag_h[j][k].reset_plus(self.dotdot(inv_mat[i][j][k],
                                                             keldysh_se[i],
                                                             inv_mat[i][j][k].T.conj()),
                                                 full=True)
                    self.dwnc_h[j][k] += self.dotdot(inv_mat[i][j][k],
                                                     keldysh_se[i],
                                                     inv_mat[i][j][k - 1].T.conj())
                    self.upc_h[j][k] += self.dotdot(inv_mat[i][j][k - 1],
                                                    keldysh_se[i],
                                                    inv_mat[i][j][k].T.conj())
        return self.recover(ex)

    def test_inv_speed(self):
        """Benchmark the available inversion paths and pick the fastest."""
        full_mat = self.recover()
        timer = Timer()
        timer.start('full_numpy')
        tmp0 = np.linalg.inv(full_mat)
        timer.stop('full_numpy')
        timer.start('full_lapack')
        inverse_general(full_mat)
        timer.stop('full_lapack')
        timer.start('sparse_lapack')
        self.inv_eq()
        timer.stop('sparse_lapack')
        timer.start('sparse_lapack_ne')
        self.inv_ne()
        timer.stop('sparse_lapack_ne')
        times = []
        methods = ['full_numpy', 'full_lapack', 'sparse_lapack']
        for name in methods:
            time = timer.timers[name,]
            print(name, time)
            times.append(time)
        mintime = np.min(times)
        self.inv_method = methods[np.argmin(times)]
        print('mintime', mintime)
        print('sparse_lapack_ne', timer.timers['sparse_lapack_ne',])
class CP_Sparse_HSD:
    """Container for coupling H, S and D matrices, each stored as a
    CP_Sparse_Matrix per spin/k-point.

    H and D are indexed as [spin][kpoint]; S (spin independent) as
    [kpoint].
    """

    def __init__(self, dtype, ns, npk, index=None):
        # index: shared (offset, dim) layout, reused by all matrices
        self.index = index
        self.dtype = dtype
        self.ns = ns    # number of spins
        self.npk = npk  # number of k-points
        self.s = 0      # current spin
        self.pk = 0     # current k-point
        self.H = [[[] for k in range(npk)] for s in range(ns)]
        self.D = [[[] for k in range(npk)] for s in range(ns)]
        self.S = [[] for k in range(npk)]

    def reset(self, s, pk, mat, flag='S', init=False):
        """Store or refill matrix ``mat`` for spin ``s`` / k-point ``pk``."""
        assert mat.dtype == self.dtype
        if flag == 'S':
            spar = self.S
        elif flag == 'H':
            spar = self.H[s]
        elif flag == 'D':
            spar = self.D[s]
        if not init:
            spar[pk].reset(mat)
        elif self.index is not None:  # was '!= None'
            spar[pk] = CP_Sparse_Matrix(self.dtype, mat, self.index)
        else:
            # the first initialization determines the sparse layout
            spar[pk] = CP_Sparse_Matrix(self.dtype, mat)
            self.index = spar[pk].index
class CP_Sparse_Matrix:
    """Sparse storage for a triangular coupling matrix.

    flag is 'U' (nonzeros above the diagonal) or 'L' (below); index is
    (offset, dim) where a positive offset means the stored block starts
    ``offset`` columns to the right of the diagonal, a negative one
    ``offset`` rows below it.
    """

    def __init__(self, dtype, mat=None, index=None, flag=None, tol=1e-9):
        self.tol = tol
        self.index = index
        self.dtype = dtype
        self.flag = flag
        # 'is not None': 'mat != None' is elementwise for numpy arrays
        if mat is not None:
            if self.index is None:
                self.initialize(mat)
            else:
                self.reset(mat)

    def initialize(self, mat):
        """Detect the triangular type and offset of ``mat`` and store it."""
        assert self.dtype == mat.dtype
        dim = mat.shape[-1]
        ud_array = np.empty([dim])
        dd_array = np.empty([dim])
        # weight of each superdiagonal (ud) and subdiagonal (dd)
        for i in range(dim):
            ud_array[i] = np.sum(abs(np.diag(mat, i)))
            dd_array[i] = np.sum(abs(np.diag(mat, -i)))
        spar_sum = 0
        mat_sum = np.sum(abs(mat))
        if np.sum(abs(ud_array)) > np.sum(abs(dd_array)):
            self.flag = 'U'
            i = -1
            # grow the offset past negligible diagonals until the stored
            # block accounts for the whole matrix weight
            while abs(mat_sum - spar_sum) > self.tol * 10:
                i += 1
                while ud_array[i] < self.tol and i < dim - 1:
                    i += 1
                self.index = (i, dim)
                ldab = dim - i
                self.spar = mat[:ldab, i:].copy()
                spar_sum = np.sum(abs(self.spar))
        else:
            self.flag = 'L'
            i = -1
            while abs(mat_sum - spar_sum) > self.tol * 10:
                i += 1
                while dd_array[i] < self.tol and i < dim - 1:
                    i += 1
                self.index = (-i, dim)
                ldab = dim - i
                self.spar = mat[i:, :ldab].copy()
                spar_sum = np.sum(abs(self.spar))

    def reset(self, mat):
        """Refill the sparse block from dense ``mat`` (layout unchanged)."""
        assert mat.dtype == self.dtype and mat.shape[-1] == self.index[1]
        dim = mat.shape[-1]
        if self.index[0] > 0:
            ldab = dim - self.index[0]
            self.spar = mat[:ldab, self.index[0]:].copy()
        else:
            ldab = dim + self.index[0]
            self.spar = mat[-self.index[0]:, :ldab].copy()

    def recover(self, trans='n'):
        """Rebuild the dense matrix; trans='c' returns the conjugate
        transpose (plain transpose for real dtypes)."""
        nb = self.index[1]
        mat = np.zeros([nb, nb], self.dtype)
        if self.index[0] > 0:
            ldab = nb - self.index[0]
            mat[:ldab, self.index[0]:] = self.spar
        else:
            ldab = nb + self.index[0]
            mat[-self.index[0]:, :ldab] = self.spar
        if trans == 'c':
            if self.dtype == float:
                mat = mat.T.copy()
            else:
                mat = mat.T.conj()
        return mat
class Se_Sparse_Matrix:
    """Sparse storage for a self-energy matrix whose nonzeros occupy one
    nn x nn corner.

    Coupling sparse matrix: A_ij != 0 if i > dim - nn and j > dim - nn
    (right self-energy) or A_ij != 0 if i < nn and j < nn (left
    self-energy); dim is the shape of A.  tri_type 'L' selects the
    top-left corner, anything else the bottom-right one.
    """

    def __init__(self, mat, tri_type, nn=None, tol=1e-9):
        self.tri_type = tri_type
        self.tol = tol
        self.nb = mat.shape[-1]
        self.spar = []
        if nn is None:  # was '== None'
            self.initialize(mat)
        else:
            self.reset(mat, nn)

    def initialize(self, mat):
        """Find the corner size nn by scanning for non-negligible rows."""
        self.nn = 0
        nb = self.nb
        tol = self.tol
        if self.tri_type == 'L':
            while self.nn < nb and np.sum(abs(mat[self.nn])) > tol:
                self.nn += 1
            self.spar = mat[:self.nn, :self.nn].copy()
        else:
            while self.nn < nb and np.sum(abs(mat[nb - self.nn - 1])) > tol:
                self.nn += 1
            self.spar = mat[-self.nn:, -self.nn:].copy()
        # warn if significant weight lies outside the detected corner
        diff = abs(np.sum(abs(mat)) - np.sum(abs(self.spar)))
        if diff > tol * 10:
            print('Warning! Sparse Matrix Diff', diff)

    def reset(self, mat, nn=None):
        """Refill the corner block; optionally change its size to ``nn``."""
        if nn is not None:  # was '!= None'
            self.nn = nn
        if self.tri_type == 'L':
            self.spar = mat[:self.nn, :self.nn].copy()
        else:
            self.spar = mat[-self.nn:, -self.nn:].copy()

    def restore(self):
        """Rebuild and return the dense nb x nb (complex) matrix."""
        mat = np.zeros([self.nb, self.nb], complex)
        if self.tri_type == 'L':
            mat[:self.nn, :self.nn] = self.spar
        else:
            mat[-self.nn:, -self.nn:] = self.spar
        return mat
| robwarm/gpaw-symm | gpaw/transport/sparse_matrix.py | Python | gpl-3.0 | 37,221 | [
"GPAW"
] | 3ec80967612626eb259e611734476bb2e6b0f55a448dd7ec44812489d8f68ad1 |
import os
import re
import subprocess as sp
from shutil import copyfile
from time import sleep
import pytest
import genomepy.utils
from genomepy.plugins import activate, deactivate, get_active_plugins, init_plugins
from genomepy.plugins.blacklist import BlacklistPlugin
from genomepy.plugins.bowtie2 import Bowtie2Plugin
from genomepy.plugins.bwa import BwaPlugin
from genomepy.plugins.gmap import GmapPlugin
from genomepy.plugins.hisat2 import Hisat2Plugin
from genomepy.plugins.minimap2 import Minimap2Plugin
from genomepy.plugins.star import StarPlugin
from tests import linux, travis
@pytest.fixture(autouse=True)
def activate_plugins():
    """Run every test with all plugins active, then restore the user's set."""
    # remember which plugins were active before the test
    originally_active = [plugin.name for plugin in get_active_plugins()]
    # switch every known plugin on for the duration of the test
    for plugin in init_plugins():
        activate(plugin)
    yield
    # deactivate everything, then re-activate the saved set
    for plugin in init_plugins():
        deactivate(plugin)
    for name in originally_active:
        activate(name)
@pytest.fixture(scope="function", params=["unzipped", "bgzipped"])
def genome(request):
    """Create a test genome and location.

    Parametrized so each consuming test runs twice: once with a plain
    .fa genome and once with the bgzipped .fa.gz variant.
    """
    # "ce10" matters: test_blacklist relies on this name to hit the
    # UCSC blacklist download path
    name = "ce10"  # Use fake name for blacklist test
    fafile = "tests/data/small_genome.fa.gz"
    # setup test dir
    genomes_dir = os.path.join(os.getcwd(), ".genomepy_plugin_tests")
    genome_dir = os.path.join(genomes_dir, name)
    genomepy.utils.mkdir_p(genome_dir)
    fname = os.path.join(genome_dir, f"{name}.fa.gz")
    copyfile(fafile, fname)
    # unzip genome if required
    if request.param == "unzipped":
        sp.check_call(["gunzip", fname])
    # add annotation (for STAR and hisat2) for 1 of 2 tests
    gtf_file = "tests/data/ce10.annotation.gtf.gz"
    aname = os.path.join(genome_dir, f"{name}.annotation.gtf.gz")
    copyfile(gtf_file, aname)
    yield genomepy.Genome(name, genomes_dir=genomes_dir)
    # tear down test dir
    genomepy.utils.rm_rf(genomes_dir)
def dont_overwrite(p, genome, fname):
    """Assert that running plugin ``p`` without force leaves fname untouched."""
    before = os.path.getmtime(fname)
    # OSX rounds down getmtime to the second
    if not linux:
        sleep(1)
    p.after_genome_download(genome, force=False)
    assert before == os.path.getmtime(fname)
def test_blacklist(caplog, genome):
    """Create blacklist.

    Exercises the plugin's lookup paths by mutating genome.name:
    unknown genome, bad download URL, UCSC-hosted list, Ensembl/NCBI
    (ENCODE) hosted list.
    """
    # independent of bgzipping
    # no need to check for both .fa and .fa.gz.
    if genome.filename.endswith(".fa.gz"):
        pass
    p = BlacklistPlugin()
    fname = re.sub(".fa(.gz)?$", ".blacklist.bed", genome.filename)
    # no blacklist found
    genome.name = "ce01"
    p.after_genome_download(genome, force=True)
    assert f"No blacklist found for {genome.name}" in caplog.text
    # error downloading blacklist
    genome.name = "this was a triumph"
    p.after_genome_download(genome, force=True)
    link = "I'm making a note here: 'Huge success'"
    assert f"Could not download blacklist file from {link}" in caplog.text
    # download UCSC blacklist
    genome.name = "ce10"
    p.after_genome_download(genome, force=True)
    assert "ce10-C.elegans/ce10-blacklist.bed.gz" in caplog.text
    assert os.path.exists(fname)
    # UCSC blacklists use UCSC-style ("chr"-prefixed) chromosome names
    with open(fname) as blacklist:
        for line in blacklist:
            assert line.startswith("chr")
            break
    os.unlink(fname)
    # download Ensembl/NCBI blacklist
    genome.name = "GRCh38"
    p.after_genome_download(genome, force=True)
    assert "ENCFF356LFX/@@download/ENCFF356LFX.bed.gz" in caplog.text
    # Ensembl/NCBI blacklists do not use the "chr" prefix
    with open(fname) as blacklist:
        for line in blacklist:
            assert not line.startswith("chr")
            break
    # don't overwrite
    dont_overwrite(p, genome, fname)
    os.unlink(fname)
    # restore the name the fixture created the genome with
    genome.name = "ce10"
def test_bowtie2(genome, threads=2):
    """Create a bowtie2 index and check the expected files appear."""
    # bowtie2 can work with bgzipped genomes natively,
    # no need to check for both .fa and .fa.gz.
    if genome.filename.endswith(".fa"):
        pass
    Bowtie2Plugin().after_genome_download(genome, threads=threads, force=True)
    index_dir = os.path.join(
        os.path.dirname(genome.filename), "index", "bowtie2")
    assert os.path.exists(index_dir)
    assert os.path.exists(os.path.join(index_dir, f"{genome.name}.1.bt2"))
def test_bwa(genome, threads=2):
    """Create a bwa index and check the expected files appear."""
    # bwa can work with bgzipped genomes natively,
    # no need to check for both .fa and .fa.gz.
    if genome.filename.endswith(".fa"):
        pass
    BwaPlugin().after_genome_download(genome, threads=threads, force=True)
    index_dir = os.path.join(
        os.path.dirname(genome.filename), "index", "bwa")
    assert os.path.exists(index_dir)
    assert os.path.exists(os.path.join(index_dir, f"{genome.name}.fa.sa"))
@pytest.mark.skipif(not travis, reason="slow")
def test_gmap(genome, threads=2):
    """Create a gmap index and check the expected files appear."""
    GmapPlugin().after_genome_download(genome, threads=threads, force=True)
    index_dir = os.path.join(
        os.path.dirname(genome.filename), "index", "gmap")
    assert os.path.exists(index_dir)
    assert os.path.exists(os.path.join(index_dir, f"{genome.name}.maps"))
def test_hisat2(caplog, genome, threads=2):
    """Create a hisat2 index; splice-aware when an annotation is present."""
    Hisat2Plugin().after_genome_download(genome, threads=threads, force=True)
    index_dir = os.path.join(
        os.path.dirname(genome.filename), "index", "hisat2")
    assert os.path.exists(index_dir)
    assert os.path.exists(os.path.join(index_dir, f"{genome.name}.1.ht2"))
    if not genome.annotation_gtf_file:
        assert "Creating Hisat2 index without annotation file." in caplog.text
    else:
        # a splice-aware index leaves splice/exon site files behind
        for sites in ("splice_sites.txt", "exon_sites.txt"):
            assert os.path.exists(os.path.join(genome.genome_dir, sites))
        # the annotation file itself must remain untouched
        assert os.path.exists(genome.annotation_gtf_file)
        assert genome.annotation_gtf_file.endswith(".gtf.gz")
def test_minimap2(genome, threads=2):
    """Create a minimap2 index via the plugin and check that it exists.

    minimap2 works with bgzipped genomes natively, so no .fa vs .fa.gz
    special-casing is needed (the old ``if …: pass`` no-op is removed).
    """
    p = Minimap2Plugin()
    p.after_genome_download(genome, threads=threads, force=True)

    dirname = os.path.dirname(genome.filename)
    index_dir = os.path.join(dirname, "index", "minimap2")
    fname = os.path.join(index_dir, f"{genome.name}.mmi")
    assert os.path.exists(index_dir)
    assert os.path.exists(fname)
@pytest.mark.skipif(not travis, reason="slow")
def test_star(caplog, genome, threads=2):
    """Create star index."""
    plugin = StarPlugin()
    plugin.after_genome_download(genome, threads=threads, force=True)

    idx_dir = os.path.join(os.path.dirname(genome.filename), "index", "star")
    assert os.path.exists(idx_dir)
    # STAR writes a suffix-array file simply called "SA".
    assert os.path.exists(os.path.join(idx_dir, "SA"))

    if not genome.annotation_gtf_file:
        assert "Creating STAR index without annotation file." in caplog.text
    else:
        # check if splice-aware index is generated
        assert "Creating star index..." in caplog.text
        # check if annotation file is still the same
        assert os.path.exists(genome.annotation_gtf_file)
        assert genome.annotation_gtf_file.endswith(".gtf.gz")
| simonvh/genomepy | tests/test_16_plugins.py | Python | mit | 7,449 | [
"BWA"
] | 9d3bab1026175f232be85cc5668783f0cd0882f0f420aa2db770cdd3dab03ac9 |
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File :    dirac-wms-job-logging-info
# Author :  Stuart Paterson
########################################################################
"""
Retrieve history of transitions for a DIRAC job
"""
__RCSID__ = "$Id$"

import DIRAC
from DIRAC.Core.Base import Script

# Register the usage message before parsing so that --help is correct.
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] ... JobID ...' % Script.scriptName,
                                     'Arguments:',
                                     '  JobID:    DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()

# At least one JobID is required; showHelp() exits the script.
if len( args ) < 1:
  Script.showHelp()

# NOTE: the Dirac API must only be imported after parseCommandLine() has
# initialised the DIRAC configuration system.
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []

# Query the logging info for every requested job; remember failures so
# all jobs are attempted before exiting with a non-zero code.
for job in args:
  result = dirac.loggingInfo( job, printOutput = True )
  if not result['OK']:
    errorList.append( ( job, result['Message'] ) )
    exitCode = 2

for error in errorList:
  print "ERROR %s: %s" % error

DIRAC.exit( exitCode )
| Sbalbp/DIRAC | Interfaces/scripts/dirac-wms-job-logging-info.py | Python | gpl-3.0 | 1,187 | [
"DIRAC"
] | 0edf266aca3fd00726baeb62a972b3ab24dd206eb50bfe21dcd55ba57a2dd9c7 |
"""
Utilities for scripts
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import functools
import os
import shlex
import subprocess
import sys
import time
import humanize
import requests
import yaml
import pysam
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
def getPathOfExecutable(executable):
    """
    Returns the full path of the executable, or None if the executable
    can not be found.
    """
    # Split PATH on os.pathsep (':' on POSIX, ';' on Windows) instead of
    # a hard-coded ':' so the lookup is portable.
    exe_paths = os.environ['PATH'].split(os.pathsep)
    for exe_path in exe_paths:
        exe_file = os.path.join(exe_path, executable)
        # Require a regular file with the executable bit set.
        if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
            return exe_file
    return None
def requireExecutables(executables):
    """
    Check that all of the given executables are on the path.
    If at least one of them is not, exit the script and inform
    the user of the missing requirement(s).
    """
    missingExecutables = [
        executable for executable in executables
        if getPathOfExecutable(executable) is None]
    if len(missingExecutables) > 0:
        log("In order to run this script, the following "
            "executables need to be on the path:")
        # Use log() for the names too, instead of a bare print(),
        # so all output goes through the same channel.
        for missingExecutable in missingExecutables:
            log(missingExecutable)
        # sys.exit instead of the site-provided exit() builtin, which is
        # not guaranteed to be available (e.g. under python -S).
        sys.exit(1)
def ga4ghImportGlue():
    """
    Call this method before importing a ga4gh module in the scripts dir.
    Otherwise, you will be using the installed package instead of
    the development package.
    Assumes a certain directory structure.
    """
    # Append the repository root (two levels up from this file) to sys.path.
    repoRoot = os.path.dirname(
        os.path.dirname(os.path.abspath(__file__)))
    sys.path.append(repoRoot)
def log(message):
    """Write *message* (followed by a newline) to standard output."""
    sys.stdout.write("%s\n" % (message,))
class Timed(object):
    """
    Decorator that times a method, reporting runtime at finish
    """
    def __call__(self, func):
        @functools.wraps(func)
        def timedWrapper(*args, **kwargs):
            # Record wall-clock time around the wrapped call; a report is
            # only printed on successful completion (no try/finally), which
            # matches the original behaviour.
            self.start = time.time()
            returnValue = func(*args, **kwargs)
            self.end = time.time()
            self._report()
            return returnValue
        return timedWrapper

    def _report(self):
        # Human-readable duration plus the exact number of seconds.
        elapsed = self.end - self.start
        log("Finished in {} ({:.2f} seconds)".format(
            humanize.time.naturaldelta(elapsed), elapsed))
class FileDownloader(object):
    """
    Base class for file downloaders of different protocols
    """
    # Default destination for progress output.
    defaultStream = sys.stdout

    def __init__(self, url, path, stream=defaultStream):
        # url: source location; path: local destination file.
        self.url = url
        self.path = path
        self.basename = path
        self.basenameLength = len(self.basename)
        self.stream = stream
        # Number of bytes counted so far (updated by subclasses).
        self.bytesReceived = 0
        self.displayIndex = 0
        self.displayWindowSize = 20
        # fileSize stays None when the server does not report a length.
        self.fileSize = None
        self.displayCounter = 0

    def _printStartDownloadMessage(self):
        # Announce the transfer before any progress lines are written.
        self.stream.write("Downloading '{}' to '{}'\n".format(
            self.url, self.path))

    def _cleanUp(self):
        # Terminate the \r-based progress line with a real newline.
        self.stream.write("\n")
        self.stream.flush()

    def _getFileNameDisplayString(self):
        # Intended to shorten long names to the display window; currently
        # returns the full name in both branches.
        if self.basenameLength <= self.displayWindowSize:
            return self.basename
        else:
            return self.basename  # TODO scrolling window here

    def _updateDisplay(self, modulo=1):
        # Only refresh the display every `modulo` calls to limit output.
        self.displayCounter += 1
        if self.displayCounter % modulo != 0:
            return
        fileName = self._getFileNameDisplayString()
        if self.fileSize is None:
            # Unknown total size: show a running byte count only.
            displayString = "{} bytes received: {}\r"
            bytesReceived = humanize.filesize.naturalsize(
                self.bytesReceived)
            self.stream.write(displayString.format(
                fileName, bytesReceived))
        else:
            # TODO contentlength seems to slightly under-report how many
            # bytes we have to download... hence the min functions
            percentage = min(self.bytesReceived / self.fileSize, 1)
            numerator = humanize.filesize.naturalsize(
                min(self.bytesReceived, self.fileSize))
            denominator = humanize.filesize.naturalsize(
                self.fileSize)
            displayString = "{} {:<6.2%} ({:>9} / {:<9})\r"
            self.stream.write(displayString.format(
                fileName, percentage, numerator, denominator))
        self.stream.flush()
class HttpFileDownloader(FileDownloader):
    """
    Provides a wget-like file download and terminal display for HTTP
    """
    defaultChunkSize = 1048576  # 1MB

    def __init__(self, url, path, chunkSize=defaultChunkSize,
                 stream=FileDownloader.defaultStream):
        super(HttpFileDownloader, self).__init__(
            url, path, stream)
        self.chunkSize = chunkSize

    def download(self):
        """Download self.url to self.path, reporting progress on the stream.

        Raises requests.HTTPError on a non-2xx response.
        """
        self._printStartDownloadMessage()
        response = requests.get(self.url, stream=True)
        response.raise_for_status()
        try:
            contentLength = int(response.headers['content-length'])
            self.fileSize = contentLength
        except KeyError:
            # chunked transfer encoding: total size unknown, so the
            # display falls back to a plain byte counter.
            pass
        with open(self.path, 'wb') as outputFile:
            for chunk in response.iter_content(chunk_size=self.chunkSize):
                # Count the bytes actually received. The final chunk is
                # usually shorter than chunkSize; the previous
                # `+= self.chunkSize` over-reported progress past 100%.
                self.bytesReceived += len(chunk)
                self._updateDisplay()
                outputFile.write(chunk)
        self._cleanUp()
def runCommandSplits(splits, silent=False):
    """
    Run a shell command given the command's parsed command line
    """
    try:
        if silent:
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(splits, stdout=devnull, stderr=devnull)
        else:
            subprocess.check_call(splits)
    # `except E as e` is valid on Python 2.6+ and 3; the old
    # `except OSError, e` form is a SyntaxError on Python 3.
    except OSError as e:
        if e.errno == 2:  # cmd not found
            raise Exception(
                "Can't find command while trying to run {}".format(splits))
        else:
            raise
def runCommand(command, silent=False):
    """
    Run a shell command
    """
    # Tokenise the command line with shell-like rules, then delegate.
    runCommandSplits(shlex.split(command), silent=silent)
def getAuthValues(filePath='scripts/auth.yml'):
    """
    Return the script authentication file as a dictionary
    """
    # Thin wrapper; the default path is relative to the repository root,
    # so this is expected to be run from there.
    return getYamlDocument(filePath)
def getYamlDocument(filePath):
    """
    Return a yaml file's contents as a dictionary
    """
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from the file; prefer yaml.safe_load if
    # the file could ever come from an untrusted source.
    with open(filePath) as stream:
        doc = yaml.load(stream)
        return doc
class AlignmentFileConstants(object):
    """
    A container class for constants dealing with alignment files
    """
    # File format / extension identifiers used by AlignmentFileTool.
    SAM = "SAM"
    BAM = "BAM"
    BAI = "BAI"
class AlignmentFileTool(object):
    """
    Helps with operations on BAM and SAM files
    """
    def __init__(self, inputFileFormat, outputFileFormat):
        # Formats are values from AlignmentFileConstants (SAM or BAM).
        self.inputFileFormat = inputFileFormat
        self.outputFileFormat = outputFileFormat
        self.args = None

    def parseArgs(self):
        """
        Parse the command line arguments and store them in self.args.
        """
        description = "{} to {} conversion tool".format(
            self.inputFileFormat, self.outputFileFormat)
        parser = argparse.ArgumentParser(
            description=description)
        inputHelpText = "the name of the {} file to read".format(
            self.inputFileFormat)
        parser.add_argument(
            "inputFile", help=inputHelpText)
        outputHelpText = "the name of the {} file to write".format(
            self.outputFileFormat)
        defaultOutputFilePath = "out.{}".format(
            self.outputFileFormat.lower())
        parser.add_argument(
            "--outputFile", "-o", default=defaultOutputFilePath,
            help=outputHelpText)
        # type=int so command-line values are converted; previously a
        # CLI-supplied value stayed a string and crashed the line loop
        # in convert().
        parser.add_argument(
            "--numLines", "-n", default=10, type=int,
            help="the number of lines to write")
        parser.add_argument(
            "--skipIndexing", default=False, action='store_true',
            help="don't create an index file")
        args = parser.parse_args()
        self.args = args

    def _openMode(self, fileFormat, isInput):
        """
        Return the pysam open-mode string for fileFormat. Raises
        ValueError for unsupported formats; previously an unknown format
        fell through and triggered an UnboundLocalError in convert().
        """
        if fileFormat == AlignmentFileConstants.SAM:
            return "r" if isInput else "wh"
        elif fileFormat == AlignmentFileConstants.BAM:
            return "rb" if isInput else "wb"
        raise ValueError("Unsupported format: {}".format(fileFormat))

    def convert(self):
        """
        Copy the first numLines reads from inputFile to outputFile,
        optionally indexing the resulting BAM file.
        """
        # set flags
        inputFlags = self._openMode(self.inputFileFormat, True)
        outputFlags = self._openMode(self.outputFileFormat, False)
        # open files
        inputFile = pysam.AlignmentFile(
            self.args.inputFile, inputFlags)
        outputFile = pysam.AlignmentFile(
            self.args.outputFile, outputFlags, header=inputFile.header)
        outputFilePath = outputFile.filename
        log("Creating alignment file '{}'".format(outputFilePath))
        # write new file; next(it)/range() work on both py2 and py3,
        # unlike the removed it.next()/xrange() (py2-only)
        for _ in range(int(self.args.numLines)):
            alignedSegment = next(inputFile)
            outputFile.write(alignedSegment)
        # clean up
        inputFile.close()
        outputFile.close()
        # create index file
        if (not self.args.skipIndexing and
                self.outputFileFormat == AlignmentFileConstants.BAM):
            indexFilePath = "{}.{}".format(
                outputFilePath, AlignmentFileConstants.BAI.lower())
            log("Creating index file '{}'".format(indexFilePath))
            pysam.index(outputFilePath)
| ohsu-computational-biology/server | scripts/utils.py | Python | apache-2.0 | 9,341 | [
"pysam"
] | 75c7eb136cad1782806567ef26f99ae668f748b16928fd581fd4b06ecb84178d |
"""
End-to-end test for cohorted courseware. This uses both Studio and LMS.
"""
import json
from nose.plugins.attrib import attr
from studio.base_studio_test import ContainerBase
from ..pages.studio.settings_group_configurations import GroupConfigurationsPage
from ..pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ..fixtures.course import XBlockFixtureDesc
from ..fixtures import LMS_BASE_URL
from ..pages.studio.component_editor import ComponentVisibilityEditorView
from ..pages.lms.instructor_dashboard import InstructorDashboardPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from ..tests.lms.test_lms_user_preview import verify_expected_problem_visibility
from bok_choy.promise import EmptyPromise
from bok_choy.page_object import XSS_INJECTION
@attr('shard_5')
class EndToEndCohortedCoursewareTest(ContainerBase):
    """
    End-to-end test of content-group visibility: content linked to a
    content group is only shown to students in a cohort mapped to that
    group. Exercises Studio (authoring) and LMS (viewing) together.
    """
    def setUp(self, is_staff=True):
        super(EndToEndCohortedCoursewareTest, self).setUp(is_staff=is_staff)
        self.staff_user = self.user

        # XSS_INJECTION is appended to catch unescaped rendering of
        # content group names anywhere in the UI.
        self.content_group_a = "Content Group A" + XSS_INJECTION
        self.content_group_b = "Content Group B" + XSS_INJECTION

        # Create a student who will be in "Cohort A"
        self.cohort_a_student_username = "cohort_a_student"
        self.cohort_a_student_email = "cohort_a_student@example.com"
        StudioAutoAuthPage(
            self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
        ).visit()

        # Create a student who will be in "Cohort B"
        self.cohort_b_student_username = "cohort_b_student"
        self.cohort_b_student_email = "cohort_b_student@example.com"
        StudioAutoAuthPage(
            self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
        ).visit()

        # Create a student who will end up in the default cohort group
        self.cohort_default_student_username = "cohort_default_student"
        self.cohort_default_student_email = "cohort_default_student@example.com"
        StudioAutoAuthPage(
            self.browser, username=self.cohort_default_student_username,
            email=self.cohort_default_student_email, no_login=True
        ).visit()

        # Start logged in as the staff user.
        StudioAutoAuthPage(
            self.browser, username=self.staff_user["username"], email=self.staff_user["email"]
        ).visit()

    def populate_course_fixture(self, course_fixture):
        """
        Populate the children of the test course fixture.
        """
        # Four problems: one per content group, one in both, one public.
        self.group_a_problem = 'GROUP A CONTENT'
        self.group_b_problem = 'GROUP B CONTENT'
        self.group_a_and_b_problem = 'GROUP A AND B CONTENT'
        self.visible_to_all_problem = 'VISIBLE TO ALL CONTENT'

        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('problem', self.group_a_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.group_b_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.group_a_and_b_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.visible_to_all_problem, data='<problem></problem>')
                    )
                )
            )
        )

    def enable_cohorting(self, course_fixture):
        """
        Enables cohorting for the current course.
        """
        url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings'  # pylint: disable=protected-access
        data = json.dumps({'is_cohorted': True})
        response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
        self.assertTrue(response.ok, "Failed to enable cohorts")

    def create_content_groups(self):
        """
        Creates two content groups in Studio Group Configurations Settings.
        """
        group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        group_configurations_page.visit()

        group_configurations_page.create_first_content_group()
        config = group_configurations_page.content_groups[0]
        config.name = self.content_group_a
        config.save()

        group_configurations_page.add_content_group()
        config = group_configurations_page.content_groups[1]
        config.name = self.content_group_b
        config.save()

    def link_problems_to_content_groups_and_publish(self):
        """
        Updates 3 of the 4 existing problems to limit their visibility by content group.
        Publishes the modified units.
        """
        container_page = self.go_to_unit_page()

        def set_visibility(problem_index, content_group, second_content_group=None):
            # Restrict the problem at `problem_index` to the given
            # content group(s) via the visibility editor dialog.
            problem = container_page.xblocks[problem_index]
            problem.edit_visibility()
            if second_content_group:
                ComponentVisibilityEditorView(self.browser, problem.locator).select_option(
                    second_content_group, save=False
                )
            ComponentVisibilityEditorView(self.browser, problem.locator).select_option(content_group)

        # Index 0 is the unit itself; problems start at index 1.
        set_visibility(1, self.content_group_a)
        set_visibility(2, self.content_group_b)
        set_visibility(3, self.content_group_a, self.content_group_b)

        container_page.publish_action.click()

    def create_cohorts_and_assign_students(self):
        """
        Adds 2 manual cohorts, linked to content groups, to the course.
        Each cohort is assigned one student.
        """
        instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
        instructor_dashboard_page.visit()
        cohort_management_page = instructor_dashboard_page.select_cohort_management()

        def add_cohort_with_student(cohort_name, content_group, student):
            cohort_management_page.add_cohort(cohort_name, content_group=content_group)
            # After adding the cohort, it should automatically be selected
            EmptyPromise(
                lambda: cohort_name == cohort_management_page.get_selected_cohort(), "Waiting for new cohort"
            ).fulfill()
            cohort_management_page.add_students_to_selected_cohort([student])

        add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
        add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)

    def view_cohorted_content_as_different_users(self):
        """
        View content as staff, student in Cohort A, student in Cohort B, and student in Default Cohort.
        """
        courseware_page = CoursewarePage(self.browser, self.course_id)

        def login_and_verify_visible_problems(username, email, expected_problems):
            LmsAutoAuthPage(
                self.browser, username=username, email=email, course_id=self.course_id
            ).visit()
            courseware_page.visit()
            verify_expected_problem_visibility(self, courseware_page, expected_problems)

        # Staff see everything.
        login_and_verify_visible_problems(
            self.staff_user["username"], self.staff_user["email"],
            [self.group_a_problem, self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
        )

        login_and_verify_visible_problems(
            self.cohort_a_student_username, self.cohort_a_student_email,
            [self.group_a_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
        )

        login_and_verify_visible_problems(
            self.cohort_b_student_username, self.cohort_b_student_email,
            [self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
        )

        # Default-cohort students only see ungated content.
        login_and_verify_visible_problems(
            self.cohort_default_student_username, self.cohort_default_student_email,
            [self.visible_to_all_problem]
        )

    def test_cohorted_courseware(self):
        """
        Scenario: Can create content that is only visible to students in particular cohorts
        Given that I have course with 4 problems, 1 staff member, and 3 students
        When I enable cohorts in the course
        And I create two content groups, Content Group A, and Content Group B, in the course
        And I link one problem to Content Group A
        And I link one problem to Content Group B
        And I link one problem to both Content Group A and Content Group B
        And one problem remains unlinked to any Content Group
        And I create two manual cohorts, Cohort A and Cohort B,
        linked to Content Group A and Content Group B, respectively
        And I assign one student to each manual cohort
        And one student remains in the default cohort
        Then the staff member can see all 4 problems
        And the student in Cohort A can see all the problems except the one linked to Content Group B
        And the student in Cohort B can see all the problems except the one linked to Content Group A
        And the student in the default cohort can ony see the problem that is unlinked to any Content Group
        """
        self.enable_cohorting(self.course_fixture)
        self.create_content_groups()
        self.link_problems_to_content_groups_and_publish()
        self.create_cohorts_and_assign_students()
        self.view_cohorted_content_as_different_users()
| solashirai/edx-platform | common/test/acceptance/tests/test_cohorted_courseware.py | Python | agpl-3.0 | 9,833 | [
"VisIt"
] | a75b1eb84799bcbb266fc21926b6fd609fba7ab5e217277bf4d90b879c3d3e4f |
# Licensed under GPL version 3 - see LICENSE.rst
'''Add additional scattering to processed photons.
Classes in this file add additional scattering in a statistical sense.
The classes in this module do not trace the photon to a specific location, they
just add the scatter at the point of the last interaction. For example,
reflection from a (flat) mirror is implemented as a perfect reflection, but in
practice there is some roughness to the mirror that adds a small Gaussian blur
to the reflection. To represent this, the point of origin of the ray remains
unchanged, but a small random change is added to the direction vector.
'''
import numpy as np
from warnings import warn
from ..math.utils import e2h, h2e, norm_vector
from ..math.rotations import axangle2mat
from ..math.polarization import parallel_transport
from .base import FlatOpticalElement
class RadialMirrorScatter(FlatOpticalElement):
    '''Add scatter to any sort of radial mirror.

    Scatter is added in the plane of reflection, which is defined here
    as the plane which contains (i) the current direction the ray and (ii) the
    vector connecting the center of the `RadialMirrorScatter` element and the
    point of last interaction for the ray.
    Scatter can also be added perpendicular to the plane-of-reflection.

    Parameters
    ----------
    inplanescatter : float
        sigma of Gaussian for in-plane scatter [in radian]
    perpplanescatter : float
        sigma of Gaussian for scatter perpendicular to the plane of reflection
        [in radian] (default = 0)
    '''
    def __init__(self, **kwargs):
        self.inplanescatter = kwargs.pop('inplanescatter')  # in rad
        self.perpplanescatter = kwargs.pop('perpplanescatter', 0.)  # in rad
        super(RadialMirrorScatter, self).__init__(**kwargs)

    def specific_process_photons(self, photons, intersect, interpos, intercoos):
        # Number of photons that intersect this element.
        n = intersect.sum()
        # Element center (euclidean part of the 4d position matrix).
        center = self.pos4d[:-1, -1]
        # Radial vector from element center to the interaction point.
        radial = h2e(photons['pos'][intersect].data) - center
        # Normal of the plane of reflection (dir x radial).
        perpplane = np.cross(h2e(photons['dir'][intersect].data), radial)
        # np.random.normal does not work with scale=0
        # so special case that here.
        if self.inplanescatter != 0:
            inplaneangle = np.random.normal(loc=0., scale=self.inplanescatter, size=n)
            # Rotate each direction by its angle about the plane normal.
            rot = axangle2mat(perpplane, inplaneangle)
            outdir = e2h(np.einsum('...ij,...i->...j', rot, h2e(photons['dir'][intersect])), 0)
        else:
            inplaneangle = np.zeros(n)
            outdir = photons['dir'][intersect]
        if self.perpplanescatter !=0:
            perpangle = np.random.normal(loc=0., scale=self.perpplanescatter, size=n)
            # Perpendicular scatter rotates about the radial vector instead.
            rot = axangle2mat(radial, perpangle)
            outdir = e2h(np.einsum('...ij,...i->...j', rot, h2e(outdir)), 0)
        else:
            perpangle = np.zeros_like(inplaneangle)
        # Keep the polarization vector consistent with the new direction.
        pol = parallel_transport(photons['dir'].data[intersect, :], outdir,
                                 photons['polarization'].data[intersect, :])
        return {'dir': outdir, 'polarization': pol,
                'inplanescatter': inplaneangle, 'perpplanescatter': perpangle}
class RandomGaussianScatter(FlatOpticalElement):
    '''Add scatter to any sort of radial mirror.

    This element scatters rays by a small angle, drawn from a Gaussian
    distribution. The direction of the scatter is random.

    Parameters
    ----------
    scatter : float or callable
        If this is a number, scattering angles will be drawn from a Gaussian
        with the given sigma [in radian]. For a variable scatter, this can be a
        function with the following call signature: ``angle = func(photons,
        intersect, interpos, intercoos)``. The function should return an array
        of angles, containing one angle for each intersecting photon. A function
        passed in for this parameter can make the scattering time, location, or
        energy-dependent.
    '''
    # Name of the output column recording the drawn scatter angles;
    # set to None in a subclass to suppress the column.
    scattername = 'scatter'

    def __init__(self, **kwargs):
        if 'scatter' in kwargs:
            # A class-level `scatter` (set by a subclass) is overridden
            # by an explicit keyword argument; warn so it is noticed.
            if hasattr(self, 'scatter'):
                warn('Overriding class level "scatter" definition.')
            self.scatter = kwargs.pop('scatter')  # in rad
        else:
            if not hasattr(self, 'scatter'):
                raise ValueError('Keyword "scatter" missing.')
        super().__init__(**kwargs)

    def specific_process_photons(self, photons, intersect, interpos, intercoos):
        n = intersect.sum()
        # np.random.normal does not work with scale=0
        # so special case that here.
        if self.scatter == 0:
            angle = np.zeros(n)
            out = {}
        else:
            pdir = norm_vector(h2e(photons['dir'][intersect].data))
            # Now, find a direction that is perpendicular to the photon direction
            # Any perpendicular direction will do
            # Start by making a set of vectors that at least are not parallel
            # to the photon direction
            guessvec = np.zeros_like(pdir)
            ind = np.abs(pdir[:, 0]) < 0.99999
            guessvec[ind, 0] = 1
            guessvec[~ind, 1] = 1
            perpvec = np.cross(pdir, guessvec)
            if callable(self.scatter):
                angle = self.scatter(photons, intersect, interpos, intercoos)
            else:
                angle = np.random.normal(loc=0., scale=self.scatter, size=n)
            # Tilt each ray by its scatter angle about the perpendicular.
            rot = axangle2mat(perpvec, angle)
            outdir = np.einsum('...ij,...i->...j', rot, pdir)
            # Now rotate result by up to 2 pi to randomize direction
            angle = np.random.uniform(size=n) * 2 * np.pi
            rot = axangle2mat(pdir, angle)
            outdir = e2h(np.einsum('...ij,...i->...j', rot, outdir), 0)
            # Keep polarization consistent with the new direction.
            pol = parallel_transport(photons['dir'].data[intersect, :], outdir,
                                     photons['polarization'].data[intersect, :])
            out = {'dir': outdir, 'polarization': pol}
        if self.scattername is not None:
            out[self.scattername] = angle
        return out
| hamogu/marxs | marxs/optics/scatter.py | Python | gpl-3.0 | 6,078 | [
"Gaussian"
] | e43f423d7cd55bcf8f521c54aee1359f1ce6d88fb37fd32e54cc77050bbc53d6 |
import sys
from pyneuroml import pynml
####################################################################
# Choose a LEMS/NeuroML2 file and run it with jNeuroML
example_lems_file = 'LEMS_NML2_Ex5_DetCell.xml'
results1 = pynml.run_lems_with_jneuroml(example_lems_file, nogui=True, load_saved_data=True)

####################################################################
# Convert LEMS/NeuroML2 file to NEURON with jNeuroML & run
if not '-noneuron' in sys.argv:  # To allow skipping of this for ease of testing
    results2 = pynml.run_lems_with_jneuroml_neuron(example_lems_file, nogui=True, load_saved_data=True)

####################################################################
# Reload & plot results
if not '-nogui' in sys.argv:
    from matplotlib import pyplot as plt
    # Each key except 't' is a recorded quantity; plot both simulators
    # on the same axes so their traces can be compared directly.
    for key in results1.keys():
        plt.xlabel('Time (ms)')
        plt.ylabel('...')
        plt.grid('on')
        if key != 't':
            plt.plot(results1['t'],results1[key], label="jNeuroML: "+key)
            if not '-noneuron' in sys.argv:
                plt.plot(results2['t'],results2[key], label="jNeuroML_NEURON: "+key)
    plt.legend(loc=2, fontsize = 'x-small')
    plt.show()
| rgerkin/pyNeuroML | examples/run_jneuroml_plot_matplotlib.py | Python | lgpl-3.0 | 1,213 | [
"NEURON"
] | db4fdcd3ee3f47fde75849cabe1cd23d4114256a92e8368818bc1d3b0e9f9eac |
"""
Copyright (C) 2014, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Maintainer: Rudolf Streif (rstreif@jaguarlandrover.com)
"""
from django.contrib import admin
from dblog.models import GeneralLog, SotaLog
class LogAdmin(admin.ModelAdmin):
    """
    Administration view for logging.
    """
    list_display = ('time_seconds', 'level', 'message')
    readonly_fields = ('time', 'level', 'message')
    list_filter = ['time', 'level']
    search_fields = ['message',]

    def has_add_permission(self, request):
        # Log records are created by the logging machinery, never by hand.
        return False

    def time_seconds(self, obj):
        # Render the timestamp with second precision for list_display.
        return obj.time.strftime("%Y-%m-%d %H:%M:%S")
    time_seconds.short_description = 'Timestamp'

    class Meta:
        # NOTE(review): `Meta.abstract` is a Model concept; ModelAdmin does
        # not consult an inner Meta, so this appears to be a no-op copied
        # from a model definition — confirm before removing.
        abstract = True
class GeneralLogAdmin(LogAdmin):
    """
    Administration view for general logging; inherits all behaviour
    from LogAdmin unchanged.
    """
class SotaLogAdmin(LogAdmin):
    """
    Administration view for SOTA (software-over-the-air) logging;
    inherits all behaviour from LogAdmin unchanged.
    """
# Register the log models with the Django admin site so they show up
# in the admin UI with the views defined above.
admin.site.register(GeneralLog, GeneralLogAdmin)
admin.site.register(SotaLog, SotaLogAdmin)
| rstreif/rvi_backend | web/dblog/admin.py | Python | mpl-2.0 | 1,207 | [
"Jaguar"
] | 651bc0eac24c74a2211244ef94cea30705d50b77181e1cbf90aef75056cc5235 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import os
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
def _sparse_id_tensor(shape, vocab_size, seed=112123):
  # Returns an arbitrary `SparseTensor` with the given shape and vocab size.
  np.random.seed(seed)
  all_indices = np.array(list(itertools.product(*[range(dim) for dim in shape])))

  # Draw from [0, vocab_size] inclusive; entries equal to vocab_size are
  # outside the vocabulary and get dropped below, which creates sparsity.
  raw_values = np.random.randint(0, vocab_size + 1, size=np.prod(shape))
  in_vocab = raw_values < vocab_size

  return sparse_tensor_lib.SparseTensor(
      indices=all_indices[in_vocab],
      values=raw_values[in_vocab],
      dense_shape=shape)
class FeatureColumnTest(test.TestCase):
def testImmutability(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
with self.assertRaises(AttributeError):
a.column_name = "bbb"
def testSparseColumnWithHashBucket(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.string)
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.int64)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.float32)
def testSparseColumnWithVocabularyFile(self):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454)
self.assertEqual(b.dtype, dtypes.string)
self.assertEqual(b.lookup_config.vocab_size, 454)
self.assertEqual(b.lookup_config.vocabulary_file, "a_file")
with self.assertRaises(ValueError):
# Vocabulary size should be defined if vocabulary_file is used.
fc.sparse_column_with_vocabulary_file("bbb", vocabulary_file="somefile")
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.int64)
self.assertEqual(b.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.float32)
def testWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertEqual(weighted_ids.name, "ids_weighted_by_weights")
def testWeightedSparseColumnDeepCopy(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted = fc.weighted_sparse_column(ids, "weights")
weighted_copy = copy.deepcopy(weighted)
self.assertEqual(weighted_copy.sparse_id_column.name, "ids")
self.assertEqual(weighted_copy.weight_column_name, "weights")
self.assertEqual(weighted_copy.name, "ids_weighted_by_weights")
def testEmbeddingColumn(self):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, combiner="sum")
b = fc.embedding_column(a, dimension=4, combiner="mean")
self.assertEqual(b.sparse_id_column.name, "aaa")
self.assertEqual(b.dimension, 4)
self.assertEqual(b.combiner, "mean")
def testEmbeddingColumnDeepCopy(self):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, combiner="sum")
column = fc.embedding_column(a, dimension=4, combiner="mean")
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.name, "aaa_embedding")
self.assertEqual(column_copy.sparse_id_column.name, "aaa")
self.assertEqual(column_copy.dimension, 4)
self.assertEqual(column_copy.combiner, "mean")
def testScatteredEmbeddingColumn(self):
column = fc.scattered_embedding_column(
"aaa", size=100, dimension=10, hash_key=1)
self.assertEqual(column.column_name, "aaa")
self.assertEqual(column.size, 100)
self.assertEqual(column.dimension, 10)
self.assertEqual(column.hash_key, 1)
self.assertEqual(column.name, "aaa_scattered_embedding")
def testScatteredEmbeddingColumnDeepCopy(self):
column = fc.scattered_embedding_column(
"aaa", size=100, dimension=10, hash_key=1)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.column_name, "aaa")
self.assertEqual(column_copy.size, 100)
self.assertEqual(column_copy.dimension, 10)
self.assertEqual(column_copy.hash_key, 1)
self.assertEqual(column_copy.name, "aaa_scattered_embedding")
  def testSharedEmbeddingColumn(self):
    """Columns built by shared_embedding_columns share one embedding.

    Verifies the auto-generated shared_embedding_name, that lookups of the
    same ids through either column yield identical embeddings, and that an
    explicit shared_embedding_name ties otherwise-unrelated columns to the
    same embedding variable.
    """
    a1 = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
    a2 = fc.sparse_column_with_keys("a2", ["marlo", "omar", "stringer"])
    b = fc.shared_embedding_columns([a1, a2], dimension=4, combiner="mean")
    self.assertEqual(len(b), 2)
    # Auto-generated name is derived from both source column names.
    self.assertEqual(b[0].shared_embedding_name, "a1_a2_shared_embedding")
    self.assertEqual(b[1].shared_embedding_name, "a1_a2_shared_embedding")
    # Create a sparse id tensor for a1.
    input_tensor_c1 = sparse_tensor_lib.SparseTensor(
        indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
    # Create a sparse id tensor for a2.
    input_tensor_c2 = sparse_tensor_lib.SparseTensor(
        indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
    # Build both input layers under one scope so both columns resolve to the
    # same (shared) embedding variable.
    with variable_scope.variable_scope("run_1"):
      b1 = feature_column_ops.input_from_feature_columns({
          b[0]: input_tensor_c1
      }, [b[0]])
      b2 = feature_column_ops.input_from_feature_columns({
          b[1]: input_tensor_c2
      }, [b[1]])
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      b1_value = b1.eval()
      b2_value = b2.eval()
    # Same ids through either shared column must give identical embeddings.
    for i in range(len(b1_value)):
      self.assertAllClose(b1_value[i], b2_value[i])
    # Test the case when a shared_embedding_name is explicitly specified.
    d = fc.shared_embedding_columns(
        [a1, a2],
        dimension=4,
        combiner="mean",
        shared_embedding_name="my_shared_embedding")
    # a3 is a completely different sparse column with a1 and a2, but since the
    # same shared_embedding_name is passed in, a3 will have the same embedding
    # as a1 and a2
    a3 = fc.sparse_column_with_keys("a3", [42, 1, -1000], dtype=dtypes.int32)
    e = fc.shared_embedding_columns(
        [a3],
        dimension=4,
        combiner="mean",
        shared_embedding_name="my_shared_embedding")
    with variable_scope.variable_scope("run_2"):
      d1 = feature_column_ops.input_from_feature_columns({
          d[0]: input_tensor_c1
      }, [d[0]])
      e1 = feature_column_ops.input_from_feature_columns({
          e[0]: input_tensor_c1
      }, [e[0]])
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      d1_value = d1.eval()
      e1_value = e1.eval()
    # Explicit shared name: embeddings must match across d and e as well.
    for i in range(len(d1_value)):
      self.assertAllClose(d1_value[i], e1_value[i])
def testSharedEmbeddingColumnWithWeightedSparseColumn(self):
# Tests creation of shared embeddings containing weighted sparse columns.
sparse_col = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_sparse_col = fc.weighted_sparse_column(ids, "weights")
self.assertEqual(weighted_sparse_col.name, "ids_weighted_by_weights")
b = fc.shared_embedding_columns([sparse_col, weighted_sparse_col],
dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(b[0].shared_embedding_name,
"a1_ids_weighted_by_weights_shared_embedding")
self.assertEqual(b[1].shared_embedding_name,
"a1_ids_weighted_by_weights_shared_embedding")
# Tries reversing order to check compatibility condition.
b = fc.shared_embedding_columns([weighted_sparse_col, sparse_col],
dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(b[0].shared_embedding_name,
"a1_ids_weighted_by_weights_shared_embedding")
self.assertEqual(b[1].shared_embedding_name,
"a1_ids_weighted_by_weights_shared_embedding")
# Tries adding two weighted columns to check compatibility between them.
weighted_sparse_col_2 = fc.weighted_sparse_column(ids, "weights_2")
b = fc.shared_embedding_columns([weighted_sparse_col,
weighted_sparse_col_2],
dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(
b[0].shared_embedding_name,
"ids_weighted_by_weights_ids_weighted_by_weights_2_shared_embedding"
)
self.assertEqual(
b[1].shared_embedding_name,
"ids_weighted_by_weights_ids_weighted_by_weights_2_shared_embedding"
)
def testSharedEmbeddingColumnDeterminism(self):
# Tests determinism in auto-generated shared_embedding_name.
sparse_id_columns = tuple([
fc.sparse_column_with_keys(k, ["foo", "bar"])
for k in ["07", "02", "00", "03", "05", "01", "09", "06", "04", "08"]
])
output = fc.shared_embedding_columns(
sparse_id_columns, dimension=2, combiner="mean")
self.assertEqual(len(output), 10)
for x in output:
self.assertEqual(x.shared_embedding_name,
"00_01_02_plus_7_others_shared_embedding")
def testSharedEmbeddingColumnErrors(self):
# Tries passing in a string.
with self.assertRaises(TypeError):
invalid_string = "Invalid string."
fc.shared_embedding_columns(invalid_string, dimension=2, combiner="mean")
# Tries passing in a set of sparse columns.
with self.assertRaises(TypeError):
invalid_set = set([
fc.sparse_column_with_keys("a", ["foo", "bar"]),
fc.sparse_column_with_keys("b", ["foo", "bar"]),
])
fc.shared_embedding_columns(invalid_set, dimension=2, combiner="mean")
def testSharedEmbeddingColumnDeepCopy(self):
a1 = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
a2 = fc.sparse_column_with_keys("a2", ["marlo", "omar", "stringer"])
columns = fc.shared_embedding_columns(
[a1, a2], dimension=4, combiner="mean")
columns_copy = copy.deepcopy(columns)
self.assertEqual(
columns_copy[0].shared_embedding_name, "a1_a2_shared_embedding")
self.assertEqual(
columns_copy[1].shared_embedding_name, "a1_a2_shared_embedding")
def testOneHotColumn(self):
a = fc.sparse_column_with_keys("a", ["a", "b", "c", "d"])
onehot_a = fc.one_hot_column(a)
self.assertEqual(onehot_a.sparse_id_column.name, "a")
self.assertEqual(onehot_a.length, 4)
b = fc.sparse_column_with_hash_bucket(
"b", hash_bucket_size=100, combiner="sum")
onehot_b = fc.one_hot_column(b)
self.assertEqual(onehot_b.sparse_id_column.name, "b")
self.assertEqual(onehot_b.length, 100)
def testOneHotReshaping(self):
"""Tests reshaping behavior of `OneHotColumn`."""
id_tensor_shape = [3, 2, 4, 5]
sparse_column = fc.sparse_column_with_keys(
"animals", ["squirrel", "moose", "dragon", "octopus"])
one_hot = fc.one_hot_column(sparse_column)
vocab_size = len(sparse_column.lookup_config.keys)
id_tensor = _sparse_id_tensor(id_tensor_shape, vocab_size)
for output_rank in range(1, len(id_tensor_shape) + 1):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
one_hot_output = one_hot._to_dnn_input_layer(
id_tensor, output_rank=output_rank)
with self.test_session() as sess:
one_hot_value = sess.run(one_hot_output)
expected_shape = (id_tensor_shape[:output_rank - 1] + [vocab_size])
self.assertEquals(expected_shape, list(one_hot_value.shape))
def testOneHotColumnForWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
one_hot = fc.one_hot_column(weighted_ids)
self.assertEqual(one_hot.sparse_id_column.name, "ids_weighted_by_weights")
self.assertEqual(one_hot.length, 3)
  def testMissingValueInOneHotColumnForWeightedSparseColumn(self):
    """Out-of-vocabulary ids contribute zero to the weighted one-hot output."""
    # Github issue 12583
    ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
    weighted_ids = fc.weighted_sparse_column(ids, "weights")
    one_hot = fc.one_hot_column(weighted_ids)
    features = {
        'ids': constant_op.constant([['marlo', 'unknown', 'omar']]),
        'weights': constant_op.constant([[2., 4., 6.]])
    }
    one_hot_tensor = feature_column_ops.input_from_feature_columns(
        features, [one_hot])
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      # The keys->id mapping is backed by a lookup table, which needs its own
      # initializer in addition to the variables initializer.
      sess.run(lookup_ops.tables_initializer())
      # 'unknown' is out of vocabulary, so its weight (4.) is dropped.
      self.assertAllEqual([[2., 6., 0.]], one_hot_tensor.eval())
  def testMissingValueInOneHotColumnForSparseColumnWithKeys(self):
    """Out-of-vocabulary ids contribute zero to an unweighted one-hot output."""
    ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
    one_hot = fc.one_hot_column(ids)
    features = {
        'ids': constant_op.constant([['marlo', 'unknown', 'omar']])
    }
    one_hot_tensor = feature_column_ops.input_from_feature_columns(
        features, [one_hot])
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      # The keys->id mapping is a lookup table; initialize it explicitly.
      sess.run(lookup_ops.tables_initializer())
      # 'unknown' is out of vocabulary and produces no activation.
      self.assertAllEqual([[1., 1., 0.]], one_hot_tensor.eval())
def testOneHotColumnDeepCopy(self):
a = fc.sparse_column_with_keys("a", ["a", "b", "c", "d"])
column = fc.one_hot_column(a)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.sparse_id_column.name, "a")
self.assertEqual(column.name, "a_one_hot")
self.assertEqual(column.length, 4)
  def testRealValuedVarLenColumn(self):
    """Attribute defaults and validation of _real_valued_var_len_column."""
    c = fc._real_valued_var_len_column("ccc", is_sparse=True)
    self.assertTrue(c.is_sparse)
    self.assertTrue(c.default_value is None)
    # default_value is an integer.
    c5 = fc._real_valued_var_len_column("c5", default_value=2)
    self.assertEqual(c5.default_value, 2)
    # A sparse column with no explicit default keeps default_value=None.
    # (The previous comment said "default_value is a float", but no
    # default_value is passed here.)
    d4 = fc._real_valued_var_len_column("d4", is_sparse=True)
    self.assertEqual(d4.default_value, None)
    self.assertEqual(d4.is_sparse, True)
    # Default value is a list but dimension is None.
    with self.assertRaisesRegexp(ValueError,
                                 "Only scalar default value.*"):
      fc._real_valued_var_len_column("g5", default_value=[2., 3.])
def testRealValuedVarLenColumnDtypes(self):
rvc = fc._real_valued_var_len_column("rvc", is_sparse=True)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.float32)
}, rvc.config)
rvc = fc._real_valued_var_len_column("rvc", default_value=0,
is_sparse=False)
self.assertDictEqual(
{
"rvc": parsing_ops.FixedLenSequenceFeature(shape=[],
dtype=dtypes.float32,
allow_missing=True,
default_value=0.0)
}, rvc.config)
rvc = fc._real_valued_var_len_column("rvc", dtype=dtypes.int32,
default_value=0, is_sparse=True)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, rvc.config)
with self.assertRaisesRegexp(TypeError,
"dtype must be convertible to float"):
fc._real_valued_var_len_column("rvc", dtype=dtypes.string,
default_value="", is_sparse=True)
def testRealValuedColumn(self):
a = fc.real_valued_column("aaa")
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dimension, 1)
b = fc.real_valued_column("bbb", 10)
self.assertEqual(b.dimension, 10)
self.assertTrue(b.default_value is None)
with self.assertRaisesRegexp(TypeError, "dimension must be an integer"):
fc.real_valued_column("d3", dimension=1.0)
with self.assertRaisesRegexp(ValueError,
"dimension must be greater than 0"):
fc.real_valued_column("d3", dimension=0)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("d3", dtype=dtypes.string)
# default_value is an integer.
c1 = fc.real_valued_column("c1", default_value=2)
self.assertListEqual(list(c1.default_value), [2.])
c2 = fc.real_valued_column("c2", default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c2.default_value), [2])
c3 = fc.real_valued_column("c3", dimension=4, default_value=2)
self.assertListEqual(list(c3.default_value), [2, 2, 2, 2])
c4 = fc.real_valued_column(
"c4", dimension=4, default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c4.default_value), [2, 2, 2, 2])
# default_value is a float.
d1 = fc.real_valued_column("d1", default_value=2.)
self.assertListEqual(list(d1.default_value), [2.])
d2 = fc.real_valued_column("d2", dimension=4, default_value=2.)
self.assertListEqual(list(d2.default_value), [2., 2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("d3", default_value=2., dtype=dtypes.int32)
# default_value is neither integer nor float.
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", default_value="string")
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", dimension=3, default_value=[1, 3., "string"])
# default_value is a list of integers.
f1 = fc.real_valued_column("f1", default_value=[2])
self.assertListEqual(list(f1.default_value), [2])
f2 = fc.real_valued_column("f2", dimension=3, default_value=[2, 2, 2])
self.assertListEqual(list(f2.default_value), [2., 2., 2.])
f3 = fc.real_valued_column(
"f3", dimension=3, default_value=[2, 2, 2], dtype=dtypes.int32)
self.assertListEqual(list(f3.default_value), [2, 2, 2])
# default_value is a list of floats.
g1 = fc.real_valued_column("g1", default_value=[2.])
self.assertListEqual(list(g1.default_value), [2.])
g2 = fc.real_valued_column("g2", dimension=3, default_value=[2., 2, 2])
self.assertListEqual(list(g2.default_value), [2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("g3", default_value=[2.], dtype=dtypes.int32)
with self.assertRaisesRegexp(
ValueError, "The length of default_value must be equal to dimension"):
fc.real_valued_column("g4", dimension=3, default_value=[2.])
# Test that the normalizer_fn gets stored for a real_valued_column
normalizer = lambda x: x - 1
h1 = fc.real_valued_column("h1", normalizer=normalizer)
self.assertEqual(normalizer(10), h1.normalizer_fn(10))
# Test that normalizer is not stored within key
self.assertFalse("normalizer" in g1.key)
self.assertFalse("normalizer" in g2.key)
self.assertFalse("normalizer" in h1.key)
def testRealValuedColumnReshaping(self):
"""Tests reshaping behavior of `RealValuedColumn`."""
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
np.random.seed(2222)
input_shape = [batch_size, sequence_length] + dimensions
real_valued_input = np.random.rand(*input_shape)
real_valued_column = fc.real_valued_column("values")
for output_rank in range(1, 3 + len(dimensions)):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
real_valued_output = real_valued_column._to_dnn_input_layer(
constant_op.constant(
real_valued_input, dtype=dtypes.float32),
output_rank=output_rank)
with self.test_session() as sess:
real_valued_eval = sess.run(real_valued_output)
expected_shape = (input_shape[:output_rank - 1] +
[np.prod(input_shape[output_rank - 1:])])
self.assertEquals(expected_shape, list(real_valued_eval.shape))
def testRealValuedColumnDensification(self):
"""Tests densification behavior of `RealValuedColumn`."""
# No default value, dimension 1 float.
real_valued_column = fc._real_valued_var_len_column(
"sparse_real_valued1", is_sparse=True)
sparse_tensor = sparse_tensor_lib.SparseTensor(
values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
with self.assertRaisesRegexp(
ValueError, "Set is_sparse to False"):
real_valued_column._to_dnn_input_layer(sparse_tensor)
def testRealValuedColumnDeepCopy(self):
column = fc.real_valued_column(
"aaa", dimension=3, default_value=[1, 2, 3], dtype=dtypes.int32)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.name, "aaa")
self.assertEqual(column_copy.dimension, 3)
self.assertEqual(column_copy.default_value, (1, 2, 3))
def testBucketizedColumnNameEndsWithUnderscoreBucketized(self):
a = fc.bucketized_column(fc.real_valued_column("aaa"), [0, 4])
self.assertEqual(a.name, "aaa_bucketized")
def testBucketizedColumnRequiresRealValuedColumn(self):
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column("bbb", [0])
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column(
fc.sparse_column_with_integerized_feature(
column_name="bbb", bucket_size=10), [0])
def testBucketizedColumnRequiresRealValuedColumnDimension(self):
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn.*"):
fc.bucketized_column(fc._real_valued_var_len_column("bbb",
is_sparse=True),
[0])
def testBucketizedColumnRequiresSortedBuckets(self):
with self.assertRaisesRegexp(ValueError,
"boundaries must be a sorted list"):
fc.bucketized_column(fc.real_valued_column("ccc"), [5, 0, 4])
def testBucketizedColumnWithSameBucketBoundaries(self):
a_bucketized = fc.bucketized_column(
fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
self.assertEqual(a_bucketized.name, "a_bucketized")
self.assertTupleEqual(a_bucketized.boundaries, (1., 2., 3.))
def testBucketizedColumnDeepCopy(self):
"""Tests that we can do a deepcopy of a bucketized column.
This test requires that the bucketized column also accept boundaries
as tuples.
"""
bucketized = fc.bucketized_column(
fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
self.assertEqual(bucketized.name, "a_bucketized")
self.assertTupleEqual(bucketized.boundaries, (1., 2., 3.))
bucketized_copy = copy.deepcopy(bucketized)
self.assertEqual(bucketized_copy.name, "a_bucketized")
self.assertTupleEqual(bucketized_copy.boundaries, (1., 2., 3.))
def testCrossedColumnNameCreatesSortedNames(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
bucket = fc.bucketized_column(fc.real_valued_column("cost"), [0, 4])
crossed = fc.crossed_column(set([b, bucket, a]), hash_bucket_size=10000)
self.assertEqual("aaa_X_bbb_X_cost_bucketized", crossed.name,
"name should be generated by sorted column names")
self.assertEqual("aaa", crossed.columns[0].name)
self.assertEqual("bbb", crossed.columns[1].name)
self.assertEqual("cost_bucketized", crossed.columns[2].name)
def testCrossedColumnNotSupportRealValuedColumn(self):
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
with self.assertRaisesRegexp(
TypeError, "columns must be a set of _SparseColumn, _CrossedColumn, "
"or _BucketizedColumn instances"):
fc.crossed_column(
set([b, fc.real_valued_column("real")]), hash_bucket_size=10000)
def testCrossedColumnDeepCopy(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
bucket = fc.bucketized_column(fc.real_valued_column("cost"), [0, 4])
crossed = fc.crossed_column(set([b, bucket, a]), hash_bucket_size=10000)
crossed_copy = copy.deepcopy(crossed)
self.assertEqual("aaa_X_bbb_X_cost_bucketized", crossed_copy.name,
"name should be generated by sorted column names")
self.assertEqual("aaa", crossed_copy.columns[0].name)
self.assertEqual("bbb", crossed_copy.columns[1].name)
self.assertEqual("cost_bucketized", crossed_copy.columns[2].name)
def testFloat32WeightedSparseInt32ColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", [42, 1, -1000], dtype=dtypes.int32)
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.int32),
"weights": parsing_ops.VarLenFeature(dtypes.float32)
}, weighted_ids.config)
def testFloat32WeightedSparseStringColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.string),
"weights": parsing_ops.VarLenFeature(dtypes.float32)
}, weighted_ids.config)
def testInt32WeightedSparseStringColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.string),
"weights": parsing_ops.VarLenFeature(dtypes.int32)
}, weighted_ids.config)
with self.assertRaisesRegexp(ValueError,
"dtype is not convertible to float"):
weighted_ids = fc.weighted_sparse_column(
ids, "weights", dtype=dtypes.string)
def testInt32WeightedSparseInt64ColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", [42, 1, -1000], dtype=dtypes.int64)
weighted_ids = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.int64),
"weights": parsing_ops.VarLenFeature(dtypes.int32)
}, weighted_ids.config)
with self.assertRaisesRegexp(ValueError,
"dtype is not convertible to float"):
weighted_ids = fc.weighted_sparse_column(
ids, "weights", dtype=dtypes.string)
def testRealValuedColumnDtypes(self):
rvc = fc.real_valued_column("rvc")
self.assertDictEqual(
{
"rvc": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32)
},
rvc.config)
rvc = fc.real_valued_column("rvc", dtype=dtypes.int32)
self.assertDictEqual(
{
"rvc": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.int32)
},
rvc.config)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("rvc", dtype=dtypes.string)
def testSparseColumnDtypes(self):
sc = fc.sparse_column_with_integerized_feature("sc", 10)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)
}, sc.config)
sc = fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.int32)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, sc.config)
with self.assertRaisesRegexp(ValueError, "dtype must be an integer"):
fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.float32)
def testSparseColumnSingleBucket(self):
sc = fc.sparse_column_with_integerized_feature("sc", 1)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)
}, sc.config)
self.assertEqual(1, sc._wide_embedding_lookup_arguments(None).vocab_size)
def testSparseColumnAcceptsDenseScalar(self):
"""Tests that `SparseColumn`s accept dense scalar inputs."""
batch_size = 4
dense_scalar_input = [1, 2, 3, 4]
sparse_column = fc.sparse_column_with_integerized_feature("values", 10)
features = {"values":
constant_op.constant(dense_scalar_input, dtype=dtypes.int64)}
sparse_column.insert_transformed_feature(features)
sparse_output = features[sparse_column]
expected_shape = [batch_size, 1]
with self.test_session() as sess:
sparse_result = sess.run(sparse_output)
self.assertEquals(expected_shape, list(sparse_result.dense_shape))
def testSparseColumnIntegerizedDeepCopy(self):
"""Tests deepcopy of sparse_column_with_integerized_feature."""
column = fc.sparse_column_with_integerized_feature("a", 10)
self.assertEqual("a", column.name)
column_copy = copy.deepcopy(column)
self.assertEqual("a", column_copy.name)
self.assertEqual(10, column_copy.bucket_size)
self.assertTrue(column_copy.is_integerized)
def testSparseColumnHashBucketDeepCopy(self):
"""Tests deepcopy of sparse_column_with_hash_bucket."""
column = fc.sparse_column_with_hash_bucket("a", 10)
self.assertEqual("a", column.name)
column_copy = copy.deepcopy(column)
self.assertEqual("a", column_copy.name)
self.assertEqual(10, column_copy.bucket_size)
self.assertFalse(column_copy.is_integerized)
def testSparseColumnKeysDeepCopy(self):
"""Tests deepcopy of sparse_column_with_keys."""
column = fc.sparse_column_with_keys(
"a", keys=["key0", "key1", "key2"])
self.assertEqual("a", column.name)
column_copy = copy.deepcopy(column)
self.assertEqual("a", column_copy.name)
self.assertEqual(
fc._SparseIdLookupConfig( # pylint: disable=protected-access
keys=("key0", "key1", "key2"),
vocab_size=3,
default_value=-1),
column_copy.lookup_config)
self.assertFalse(column_copy.is_integerized)
def testSparseColumnVocabularyDeepCopy(self):
"""Tests deepcopy of sparse_column_with_vocabulary_file."""
column = fc.sparse_column_with_vocabulary_file(
"a", vocabulary_file="path_to_file", vocab_size=3)
self.assertEqual("a", column.name)
column_copy = copy.deepcopy(column)
self.assertEqual("a", column_copy.name)
self.assertEqual(
fc._SparseIdLookupConfig( # pylint: disable=protected-access
vocabulary_file="path_to_file",
num_oov_buckets=0,
vocab_size=3,
default_value=-1),
column_copy.lookup_config)
self.assertFalse(column_copy.is_integerized)
  def testCreateFeatureSpec(self):
    """create_feature_spec_for_parsing yields one entry per base raw feature.

    Derived columns (embedding, weighted, bucketized, crossed, one-hot,
    scattered embedding) contribute the parse spec of their underlying raw
    columns. Also checks parity with the core library's
    make_parse_example_spec and with dict-valued input.
    """
    sparse_col = fc.sparse_column_with_hash_bucket(
        "sparse_column", hash_bucket_size=100)
    embedding_col = fc.embedding_column(
        fc.sparse_column_with_hash_bucket(
            "sparse_column_for_embedding", hash_bucket_size=10),
        dimension=4)
    str_sparse_id_col = fc.sparse_column_with_keys(
        "str_id_column", ["marlo", "omar", "stringer"])
    int32_sparse_id_col = fc.sparse_column_with_keys(
        "int32_id_column", [42, 1, -1000], dtype=dtypes.int32)
    int64_sparse_id_col = fc.sparse_column_with_keys(
        "int64_id_column", [42, 1, -1000], dtype=dtypes.int64)
    weighted_id_col = fc.weighted_sparse_column(str_sparse_id_col,
                                                "str_id_weights_column")
    real_valued_col1 = fc.real_valued_column("real_valued_column1")
    real_valued_col2 = fc.real_valued_column("real_valued_column2", 5)
    bucketized_col1 = fc.bucketized_column(
        fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
    bucketized_col2 = fc.bucketized_column(
        fc.real_valued_column("real_valued_column_for_bucketization2", 4),
        [0, 4])
    a = fc.sparse_column_with_hash_bucket("cross_aaa", hash_bucket_size=100)
    b = fc.sparse_column_with_hash_bucket("cross_bbb", hash_bucket_size=100)
    cross_col = fc.crossed_column(set([a, b]), hash_bucket_size=10000)
    one_hot_col = fc.one_hot_column(fc.sparse_column_with_hash_bucket(
        "sparse_column_for_one_hot", hash_bucket_size=100))
    scattered_embedding_col = fc.scattered_embedding_column(
        "scattered_embedding_column", size=100, dimension=10, hash_key=1)
    feature_columns = set([
        sparse_col, embedding_col, weighted_id_col, int32_sparse_id_col,
        int64_sparse_id_col, real_valued_col1, real_valued_col2,
        bucketized_col1, bucketized_col2, cross_col, one_hot_col,
        scattered_embedding_col
    ])
    # Each raw feature appears once, keyed by its raw column name.
    expected_config = {
        "sparse_column":
            parsing_ops.VarLenFeature(dtypes.string),
        "sparse_column_for_embedding":
            parsing_ops.VarLenFeature(dtypes.string),
        "str_id_column":
            parsing_ops.VarLenFeature(dtypes.string),
        "int32_id_column":
            parsing_ops.VarLenFeature(dtypes.int32),
        "int64_id_column":
            parsing_ops.VarLenFeature(dtypes.int64),
        "str_id_weights_column":
            parsing_ops.VarLenFeature(dtypes.float32),
        "real_valued_column1":
            parsing_ops.FixedLenFeature(
                [1], dtype=dtypes.float32),
        "real_valued_column2":
            parsing_ops.FixedLenFeature(
                [5], dtype=dtypes.float32),
        "real_valued_column_for_bucketization1":
            parsing_ops.FixedLenFeature(
                [1], dtype=dtypes.float32),
        "real_valued_column_for_bucketization2":
            parsing_ops.FixedLenFeature(
                [4], dtype=dtypes.float32),
        "cross_aaa":
            parsing_ops.VarLenFeature(dtypes.string),
        "cross_bbb":
            parsing_ops.VarLenFeature(dtypes.string),
        "sparse_column_for_one_hot":
            parsing_ops.VarLenFeature(dtypes.string),
        "scattered_embedding_column":
            parsing_ops.VarLenFeature(dtypes.string),
    }
    config = fc.create_feature_spec_for_parsing(feature_columns)
    self.assertDictEqual(expected_config, config)
    # Tests that contrib feature columns work with core library:
    config_core = fc_core.make_parse_example_spec(feature_columns)
    self.assertDictEqual(expected_config, config_core)
    # Test that the same config is parsed out if we pass a dictionary.
    feature_columns_dict = {
        str(i): val
        for i, val in enumerate(feature_columns)
    }
    config = fc.create_feature_spec_for_parsing(feature_columns_dict)
    self.assertDictEqual(expected_config, config)
def testCreateFeatureSpec_ExperimentalColumns(self):
real_valued_col0 = fc._real_valued_var_len_column(
"real_valued_column0", is_sparse=True)
real_valued_col1 = fc._real_valued_var_len_column(
"real_valued_column1", dtype=dtypes.int64, default_value=0,
is_sparse=False)
feature_columns = set([real_valued_col0, real_valued_col1])
expected_config = {
"real_valued_column0": parsing_ops.VarLenFeature(dtype=dtypes.float32),
"real_valued_column1":
parsing_ops.FixedLenSequenceFeature(
[], dtype=dtypes.int64, allow_missing=True, default_value=0),
}
config = fc.create_feature_spec_for_parsing(feature_columns)
self.assertDictEqual(expected_config, config)
  def testCreateFeatureSpec_RealValuedColumnWithDefaultValue(self):
    """Default values are broadcast to dimension and converted to floats.

    Sparse var-len columns drop the default (VarLenFeature carries none);
    dense var-len columns pass it through FixedLenSequenceFeature.
    """
    real_valued_col1 = fc.real_valued_column(
        "real_valued_column1", default_value=2)
    real_valued_col2 = fc.real_valued_column(
        "real_valued_column2", 5, default_value=4)
    real_valued_col3 = fc.real_valued_column(
        "real_valued_column3", default_value=[8])
    real_valued_col4 = fc.real_valued_column(
        "real_valued_column4", 3, default_value=[1, 0, 6])
    real_valued_col5 = fc._real_valued_var_len_column(
        "real_valued_column5", default_value=2, is_sparse=True)
    real_valued_col6 = fc._real_valued_var_len_column(
        "real_valued_column6", dtype=dtypes.int64, default_value=1,
        is_sparse=False)
    feature_columns = [
        real_valued_col1, real_valued_col2, real_valued_col3, real_valued_col4,
        real_valued_col5, real_valued_col6
    ]
    config = fc.create_feature_spec_for_parsing(feature_columns)
    self.assertEqual(6, len(config))
    self.assertDictEqual(
        {
            "real_valued_column1":
                parsing_ops.FixedLenFeature(
                    [1], dtype=dtypes.float32, default_value=[2.]),
            "real_valued_column2":
                parsing_ops.FixedLenFeature(
                    [5],
                    dtype=dtypes.float32,
                    default_value=[4., 4., 4., 4., 4.]),
            "real_valued_column3":
                parsing_ops.FixedLenFeature(
                    [1], dtype=dtypes.float32, default_value=[8.]),
            "real_valued_column4":
                parsing_ops.FixedLenFeature(
                    [3], dtype=dtypes.float32, default_value=[1., 0., 6.]),
            "real_valued_column5":
                parsing_ops.VarLenFeature(dtype=dtypes.float32),
            "real_valued_column6":
                parsing_ops.FixedLenSequenceFeature(
                    [], dtype=dtypes.int64, allow_missing=True,
                    default_value=1)
        },
        config)
  def testCreateSequenceFeatureSpec(self):
    """Sequence parse spec maps dense columns to FixedLenSequenceFeature.

    Columns with a default value get allow_missing=True; sparse var-len
    columns map to VarLenFeature.
    """
    sparse_col = fc.sparse_column_with_hash_bucket(
        "sparse_column", hash_bucket_size=100)
    embedding_col = fc.embedding_column(
        fc.sparse_column_with_hash_bucket(
            "sparse_column_for_embedding", hash_bucket_size=10),
        dimension=4)
    sparse_id_col = fc.sparse_column_with_keys("id_column",
                                               ["marlo", "omar", "stringer"])
    weighted_id_col = fc.weighted_sparse_column(sparse_id_col,
                                                "id_weights_column")
    real_valued_col1 = fc.real_valued_column("real_valued_column", dimension=2)
    real_valued_col2 = fc.real_valued_column(
        "real_valued_default_column", dimension=5, default_value=3.0)
    real_valued_col3 = fc._real_valued_var_len_column(
        "real_valued_var_len_column", default_value=3.0, is_sparse=True)
    real_valued_col4 = fc._real_valued_var_len_column(
        "real_valued_var_len_dense_column", default_value=4.0, is_sparse=False)
    feature_columns = set([
        sparse_col, embedding_col, weighted_id_col, real_valued_col1,
        real_valued_col2, real_valued_col3, real_valued_col4
    ])
    feature_spec = fc._create_sequence_feature_spec_for_parsing(feature_columns)
    expected_feature_spec = {
        "sparse_column":
            parsing_ops.VarLenFeature(dtypes.string),
        "sparse_column_for_embedding":
            parsing_ops.VarLenFeature(dtypes.string),
        "id_column":
            parsing_ops.VarLenFeature(dtypes.string),
        "id_weights_column":
            parsing_ops.VarLenFeature(dtypes.float32),
        "real_valued_column":
            parsing_ops.FixedLenSequenceFeature(
                shape=[2], dtype=dtypes.float32, allow_missing=False),
        "real_valued_default_column":
            parsing_ops.FixedLenSequenceFeature(
                shape=[5], dtype=dtypes.float32, allow_missing=True),
        "real_valued_var_len_column":
            parsing_ops.VarLenFeature(dtype=dtypes.float32),
        "real_valued_var_len_dense_column":
            parsing_ops.FixedLenSequenceFeature(
                shape=[], dtype=dtypes.float32, allow_missing=True),
    }
    self.assertDictEqual(expected_feature_spec, feature_spec)
  def testMakePlaceHolderTensorsForBaseFeatures(self):
    """One placeholder per base feature, with appropriate type and shape.

    Sparse and var-len columns yield SparseTensor placeholders; dense
    real-valued columns yield float placeholders of shape [None, dimension].
    """
    sparse_col = fc.sparse_column_with_hash_bucket(
        "sparse_column", hash_bucket_size=100)
    real_valued_col = fc.real_valued_column("real_valued_column", 5)
    vlen_real_valued_col = fc._real_valued_var_len_column(
        "vlen_real_valued_column", is_sparse=True)
    bucketized_col = fc.bucketized_column(
        fc.real_valued_column("real_valued_column_for_bucketization"), [0, 4])
    feature_columns = set(
        [sparse_col, real_valued_col, vlen_real_valued_col, bucketized_col])
    placeholders = (
        fc.make_place_holder_tensors_for_base_features(feature_columns))
    self.assertEqual(4, len(placeholders))
    self.assertTrue(
        isinstance(placeholders["sparse_column"],
                   sparse_tensor_lib.SparseTensor))
    self.assertTrue(
        isinstance(placeholders["vlen_real_valued_column"],
                   sparse_tensor_lib.SparseTensor))
    placeholder = placeholders["real_valued_column"]
    # Placeholder op names embed the feature name; check via substring match.
    self.assertGreaterEqual(
        placeholder.name.find(u"Placeholder_real_valued_column"), 0)
    self.assertEqual(dtypes.float32, placeholder.dtype)
    self.assertEqual([None, 5], placeholder.get_shape().as_list())
    placeholder = placeholders["real_valued_column_for_bucketization"]
    self.assertGreaterEqual(
        placeholder.name.find(
            u"Placeholder_real_valued_column_for_bucketization"), 0)
    self.assertEqual(dtypes.float32, placeholder.dtype)
    self.assertEqual([None, 1], placeholder.get_shape().as_list())
  def testInitEmbeddingColumnWeightsFromCkpt(self):
    """Embedding weights restored via ckpt_to_load_from equal the saved ones."""
    sparse_col = fc.sparse_column_with_hash_bucket(
        column_name="object_in_image", hash_bucket_size=4)
    # Create _EmbeddingColumn which randomly initializes embedding of size
    # [4, 16].
    embedding_col = fc.embedding_column(sparse_col, dimension=16)
    # Creating a SparseTensor which has all the ids possible for the given
    # vocab.
    input_tensor = sparse_tensor_lib.SparseTensor(
        indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
        values=[0, 1, 2, 3],
        dense_shape=[4, 4])
    # Invoking 'layers.input_from_feature_columns' will create the embedding
    # variable. Creating under scope 'run_1' so as to prevent name conflicts
    # when creating embedding variable for 'embedding_column_pretrained'.
    with variable_scope.variable_scope("run_1"):
      with variable_scope.variable_scope(embedding_col.name):
        # This will return a [4, 16] tensor which is same as embedding variable.
        embeddings = feature_column_ops.input_from_feature_columns({
            embedding_col: input_tensor
        }, [embedding_col])
    # Save the randomly-initialized embedding to a temp checkpoint.
    save = saver.Saver()
    ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
                                   "init_embedding_col_w_from_ckpt")
    ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
    checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      saved_embedding = embeddings.eval()
      save.save(sess, checkpoint_path)
    # NOTE(review): tensor_name_in_ckpt is hard-wired to the variable name
    # produced by the scopes above -- confirm it tracks any scope renames.
    embedding_col_initialized = fc.embedding_column(
        sparse_id_column=sparse_col,
        dimension=16,
        ckpt_to_load_from=checkpoint_path,
        tensor_name_in_ckpt=("run_1/object_in_image_embedding/"
                             "input_from_feature_columns/object"
                             "_in_image_embedding/weights"))
    with variable_scope.variable_scope("run_2"):
      # This will initialize the embedding from provided checkpoint and return a
      # [4, 16] tensor which is same as embedding variable. Since we didn't
      # modify embeddings, this should be same as 'saved_embedding'.
      pretrained_embeddings = feature_column_ops.input_from_feature_columns({
          embedding_col_initialized: input_tensor
      }, [embedding_col_initialized])
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      loaded_embedding = pretrained_embeddings.eval()
    self.assertAllClose(saved_embedding, loaded_embedding)
  def testInitCrossedColumnWeightsFromCkpt(self):
    """Crossed-column weights restored from a checkpoint equal the saved ones."""
    sparse_col_1 = fc.sparse_column_with_hash_bucket(
        column_name="col_1", hash_bucket_size=4)
    sparse_col_2 = fc.sparse_column_with_keys(
        column_name="col_2", keys=("foo", "bar", "baz"))
    sparse_col_3 = fc.sparse_column_with_keys(
        column_name="col_3", keys=(42, 1, -1000), dtype=dtypes.int64)
    crossed_col = fc.crossed_column(
        columns=[sparse_col_1, sparse_col_2, sparse_col_3], hash_bucket_size=4)
    input_tensor = sparse_tensor_lib.SparseTensor(
        indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
        values=[0, 1, 2, 3],
        dense_shape=[4, 4])
    # Invoking 'weighted_sum_from_feature_columns' will create the crossed
    # column weights variable.
    with variable_scope.variable_scope("run_1"):
      with variable_scope.variable_scope(crossed_col.name):
        # Returns looked up column weights which is same as crossed column
        # weights as well as actual references to weights variables.
        _, col_weights, _ = (
            feature_column_ops.weighted_sum_from_feature_columns({
                sparse_col_1.name: input_tensor,
                sparse_col_2.name: input_tensor,
                sparse_col_3.name: input_tensor
            }, [crossed_col], 1))
        # Update the weights since default initializer initializes all weights
        # to 0.0.
        # NOTE(review): only the last assign_op survives this loop; with a
        # single crossed column there is exactly one entry, so that is fine.
        for weight in col_weights.values():
          assign_op = state_ops.assign(weight[0], weight[0] + 0.5)
    # Save the bumped weights to a temp checkpoint.
    save = saver.Saver()
    ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
                                   "init_crossed_col_w_from_ckpt")
    ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
    checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(assign_op)
      saved_col_weights = col_weights[crossed_col][0].eval()
      save.save(sess, checkpoint_path)
    # NOTE(review): the restored column crosses only col_1 x col_2 but loads
    # the run_1 weights of the three-way cross -- presumably intentional
    # since only the hash-bucket-sized weight matrix matters; confirm.
    crossed_col_initialized = fc.crossed_column(
        columns=[sparse_col_1, sparse_col_2],
        hash_bucket_size=4,
        ckpt_to_load_from=checkpoint_path,
        tensor_name_in_ckpt=("run_1/col_1_X_col_2_X_col_3/"
                             "weighted_sum_from_feature_columns/"
                             "col_1_X_col_2_X_col_3/weights"))
    with variable_scope.variable_scope("run_2"):
      # This will initialize the crossed column weights from provided checkpoint
      # and return a [4, 1] tensor which is same as weights variable. Since we
      # won't modify weights, this should be same as 'saved_col_weights'.
      _, col_weights, _ = (feature_column_ops.weighted_sum_from_feature_columns(
          {
              sparse_col_1.name: input_tensor,
              sparse_col_2.name: input_tensor
          }, [crossed_col_initialized], 1))
      col_weights_from_ckpt = col_weights[crossed_col_initialized][0]
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      loaded_col_weights = col_weights_from_ckpt.eval()
    self.assertAllClose(saved_col_weights, loaded_col_weights)
# Run the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
  test.main()
| eadgarchen/tensorflow | tensorflow/contrib/layers/python/layers/feature_column_test.py | Python | apache-2.0 | 49,241 | [
"MOOSE",
"Octopus"
] | 863b1bb40ccb1c0f9b1694b210c38150a27dc6d659f1651a7075db42c6630497 |
#!/usr/bin/env python
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#Process the database for essential pathways
#Essential Pathways v1.0.2
#Author:realasking
#Email:realasking@gmail.com,tomwangsim@163.com
#Aug-23,2017,in USTB
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import sys
import os
import re
import sqlite3
import shutil
from epw import displayops as dpops
from epw import cmds
from epw import einit as ei
from prettytable import PrettyTable
# Package metadata for the epw distribution.
__version__ = '1.0.2'
__author__ = 'realasking <realasking@gmail.com,tomwangsim@163.com>'
__license__ = 'LGPLv3'
class pathwayops(ei.einit):
    """Manage 'essential pathway' environment variables.

    Pathways are stored in the sqlite table ``ep(pathway_name, folder_path,
    comment)`` and mirrored into an environment-modules file so that loading
    the module exports each pathway as an environment variable.

    The following attributes are expected to be provided by
    ``ei.einit.__init__``: ``self.df`` (database file path),
    ``self.module_file`` / ``self.module_name`` (modules file and its name),
    ``self.bf`` (backup folder) and ``self.dbfile`` (database file name) --
    TODO confirm against epw.einit.
    """

    def __init__(self, conf_folder, dbfile, module_name):
        """Open the pathway database, creating the schema on first use."""
        ei.einit.__init__(self, conf_folder, dbfile, module_name)
        # sqlite3.connect() creates the file if it is missing, so remember
        # beforehand whether the schema still has to be created.
        db_exists = os.path.isfile(self.df)
        self.epwdb = sqlite3.connect(self.df)
        self.db = self.epwdb.cursor()
        if not db_exists:
            self.db.execute('''CREATE TABLE ep(pathway_name text,folder_path text,comment text)''')
            self.epwdb.commit()
        self.info = cmds.warnings()
        self.cmds = cmds.ops()

    def close(self):
        """Close the database connection."""
        self.epwdb.close()

    def dbdelete(self, pn):
        """Delete the pathway named *pn* from the database."""
        self.db.execute('DELETE FROM ep WHERE pathway_name=?', (pn,))
        self.epwdb.commit()

    def dbcheck(self):
        """Remove duplicate rows and pathways shadowing existing env vars."""
        # Please unload module epath in system before use this function
        self.db.execute('DELETE FROM ep WHERE rowid NOT IN (SELECT min(rowid) FROM ep GROUP BY folder_path)')
        self.epwdb.commit()
        self.db.execute('DELETE FROM ep WHERE rowid NOT IN (SELECT min(rowid) FROM ep GROUP BY pathway_name)')
        self.epwdb.commit()
        # Delete the pathways which conflict with other environment variables
        # of the system.
        for name in os.popen('env|cut -d\"=\" -f1').read().splitlines():
            self.dbdelete(name)

    def module_create(self):
        """(Re)write the modules file from the current database content."""
        self.dbcheck()
        with open(self.module_file, 'w') as fp:
            fp.write("#%Module1.0"+'\n')
            fp.write(" ## Module settings for essential pathways \n")
            fp.write(" ## This file is automatically generated by ep, do not modify it manually\n")
            fp.write(" proc ModulesHelp { } {\n")
            fp.write(" puts stderr \"Essential Pathways are defined in this file,do not modify it manually\"\n")
            fp.write(" }\n")
            fp.write("\n")
            fp.write(" module-whatis \"Essential Pathways\"\n")
            # One setenv line per pathway, sorted by name.
            for row in self.db.execute('SELECT * FROM ep ORDER BY pathway_name'):
                fp.write(" setenv "+row[0]+" \""+row[1]+"\"\n")

    def path_mod(self):
        """Deduplicate the db and drop the modules file so it can be rebuilt."""
        # remove duplicates in db
        self.dbcheck()
        # Check if module file exists
        if os.path.isfile(self.module_file):
            os.remove(self.module_file)

    def path_refresh(self):
        """Merge pathways already present in the modules file into the db,
        then rewrite the modules file from the database."""
        if os.path.isfile(self.module_file):
            # Each line is '<name> <value>' as reported by `module show`.
            for line in os.popen('module show '+self.module_name+' 2>&1 |grep setenv|awk \'{for(i=2;i<=NF;i++) printf $i" ";print""}\'').read().splitlines():
                # split a string at the first blank
                name, path = line.strip().split(' ', 1)
                rows = self.db.execute('SELECT * FROM ep WHERE pathway_name=?', (name,)).fetchall()
                if len(rows) == 0:
                    self.db.execute('''INSERT INTO ep VALUES(?,?,?)''', (name, path, ''))
                    self.epwdb.commit()
                else:
                    # BUGFIX: the original called fetchall() a second time on
                    # an already-exhausted cursor, which always returned [],
                    # so existing rows were never updated.
                    for row in rows:
                        if path != row[1]:
                            self.db.execute('UPDATE ep SET folder_path=? WHERE pathway_name=?', (path, name))
                            self.epwdb.commit()
            os.remove(self.module_file)
        self.module_create()
        self.close()

    def path_list(self):
        """Print the defined (database) and set (modules file) pathways."""
        print('Defined PATHWAYS:')
        screeninfo = dpops.screenformat()
        screeninfo.size_calc(3)
        t1 = PrettyTable(['PATHWAY NAME', 'FOLDER PATH', 'COMMENT'])
        t1.align['PATHWAY NAME'] = 'c'
        t1.padding_width = 1
        for row in self.db.execute('SELECT * FROM ep ORDER BY pathway_name'):
            t1.add_row([screeninfo.string_formatting(row[0], screeninfo.namelengthmax),
                        screeninfo.string_formatting(row[1], screeninfo.waylengthmax),
                        screeninfo.string_formatting(row[2], screeninfo.waylengthmax)])
        print(t1)
        screeninfo2 = dpops.screenformat()
        screeninfo2.size_calc(2)
        print('Set PATHWAYS:')
        t2 = PrettyTable(['PATHWAY NAME', 'FOLDER PATH'])
        t2.align['PATHWAY NAME'] = 'c'
        t2.padding_width = 1
        for line in os.popen('module show '+self.module_name+' 2>&1 |grep setenv|awk \'{for(i=2;i<=NF;i++) printf $i" ";print""}\'').read().splitlines():
            name, path = line.strip().split(' ', 1)
            t2.add_row([screeninfo2.string_formatting(name, screeninfo2.namelengthmax),
                        screeninfo2.string_formatting(path, screeninfo2.waylengthmax)])
        print(t2)
        self.close()

    def path_create(self, pn, pw='', pc=''):
        """Add pathway *pn* pointing at folder *pw* with optional comment *pc*."""
        self.path_mod()
        if not os.path.exists(pw):
            # The default pw='' never exists, so a folder argument is mandatory.
            self.info.Perror()
            exit()
        rows = self.db.execute('SELECT * FROM ep WHERE pathway_name=?', (pn,)).fetchall()
        if len(rows) == 0:
            self.db.execute('INSERT INTO ep VALUES(?,?,?)', (pn, pw, pc))
            self.epwdb.commit()
        self.module_create()
        self.close()

    def path_comment(self, pn):
        """Interactively replace the comment of pathway *pn*."""
        rows = self.db.execute('SELECT * FROM ep WHERE pathway_name=?', (pn,)).fetchall()
        if len(rows) != 0:
            print("Pathway name: "+pn+"\n")
            print("Exsits comment:\n")
            for row in rows:
                print(row[2])
            pc = input("New comment (Ended by Enter):\n")
            self.db.execute('UPDATE ep SET comment=? WHERE pathway_name=?', (pc, pn))
            self.epwdb.commit()
        else:
            self.info.Pderror()
        self.close()

    def path_modify(self, pn, pw):
        """Point the existing pathway *pn* at the (existing) folder *pw*."""
        if not os.path.exists(pw):
            self.info.Perror()
            self.epwdb.close()
        else:
            rows = self.db.execute('SELECT * FROM ep WHERE pathway_name=?', (pn,)).fetchall()
            if len(rows) != 0:
                self.db.execute('UPDATE ep SET folder_path=? WHERE pathway_name=?', (pw, pn))
                self.epwdb.commit()
            else:
                self.info.Pderror()
            self.module_create()
            self.close()

    def path_delete(self, pn):
        """Remove pathway *pn* and rewrite the modules file."""
        self.dbdelete(pn)
        self.module_create()
        self.close()

    def cmodfile(self):
        """Create the modules file if it does not exist yet."""
        if not os.path.isfile(self.module_file):
            self.module_create()

    def path_backup(self):
        """Back up the database file into the backup folder."""
        self.cmds.module_backup(self.df, self.bf)

    def path_restore(self):
        """Restore the database from the backup folder and rebuild the modules file."""
        self.cmds.module_restore(self.bf+'/'+self.dbfile, self.df)
        self.module_create()
        self.close()

    def path_uninstall(self):
        """Close the database and remove the module definition."""
        self.close()
        self.cmds.uninstall(self.module_name, self.module_file)
| realasking/essential-pathway | epw/epprocess.py | Python | lgpl-3.0 | 7,919 | [
"EPW"
] | 8b67bfb3867bde0463ed13da0aa6be4427b8af4aa4d2e1d637528f056d387265 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test single point logfiles in cclib."""
import os
import unittest
import numpy
from common import get_minimum_carbon_separation
from skip import skipForParser
from skip import skipForLogfile
# Absolute directory containing this test module; used by the __main__ block
# below to extend sys.path.
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericSPTest(unittest.TestCase):
    """Generic restricted single point unittest.

    Subclasses override the class attributes below to adjust tolerances and
    reference values for individual programs; ``self.data`` and
    ``self.logfile`` are injected by the test harness.
    """

    # Molecular mass of DVB in mD, and expected precision.
    molecularmass = 130078.25
    mass_precision = 0.10

    # In STO-3G, H has 1, C has 5 (1 S and 4 SP).
    nbasisdict = {1: 1, 6: 5}

    # Approximate B3LYP energy of dvb after SCF in STO-3G.
    b3lyp_energy = -10365

    # Overlap first two atomic orbitals.
    overlap01 = 0.24

    def testnatom(self):
        """Is the number of atoms equal to 20?"""
        self.assertEqual(self.data.natom, 20)

    def testatomnos(self):
        """Are the atomnos correct?"""
        # The nuclear charges should be integer values in a NumPy array.
        # (numpy.all replaces numpy.alltrue, which was removed in NumPy 2.0.)
        self.assertTrue(numpy.all([numpy.issubdtype(atomno, int) for atomno in self.data.atomnos]))
        self.assertEqual(self.data.atomnos.dtype.char, 'i')
        self.assertEqual(self.data.atomnos.shape, (20,))
        self.assertEqual(sum(self.data.atomnos == 6) + sum(self.data.atomnos == 1), 20)

    @skipForParser('DALTON', 'DALTON has a very low accuracy for the printed values of all populations (2 decimals rounded in a weird way), so let it slide for now')
    @skipForLogfile('Jaguar/basicJaguar7', 'We did not print the atomic partial charges in the unit tests for this version')
    @skipForLogfile('Molpro/basicMolpro2006', "These tests were run a long time ago and since we don't have access to Molpro 2006 anymore, we can skip this test (it is tested in 2012)")
    @skipForLogfile('Psi/basicPsi3', 'Psi3 did not print partial atomic charges')
    def testatomcharges(self):
        """Are atomcharges (at least Mulliken) consistent with natom and sum to zero?"""
        # 'charge_type' avoids shadowing the builtin 'type'.
        for charge_type in set(['mulliken'] + list(self.data.atomcharges.keys())):
            charges = self.data.atomcharges[charge_type]
            self.assertEqual(len(charges), self.data.natom)
            self.assertAlmostEqual(sum(charges), 0.0, delta=0.001)

    def testatomcoords(self):
        """Are the dimensions of atomcoords 1 x natom x 3?"""
        expected_shape = (1, self.data.natom, 3)
        self.assertEqual(self.data.atomcoords.shape, expected_shape)

    def testatomcoords_units(self):
        """Are atomcoords consistent with Angstroms?"""
        min_carbon_dist = get_minimum_carbon_separation(self.data)
        dev = abs(min_carbon_dist - 1.34)
        self.assertTrue(dev < 0.03, "Minimum carbon dist is %.2f (not 1.34)" % min_carbon_dist)

    def testcharge_and_mult(self):
        """Are the charge and multiplicity correct?"""
        self.assertEqual(self.data.charge, 0)
        self.assertEqual(self.data.mult, 1)

    def testnbasis(self):
        """Is the number of basis set functions correct?"""
        count = sum([self.nbasisdict[n] for n in self.data.atomnos])
        self.assertEqual(self.data.nbasis, count)

    @skipForParser('ADF', 'ADF parser does not extract atombasis')
    @skipForLogfile('Jaguar/basicJaguar7', 'Data file does not contain enough information. Can we make a new one?')
    def testatombasis(self):
        """Are the indices in atombasis the right amount and unique?"""
        # 'indices' avoids shadowing the builtin 'all'.
        indices = []
        for i, atom in enumerate(self.data.atombasis):
            self.assertEqual(len(atom), self.nbasisdict[self.data.atomnos[i]])
            indices += atom
        # Test if there are as many indices as atomic orbitals.
        self.assertEqual(len(indices), self.data.nbasis)
        # Check if all are different (every orbital indexed once).
        self.assertEqual(len(set(indices)), len(indices))

    @skipForParser('GAMESS', 'atommasses not implemented yet')
    @skipForParser('GAMESSUK', 'atommasses not implemented yet')
    @skipForParser('Jaguar', 'atommasses not implemented yet')
    @skipForParser('Molpro', 'atommasses not implemented yet')
    @skipForParser('NWChem', 'atommasses not implemented yet')
    @skipForParser('ORCA', 'atommasses not implemented yet')
    @skipForLogfile('Psi/basicPsi3', 'atommasses not implemented yet')
    @skipForLogfile('Psi/basicPsi4.0b5', 'atommasses not implemented yet')
    @skipForParser('QChem', 'atommasses not implemented yet')
    def testatommasses(self):
        """Do the atom masses sum up to the molecular mass?"""
        mm = 1000*sum(self.data.atommasses)
        msg = "Molecule mass: %f not %f +- %fmD" % (mm, self.molecularmass, self.mass_precision)
        self.assertAlmostEqual(mm, self.molecularmass, delta=self.mass_precision, msg=msg)

    def testcoreelectrons(self):
        """Are the coreelectrons all 0?"""
        ans = numpy.zeros(self.data.natom, 'i')
        numpy.testing.assert_array_equal(self.data.coreelectrons, ans)

    def testnormalisesym(self):
        """Did this subclass overwrite normalisesym?"""
        self.assertNotEqual(self.logfile.normalisesym("A"), "ERROR: This should be overwritten by this subclass")

    @skipForParser('Molpro', '?')
    @skipForParser('ORCA', 'ORCA has no support for symmetry yet')
    def testsymlabels(self):
        """Are all the symmetry labels either Ag/u or Bg/u?"""
        sumwronglabels = sum([x not in ['Ag', 'Bu', 'Au', 'Bg'] for x in self.data.mosyms[0]])
        self.assertEqual(sumwronglabels, 0)

    def testhomos(self):
        """Is the index of the HOMO equal to 34?"""
        numpy.testing.assert_array_equal(self.data.homos, numpy.array([34], "i"), "%s != array([34],'i')" % numpy.array_repr(self.data.homos))

    def testscfvaluetype(self):
        """Are scfvalues and its elements the right type??"""
        self.assertEqual(type(self.data.scfvalues), type([]))
        self.assertEqual(type(self.data.scfvalues[0]), type(numpy.array([])))

    def testscfenergy(self):
        """Is the SCF energy within the target?"""
        self.assertAlmostEqual(self.data.scfenergies[-1], self.b3lyp_energy, delta=40, msg="Final scf energy: %f not %i +- 40eV" % (self.data.scfenergies[-1], self.b3lyp_energy))

    def testscftargetdim(self):
        """Do the scf targets have the right dimensions?"""
        self.assertEqual(self.data.scftargets.shape, (len(self.data.scfvalues), len(self.data.scfvalues[0][0])))

    def testlengthmoenergies(self):
        """Is the number of evalues equal to nmo?"""
        self.assertEqual(len(self.data.moenergies[0]), self.data.nmo)

    def testtypemoenergies(self):
        """Is moenergies a list containing one numpy array?"""
        self.assertEqual(type(self.data.moenergies), type([]))
        self.assertEqual(type(self.data.moenergies[0]), type(numpy.array([])))

    @skipForParser('DALTON', 'mocoeffs not implemented yet')
    @skipForLogfile('Jaguar/basicJaguar7', 'Data file does not contain enough information. Can we make a new one?')
    @skipForLogfile('Psi/basicPsi3', 'MO coefficients are printed separately for each SALC')
    def testdimmocoeffs(self):
        """Are the dimensions of mocoeffs equal to 1 x nmo x nbasis?"""
        self.assertEqual(type(self.data.mocoeffs), type([]))
        self.assertEqual(len(self.data.mocoeffs), 1)
        self.assertEqual(self.data.mocoeffs[0].shape,
                         (self.data.nmo, self.data.nbasis))

    @skipForParser('DALTON', 'To print: **INTEGRALS\n.PROPRI')
    @skipForParser('Psi', 'Psi does not currently have the option to print the overlap matrix')
    @skipForParser('QChem', 'QChem cannot print the overlap matrix')
    def testaooverlaps(self):
        """Are the dims and values of the overlap matrix correct?"""
        self.assertEqual(self.data.aooverlaps.shape, (self.data.nbasis, self.data.nbasis))

        # The matrix is symmetric.
        row = self.data.aooverlaps[0, :]
        col = self.data.aooverlaps[:, 0]
        self.assertEqual(sum(col - row), 0.0)

        # All values on the diagonal should be exactly one (the original
        # comment incorrectly said zero).
        for i in range(self.data.nbasis):
            self.assertEqual(self.data.aooverlaps[i, i], 1.0)

        # Check some additional values that don't seem to move around between programs.
        self.assertAlmostEqual(self.data.aooverlaps[0, 1], self.overlap01, delta=0.01)
        self.assertAlmostEqual(self.data.aooverlaps[1, 0], self.overlap01, delta=0.01)
        self.assertEqual(self.data.aooverlaps[3, 0], 0.0)
        self.assertEqual(self.data.aooverlaps[0, 3], 0.0)

    def testoptdone(self):
        """There should be no optdone attribute set."""
        self.assertFalse(hasattr(self.data, 'optdone'))

    @skipForParser('Gaussian', 'Logfile needs to be updated')
    @skipForParser('Jaguar', 'No dipole moments in the logfile')
    def testmoments(self):
        """Does the dipole and possible higher molecular moments look reasonable?"""

        # The reference point is always a vector, but not necessarily the
        # origin or center of mass. In this case, however, the center of mass
        # is at the origin, so we know what to expect.
        reference = self.data.moments[0]
        self.assertEqual(len(reference), 3)
        for x in reference:
            self.assertEqual(x, 0.0)

        # Length and value of dipole moment should always be correct (zero for this test).
        dipole = self.data.moments[1]
        self.assertEqual(len(dipole), 3)
        for d in dipole:
            self.assertAlmostEqual(d, 0.0, places=7)

        # If the quadrupole is there, we can expect roughly -50B for the XX moment,
        # -50B for the YY moment and and -60B for the ZZ moment.
        if len(self.data.moments) > 2:
            quadrupole = self.data.moments[2]
            self.assertEqual(len(quadrupole), 6)
            self.assertAlmostEqual(quadrupole[0], -50, delta=2.5)
            self.assertAlmostEqual(quadrupole[3], -50, delta=2.5)
            self.assertAlmostEqual(quadrupole[5], -60, delta=3)

        # If the octupole is there, it should have 10 components and be zero.
        if len(self.data.moments) > 3:
            octupole = self.data.moments[3]
            self.assertEqual(len(octupole), 10)
            for m in octupole:
                self.assertAlmostEqual(m, 0.0, delta=0.001)

        # The hexadecapole should have 15 elements, an XXXX component of around -1900 Debye*ang^2,
        # a YYYY component of -330B and a ZZZZ component of -50B.
        if len(self.data.moments) > 4:
            hexadecapole = self.data.moments[4]
            self.assertEqual(len(hexadecapole), 15)
            self.assertAlmostEqual(hexadecapole[0], -1900, delta=90)
            self.assertAlmostEqual(hexadecapole[10], -330, delta=11)
            self.assertAlmostEqual(hexadecapole[14], -50, delta=2.5)

        # The are 21 unique 32-pole moments, and all are zero in this test case.
        if len(self.data.moments) > 5:
            moment32 = self.data.moments[5]
            self.assertEqual(len(moment32), 21)
            for m in moment32:
                self.assertEqual(m, 0.0)

    @skipForParser('ADF', 'Does not support metadata yet')
    @skipForParser('GAMESS', 'Does not support metadata yet')
    @skipForParser('GAMESSUK', 'Does not support metadata yet')
    @skipForParser('Gaussian', 'Does not support metadata yet')
    @skipForParser('Jaguar', 'Does not support metadata yet')
    @skipForParser('Molpro', 'Does not support metadata yet')
    @skipForParser('NWChem', 'Does not support metadata yet')
    @skipForParser('ORCA', 'Does not support metadata yet')
    @skipForParser('Psi', 'Does not support metadata yet')
    @skipForParser('QChem', 'Does not support metadata yet')
    def testmetadata(self):
        """Does metadata have expected keys and values?"""
        self.assertTrue(hasattr(self.data, "metadata"))
        self.assertIn("basis_set", self.data.metadata)
        self.assertIn("methods", self.data.metadata)
        self.assertIn("package", self.data.metadata)
        self.assertIn("package_version", self.data.metadata)
class ADFSPTest(GenericSPTest):
    """Customized restricted single point unittest"""

    # ADF only prints up to 0.1mD per atom, so the precision here is worse than 0.1mD.
    mass_precision = 0.3

    # Reference fragment-orbital overlap diagonal values.
    foverlap00 = 1.00003
    foverlap11 = 1.02672
    foverlap22 = 1.03585
    b3lyp_energy = -140

    def testfoverlaps(self):
        """Are the dims and values of the fragment orbital overlap matrix correct?"""
        self.assertEqual(self.data.fooverlaps.shape, (self.data.nbasis, self.data.nbasis))

        # The matrix is symmetric.
        row = self.data.fooverlaps[0, :]
        col = self.data.fooverlaps[:, 0]
        self.assertEqual(sum(col - row), 0.0)

        # Although the diagonal elements are close to one (the original
        # comment incorrectly said zero), the SFOs are generally not
        # normalized, so test for a few specific values.
        self.assertAlmostEqual(self.data.fooverlaps[0, 0], self.foverlap00, delta=0.0001)
        self.assertAlmostEqual(self.data.fooverlaps[1, 1], self.foverlap11, delta=0.0001)
        self.assertAlmostEqual(self.data.fooverlaps[2, 2], self.foverlap22, delta=0.0001)
class Jaguar7SPTest(GenericSPTest):
    """Customized restricted single point unittest"""

    # Jaguar prints only 10 virtual MOs by default. Can we re-run with full output?
    def testlengthmoenergies(self):
        """Is the number of evalues equal to the number of occ. MOs + 10?"""
        self.assertEqual(len(self.data.moenergies[0]), self.data.homos[0] + 11)
class Psi3SPTest(GenericSPTest):
    """Customized restricted single point HF/KS unittest"""
    # Overrides GenericSPTest.b3lyp_energy (-10365).
    # The final energy is also a bit higher here, I think due to the fact
    # that a SALC calculation is done instead of a full LCAO.
    b3lyp_energy = -10300
# When run as a script, pull in the shared DataSuite driver from the parent
# test directory and run all single point (SP) data tests.
if __name__=="__main__":
    import sys
    sys.path.append(os.path.join(__filedir__, ".."))
    from test_data import DataSuite
    suite = DataSuite(['SP'])
    suite.testall()
| Schamnad/cclib | test/data/testSP.py | Python | bsd-3-clause | 14,485 | [
"ADF",
"Dalton",
"GAMESS",
"Gaussian",
"Jaguar",
"Molpro",
"NWChem",
"ORCA",
"cclib"
] | 08554816f5a4ea8a08173f8f0ba766278b7b69f394292c2577c6192af7588837 |
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.result.visitor import ResultVisitor
from robot.utils import XmlWriter
class XUnitWriter(object):
    """Writes an execution result as an xUnit-compatible XML file."""

    def __init__(self, execution_result, skip_noncritical):
        self._execution_result = execution_result
        self._skip_noncritical = skip_noncritical

    def write(self, output):
        """Serialize the stored result into `output` as UTF-8 XML."""
        xml_writer = XmlWriter(output, encoding='UTF-8')
        file_writer = XUnitFileWriter(xml_writer, self._skip_noncritical)
        self._execution_result.visit(file_writer)
class XUnitFileWriter(ResultVisitor):
    """Provides an xUnit-compatible result file.

    Attempts to adhere to the de facto schema guessed by Peter Reilly, see:
    http://marc.info/?l=ant-dev&m=123551933508682
    """

    def __init__(self, xml_writer, skip_noncritical=False):
        # xml_writer: the XmlWriter the XML elements are emitted through.
        # skip_noncritical: when true, non-critical tests are reported as
        # skipped instead of failed.
        self._writer = xml_writer
        self._root_suite = None
        self._skip_noncritical = skip_noncritical

    def start_suite(self, suite):
        # Only the first (root) suite opens the <testsuite> element; nested
        # suites are flattened into it.
        if self._root_suite:
            return
        self._root_suite = suite
        tests, failures, skip = self._get_stats(suite.statistics)
        attrs = {'name': suite.name,
                 'tests': tests,
                 'errors': '0',
                 'failures': failures,
                 'skip': skip}
        self._writer.start('testsuite', attrs)

    def _get_stats(self, statistics):
        # Returns (total, failures, skip) as strings. With skip_noncritical,
        # non-critical tests count as skipped rather than failed.
        if self._skip_noncritical:
            failures = statistics.critical.failed
            skip = statistics.all.total - statistics.critical.total
        else:
            failures = statistics.all.failed
            skip = 0
        return str(statistics.all.total), str(failures), str(skip)

    def end_suite(self, suite):
        if suite is self._root_suite:
            self._writer.end('testsuite')

    def visit_test(self, test):
        # One <testcase> element per test; elapsed time is converted from
        # milliseconds to whole seconds.
        self._writer.start('testcase',
                           {'classname': test.parent.longname,
                            'name': test.name,
                            'time': self._time_as_seconds(test.elapsedtime)})
        if self._skip_noncritical and not test.critical:
            self._skip_test(test)
        elif not test.passed:
            self._fail_test(test)
        self._writer.end('testcase')

    def _skip_test(self, test):
        # <skipped> body is 'STATUS: message' or bare status if no message.
        self._writer.element('skipped', '%s: %s' % (test.status, test.message)
                             if test.message else test.status)

    def _fail_test(self, test):
        self._writer.element('failure', attrs={'message': test.message,
                                               'type': 'AssertionError'})

    def _time_as_seconds(self, millis):
        # Round the millisecond duration to the nearest whole second.
        return str(int(round(millis, -3) / 1000))

    def visit_keyword(self, kw):
        # Keywords are intentionally excluded from xUnit output.
        pass

    def visit_statistics(self, stats):
        # Statistics are already summarized on the root <testsuite> element.
        pass

    def visit_errors(self, errors):
        # Execution errors have no xUnit counterpart.
        pass

    def end_result(self, result):
        self._writer.close()
| ktan2020/legacy-automation | win/Lib/site-packages/robot/reporting/xunitwriter.py | Python | mit | 3,433 | [
"VisIt"
] | 491f81a3372bbd8261449de0927dd299c2ecf9d69677b64e2e59f807feef415b |
#!/usr/bin/python
'''
TODO:
write all the atoms of the solvent molecule to the index file.
'''
import math
import sys
import os
import numpy
import MDAnalysis
from itertools import izip
from MDPackage import Index
from MDPackage import Simple_atom
from MDPackage import usage
import MDPackage
import time as Time
import matplotlib.pyplot as plt
def Min_dist(solute_atoms,solu_l,solv_coor,coor_list,R_solute,dmax=10):
    '''
    Distance from the point ``solv_coor`` to the nearest solute atom.

    solute_atoms: dict mapping atom index -> object with attributes
                  atom_coor_x, atom_coor_y, atom_coor_z.
    solu_l      : iterable of solute atom indices to scan.
    solv_coor   : [x, y, z] of the probe (solvent/grid) point.
    coor_list   : [x, y, z] of the solute geometric centre.
    R_solute    : SQUARED cut-off radius around the centre; probe points
                  farther from the centre return (0, 0) immediately.
    dmax        : per-axis pruning distance, same units as the coordinates.
                  NOTE(review): pRDF() uses dmax=20 for its grid but calls
                  this with the default 10 -- confirm that is intended.

    Returns (min_dist, min_index). min_index == 0 means "no atom found"
    (outside R_solute, or every atom was pruned); in the pruned case
    min_dist is the distance to the solute centre.
    '''
    # Squared distance to the solute centre: cheap early rejection.
    center_sq = ((coor_list[0] - solv_coor[0]) ** 2
                 + (coor_list[1] - solv_coor[1]) ** 2
                 + (coor_list[2] - solv_coor[2]) ** 2)
    if center_sq > R_solute:
        return 0, 0
    # Seed the search with the distance to the centre so the per-axis
    # pruning below can discard atoms that cannot possibly be closer.
    min_dist = math.sqrt(center_sq)
    min_index = 0
    for i in solu_l:
        atom = solute_atoms[i]
        dx = abs(atom.atom_coor_x - solv_coor[0])
        if dx > dmax or dx > min_dist:
            continue
        dy = abs(atom.atom_coor_y - solv_coor[1])
        if dy > dmax or dy > min_dist:
            continue
        dz = abs(atom.atom_coor_z - solv_coor[2])
        if dz > dmax or dz > min_dist:
            continue
        dist = math.sqrt(dx * dx + dy * dy + dz * dz)
        if dist < min_dist:
            min_dist = dist
            min_index = i
    return min_dist, min_index
def Dist_Square(vect_1,vect_2):
    """Return the squared Euclidean distance between two 3-component vectors."""
    dx = vect_2[0] - vect_1[0]
    dy = vect_2[1] - vect_1[1]
    dz = vect_2[2] - vect_1[2]
    return dx * dx + dy * dy + dz * dz
def Get_solvent_list(atom_list):
    """Collect indices of water oxygen atoms from an index->atom dict.

    An atom qualifies when residue/atom names are WAT/O or SOL/OW.
    """
    solvent_list = []
    for index in atom_list:
        atom = atom_list[index]
        water_o = atom.residue_name == "WAT" and atom.atom_name == "O"
        water_ow = atom.residue_name == "SOL" and atom.atom_name == "OW"
        if water_o or water_ow:
            solvent_list.append(index)
    return solvent_list
def hist(value_list,number_list,min_value, max_value, nbins=100):
    '''
    Bin ``value_list`` into ``nbins`` equal-width bins over
    [min_value, max_value) and return, per bin, the average of the
    corresponding ``number_list`` entries (0 for empty bins).

    value_list and number_list must have equal length.

    BUGFIX: values below min_value used to produce a negative bin index
    that silently wrapped around and corrupted other bins; they are now
    skipped, like values >= max_value always were.
    Also uses an explicit float bin width so integer bounds cannot
    trigger integer division under Python 2.
    '''
    bin_width = (max_value - min_value) / float(nbins)
    counts = [0 for _ in range(nbins)]
    sums = [0 for _ in range(nbins)]
    for value, number in zip(value_list, number_list):
        # Skip anything outside the histogram range.
        if not (min_value <= value < max_value):
            continue
        idx = int((value - min_value) / bin_width)
        counts[idx] += 1
        sums[idx] += number
    # Convert per-bin sums to per-bin averages; empty bins stay 0.
    for idx in range(nbins):
        if counts[idx] > 0:
            sums[idx] /= float(counts[idx])
    return sums
def pRDF(traj_file,coor_file,index_file,solute_index,dmax=20):
'''
A simple pRDF test here.
'''
START_TIME =Time.time()
HAS_TRAJFILE = False
if os.path.isfile(traj_file):
HAS_TRAJFILE = True
atom_list =Simple_atom.Get_atom_list(coor_file)
index_list =Index.Read_index_to_Inclass(index_file)
solute_list =index_list[solute_index].group_list
solvent_list =Get_solvent_list(atom_list)
if HAS_TRAJFILE:
U=MDAnalysis.Universe(coor_file,traj_file)
else:
U=MDAnalysis.Universe(coor_file)
GR=numpy.zeros((100),dtype=numpy.float64)
EXIST_NUM = 0
if os.path.isfile("datafile.xvg"):
Data_file = open("datafile.xvg",'r+')
lines = Data_file.readlines()
EXIST_NUM = len(lines)
try:
last_line = lines[-1].split()
for x in range(100):
GR[x] = float(last_line[x])*EXIST_NUM
except:
pass
else:
Data_file=open("datafile.xvg",'w')
#step 1
# Get the center of the solute.
for ts in U.trajectory:
print "Checking frame number: %d" %(ts.frame)
if ts.frame < EXIST_NUM +1:
continue
solute_atoms = dict()
coor_x=0.0
coor_y=0.0
coor_z=0.0
X_min = ts._x[0]
X_max = ts._x[0]
Y_min = ts._y[0]
Y_max = ts._y[0]
Z_min = ts._z[0]
Z_max = ts._z[0]
R_solute =0.0
for i in solute_list:
coor_x += ts._x[i-1]
coor_y += ts._y[i-1]
coor_z += ts._z[i-1]
u_atom = MDPackage.Coor.unit_atom.unit_atom(atom_coor_x=ts._x[i-1],atom_coor_y=ts._y[i-1],atom_coor_z=ts._z[i-1])
solute_atoms[i]=u_atom
if ts._x[i-1] < X_min:
X_min = ts._x[i-1]
elif ts._x[i-1] > X_max:
X_max = ts._x[i-1]
if ts._y[i-1] < Y_min:
Y_min = ts._y[i-1]
elif ts._y[i-1] > Y_max:
Y_max = ts._y[i-1]
if ts._z[i-1] < Z_min:
Z_min = ts._z[i-1]
elif ts._z[i-1] > Z_max:
Z_max = ts._z[i-1]
coor_x /= len(solute_list)
coor_y /= len(solute_list)
coor_z /= len(solute_list)
for i in solute_list:
_R_tmp = ( solute_atoms[i].atom_coor_x - coor_x ) **2 + \
( solute_atoms[i].atom_coor_y - coor_y ) **2 +\
( solute_atoms[i].atom_coor_z - coor_z ) **2
if _R_tmp > R_solute:
R_solute = _R_tmp
R_solute = math.sqrt(R_solute) + dmax
R_solute = R_solute **2
# print R_solute
# sys.exit()
# print "Step 1 finished."
#print "center %f\t%f\t%f" %(coor_x,coor_y,coor_z)
# print X_min,X_max
#step 2
#Get the range of the box.
X_min = X_min - dmax
Y_min = Y_min - dmax
Z_min = Z_min - dmax
X_max = X_max + dmax
Y_max = Y_max + dmax
Z_max = Z_max + dmax
# print X_min,X_max
# bin = dmax *2.0 / nbins
bin = 1.0
x_bins = int((X_max - X_min) /bin) +1
y_bins = int((Y_max - Y_min) /bin) +1
z_bins = int((Z_max - Z_min) /bin) +1
#print "bin:",bin
#step 4
#assign each grid to solute atoms.
#grid_in_solute contains that each grid blongs to which solute atom.
grids_in_which_solute =dict()
solute_contain_grids =dict()
# print x_bins,y_bins,z_bins
_gauss_value = -1
for i in range(x_bins * y_bins * z_bins ):
z_temp = i / ( x_bins * y_bins)
y_temp = (i % (x_bins * y_bins)) / x_bins
x_temp = i % x_bins
grid_site=[X_min+(x_temp+0.5)*bin, Y_min+(y_temp+0.5)*bin, Z_min+(z_temp+0.5)*bin]
#print grid_site
min_dist, min_index= Min_dist(solute_atoms,solute_list,grid_site,[coor_x,coor_y,coor_z],R_solute)
if min_index == 0:
continue
# _gauss_value = min_dist
if i % 10 ==0:
NOW_TIME=Time.time()
BIN_TIME=NOW_TIME-START_TIME
sys.stderr.write("grid ID %10d, time used: %6.2f s\r" %(i,BIN_TIME))
sys.stderr.flush()
try:
grids_in_which_solute[i]=[min_index,min_dist]
except:
print "hello to see you"
try:
solute_contain_grids[min_index].append(i)
except:
solute_contain_grids[min_index]=list()
solute_contain_grids[min_index].append(i)
#print solute_contain_grids
# for i in solute_contain_grids:
# print i,len(solute_contain_grids[i])
# sys.exit()
# print "\nStep 4 finished."
#step 5.
#assign each solvent atom to grids.
grid_in_solvent=[list() for i in range(x_bins * y_bins * z_bins)]
for i in solvent_list:
SV_x = ts._x[i-1]
SV_y = ts._y[i-1]
SV_z = ts._z[i-1]
if SV_x > X_min and SV_x < X_max:
x_temp = int( (SV_x - X_min) / bin )
else:
continue
if SV_y > Y_min and SV_y < Y_max:
y_temp = int( (SV_y - Y_min) / bin )
else:
continue
if SV_z > Z_min and SV_z < Z_max:
z_temp = int( (SV_z - Z_min) / bin )
else:
continue
grid_in_solvent[z_temp*x_bins*y_bins + y_temp*x_bins + x_temp].append(i)
# print "append solvent %d" %i
# print "Step 5 finished."
# step 6.
#calculating the g(r) for each solute atom.
# density = MDAnalysis.core.units.convert(1.0, 'water', 'Angstrom^{-3}')*math.pow(10,3)
density = MDAnalysis.core.units.convert(1.0, 'water', 'Angstrom^{-3}')
# print density
unit_conc = ((bin)**3 * density) #unit solvent atom density.
# print unit_conc
temp1 =list() #A list used to contain grid_dist.
temp2 =list() #A list used to contain sol number for each grad.
# TOTAL_ATOMS = 0
for i in solute_list:
try:
temp_grids=solute_contain_grids[i]
#print "solute %d, grids number %d" %(i,len(temp_grids))
except:
continue
#rdf_atom=[0 for i in range(50)]
# bin =float(dmax)/nbins
for grid in temp_grids:
sol_number=len(grid_in_solvent[grid])
# if sol_number ==0:
# continue
#print " %d" %sol_number,
# try:
blabla,dist=grids_in_which_solute[grid]
# except:
# continue
temp1.append(dist)
temp2.append(sol_number)
# if len(temp1) == 0:
# continue
# print temp1
# print temp2
#if i == 10:
# sys.exit()
rdf_atom=hist(temp1, temp2, 0.0, dmax, 100)
# print rdf_atom
# print unit_conc
#print rdf_atom
# if sum(rdf_atom) > 0:
# TOTAL_ATOMS += 1
rdf_atom=numpy.array(rdf_atom) / unit_conc
# sys.exit()
GR += rdf_atom
# print GR
plt.clf()
ax = plt.gca()
ax.set_xlabel("Distance (nm)")
ax.set_ylabel("pRDF(r)")
x_label=[i*dmax/1000. for i in range(100)]
y_label=[GR[i]/ts.frame for i in range(100)]
ax.plot(x_label,y_label,'b',)
plt.draw()
temp_filename="temp"+"%d"%(ts.frame)+".png"
plt.savefig(temp_filename)
for i in range(100):
Data_file.write("%12.4f" %y_label[i])
Data_file.write("\n")
Data_file.flush()
GR = GR / U.trajectory.numframes
Data_file.close()
# print TOTAL_ATOMS
# print len(solute_index)
for i in range(32):
print "%f\t%f" %(2.0/32*(i+0.5),GR[i])
# print GR
def Check_args():
if len(sys.argv) != 5:
print "Usage: pRDF_test.py coor_file traj_file index_file solute_index"
else:
coor_file = sys.argv[1]
traj_file = sys.argv[2]
index_file = sys.argv[3]
solute_index = int(sys.argv[4])
pRDF(traj_file,coor_file,index_file,solute_index,10)
if __name__ == '__main__':
    # Interactive matplotlib mode so the per-frame RDF plot refreshes
    # while the trajectory is being processed.
    plt.ion()
    Check_args()
| zhuhong/pRDF | pRDF_test.py | Python | gpl-2.0 | 11,895 | [
"MDAnalysis"
] | c4e8fb9ca4a2c111710ec77c3f2fcbe365adec4f39e70f0d0d117f41a7f1b73f |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3.services.webhooks import WebhooksAsyncClient
from google.cloud.dialogflowcx_v3.services.webhooks import WebhooksClient
from google.cloud.dialogflowcx_v3.services.webhooks import pagers
from google.cloud.dialogflowcx_v3.services.webhooks import transports
from google.cloud.dialogflowcx_v3.types import webhook
from google.cloud.dialogflowcx_v3.types import webhook as gcdc_webhook
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client-cert callback: returns static (cert, key) bytes."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a distinct fake endpoint when the client defaults to localhost."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to mTLS form."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    # None and non-Google hosts pass through untouched.
    assert WebhooksClient._get_default_mtls_endpoint(None) is None
    assert WebhooksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
    # Plain endpoints are converted; already-mTLS endpoints are idempotent.
    for given, expected in [
        (api_endpoint, api_mtls_endpoint),
        (api_mtls_endpoint, api_mtls_endpoint),
        (sandbox_endpoint, sandbox_mtls_endpoint),
        (sandbox_mtls_endpoint, sandbox_mtls_endpoint),
    ]:
        assert WebhooksClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize("client_class", [WebhooksClient, WebhooksAsyncClient,])
def test_webhooks_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.WebhooksGrpcTransport, "grpc"),
        (transports.WebhooksGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_webhooks_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """always_use_jwt_access toggles self-signed JWT on the credentials."""
    # Same construction for both flag values; only the expectation on the
    # mocked with_always_use_jwt_access differs.
    for always_use in (True, False):
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            creds = service_account.Credentials(None, None, None)
            transport_class(credentials=creds, always_use_jwt_access=always_use)
            if always_use:
                use_jwt.assert_called_once_with(True)
            else:
                use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [WebhooksClient, WebhooksAsyncClient,])
def test_webhooks_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_webhooks_client_get_transport_class():
    """get_transport_class returns the gRPC transport by default and by name."""
    # With no argument the default must be one of the advertised transports.
    assert WebhooksClient.get_transport_class() in [
        transports.WebhooksGrpcTransport,
    ]
    # Requesting "grpc" explicitly pins the synchronous gRPC transport.
    assert (
        WebhooksClient.get_transport_class("grpc") == transports.WebhooksGrpcTransport
    )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (WebhooksClient, transports.WebhooksGrpcTransport, "grpc"),
        (WebhooksAsyncClient, transports.WebhooksGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
@mock.patch.object(
    WebhooksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WebhooksClient)
)
@mock.patch.object(
    WebhooksAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(WebhooksAsyncClient),
)
def test_webhooks_client_client_options(client_class, transport_class, transport_name):
    """Transport construction honors client_options and mTLS env vars.

    Covers: passing a ready transport instance, a transport name string,
    a custom api_endpoint, GOOGLE_API_USE_MTLS_ENDPOINT values of
    "never"/"always"/unsupported, an unsupported
    GOOGLE_API_USE_CLIENT_CERTIFICATE, and quota_project_id forwarding.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(WebhooksClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(WebhooksClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        # The custom endpoint must be passed through as the transport host.
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (WebhooksClient, transports.WebhooksGrpcTransport, "grpc", "true"),
        (
            WebhooksAsyncClient,
            transports.WebhooksGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (WebhooksClient, transports.WebhooksGrpcTransport, "grpc", "false"),
        (
            WebhooksAsyncClient,
            transports.WebhooksGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    WebhooksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WebhooksClient)
)
@mock.patch.object(
    WebhooksAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(WebhooksAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_webhooks_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """Endpoint/cert autoswitching when GOOGLE_API_USE_MTLS_ENDPOINT=auto.

    Three scenarios: explicit client_cert_source in options, cert available
    from ADC, and no cert at all; each crossed with
    GOOGLE_API_USE_CLIENT_CERTIFICATE true/false via parametrization.
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here still refers to the instance
                    # built in the previous section until it is reassigned
                    # below; the DEFAULT_* attributes are class-level so the
                    # expected values are unaffected.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                # With no cert available the plain endpoint is always used.
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [WebhooksClient, WebhooksAsyncClient])
@mock.patch.object(
WebhooksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WebhooksClient)
)
@mock.patch.object(
WebhooksAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(WebhooksAsyncClient),
)
def test_webhooks_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (WebhooksClient, transports.WebhooksGrpcTransport, "grpc"),
        (WebhooksAsyncClient, transports.WebhooksGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_webhooks_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """User-supplied OAuth scopes are forwarded verbatim to the transport."""
    scoped_options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=scoped_options, transport=transport_name)
        # Only `scopes` deviates from the defaults.
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (WebhooksClient, transports.WebhooksGrpcTransport, "grpc", grpc_helpers),
        (
            WebhooksAsyncClient,
            transports.WebhooksGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_webhooks_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file option is forwarded untouched to the transport."""
    file_options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=file_options, transport=transport_name)
        # Only `credentials_file` deviates from the defaults.
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_webhooks_client_client_options_from_dict():
    """A plain dict of client options is accepted in place of ClientOptions."""
    transport_init_path = (
        "google.cloud.dialogflowcx_v3.services.webhooks.transports."
        "WebhooksGrpcTransport.__init__"
    )
    with mock.patch(transport_init_path) as grpc_transport:
        grpc_transport.return_value = None
        WebhooksClient(client_options={"api_endpoint": "squid.clam.whelk"})
        # The dict's api_endpoint must surface as the transport host.
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (WebhooksClient, transports.WebhooksGrpcTransport, "grpc", grpc_helpers),
        (
            WebhooksAsyncClient,
            transports.WebhooksGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_webhooks_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from file are the ones used to create the channel.

    First verifies the transport receives the credentials_file option, then
    (with a real transport and mocked channel factory) that
    `create_channel` is invoked with the file-loaded credentials and the
    dialogflow default scopes/host.
    """
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        # `file_creds` (not the ADC `creds`) must reach create_channel.
        create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            scopes=None,
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [webhook.ListWebhooksRequest, dict,])
def test_list_webhooks(request_type, transport: str = "grpc"):
client = WebhooksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = webhook.ListWebhooksResponse(
next_page_token="next_page_token_value",
)
response = client.list_webhooks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == webhook.ListWebhooksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListWebhooksPager)
assert response.next_page_token == "next_page_token_value"
def test_list_webhooks_empty_call():
    """list_webhooks() with no arguments still sends a default request.

    Coverage failsafe: request == None and no flattened fields must work.
    """
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Fake the underlying gRPC stub call.
    with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
        client.list_webhooks()
        call.assert_called()
        # An empty default proto is what goes over the wire.
        sent = call.mock_calls[0]
        _, args, _ = sent
        assert args[0] == webhook.ListWebhooksRequest()
@pytest.mark.asyncio
async def test_list_webhooks_async(
    transport: str = "grpc_asyncio", request_type=webhook.ListWebhooksRequest
):
    """Async happy path: mirrors test_list_webhooks with an awaitable fake."""
    client = WebhooksAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
        # Designate an appropriate return value for the call (wrapped so it
        # can be awaited like a real unary-unary gRPC call).
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            webhook.ListWebhooksResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_webhooks(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == webhook.ListWebhooksRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListWebhooksAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_webhooks_async_from_dict():
    # Re-run the async happy path passing the request as a plain dict.
    await test_list_webhooks_async(request_type=dict)
def test_list_webhooks_field_headers():
    """request.parent must be propagated as an x-goog-request-params header."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = webhook.ListWebhooksRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
        call.return_value = webhook.ListWebhooksResponse()
        client.list_webhooks(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_webhooks_field_headers_async():
    """Async variant: request.parent propagated as x-goog-request-params."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = webhook.ListWebhooksRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            webhook.ListWebhooksResponse()
        )
        await client.list_webhooks(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_webhooks_flattened():
    """The flattened `parent` kwarg is copied into the request proto."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = webhook.ListWebhooksResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_webhooks(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_webhooks_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = webhook.ListWebhooksRequest()
    # A positional request plus a flattened `parent` kwarg is ambiguous.
    with pytest.raises(ValueError):
        client.list_webhooks(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_webhooks_flattened_async():
    """Async flattened call populates request.parent from the kwarg."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (Fix: the original first assigned a bare ListWebhooksResponse and
        # immediately overwrote it with this awaitable fake; the dead store
        # has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            webhook.ListWebhooksResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_webhooks(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_webhooks_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = webhook.ListWebhooksRequest()
    # A positional request plus a flattened `parent` kwarg is ambiguous.
    with pytest.raises(ValueError):
        await client.list_webhooks(request, parent="parent_value")
def test_list_webhooks_pager(transport_name: str = "grpc"):
    """Iterating the sync pager yields every Webhook across all pages."""
    client = WebhooksClient(
        # Fix: instantiate the credentials; the original passed the
        # AnonymousCredentials *class*, inconsistent with every other test
        # in this file.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 webhooks);
        # the trailing RuntimeError guards against over-iteration.
        call.side_effect = (
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(), webhook.Webhook(), webhook.Webhook(),],
                next_page_token="abc",
            ),
            webhook.ListWebhooksResponse(webhooks=[], next_page_token="def",),
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(),], next_page_token="ghi",
            ),
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(), webhook.Webhook(),],
            ),
            RuntimeError,
        )

        # The pager must carry the parent routing-header metadata.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_webhooks(request={})

        assert pager._metadata == metadata

        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, webhook.Webhook) for i in results)
def test_list_webhooks_pages(transport_name: str = "grpc"):
    """``pages`` yields raw per-page responses with their page tokens.

    Fix: instantiate ``AnonymousCredentials()`` rather than passing the
    class object, for consistency with every other test in this file.
    """
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_webhooks), "__call__") as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(), webhook.Webhook(), webhook.Webhook(),],
                next_page_token="abc",
            ),
            webhook.ListWebhooksResponse(webhooks=[], next_page_token="def",),
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(),], next_page_token="ghi",
            ),
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(), webhook.Webhook(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_webhooks(request={}).pages)
        # Each yielded page exposes the raw response and its token; the
        # final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_webhooks_async_pager():
    """The async pager flattens all pages via ``async for``.

    Fix: instantiate ``AnonymousCredentials()`` rather than passing the
    class object, for consistency with every other test in this file.
    """
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_webhooks), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(), webhook.Webhook(), webhook.Webhook(),],
                next_page_token="abc",
            ),
            webhook.ListWebhooksResponse(webhooks=[], next_page_token="def",),
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(),], next_page_token="ghi",
            ),
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(), webhook.Webhook(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_webhooks(request={},)
        assert async_pager.next_page_token == "abc"

        # Async iteration should surface every webhook across all pages.
        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, webhook.Webhook) for i in responses)
@pytest.mark.asyncio
async def test_list_webhooks_async_pages():
    """Async ``pages`` yields raw per-page responses with their tokens.

    Fix: instantiate ``AnonymousCredentials()`` rather than passing the
    class object, for consistency with every other test in this file.
    """
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_webhooks), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(), webhook.Webhook(), webhook.Webhook(),],
                next_page_token="abc",
            ),
            webhook.ListWebhooksResponse(webhooks=[], next_page_token="def",),
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(),], next_page_token="ghi",
            ),
            webhook.ListWebhooksResponse(
                webhooks=[webhook.Webhook(), webhook.Webhook(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_webhooks(request={})).pages:
            pages.append(page_)
        # The final page carries an empty next_page_token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [webhook.GetWebhookRequest, dict,])
def test_get_webhook(request_type, transport: str = "grpc"):
    """GetWebhook sends one RPC and relays the stubbed Webhook back."""
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional at runtime and the API is mocked,
    # so an empty request of either accepted type is sufficient.
    request = request_type()

    # Patch the transport-level stub and hand it a canned response.
    with mock.patch.object(type(client.transport.get_webhook), "__call__") as stub:
        stub.return_value = webhook.Webhook(
            name="name_value",
            display_name="display_name_value",
            disabled=True,
            generic_web_service=webhook.Webhook.GenericWebService(uri="uri_value"),
        )
        response = client.get_webhook(request)

        # Exactly one RPC, carrying an (empty) GetWebhookRequest.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == webhook.GetWebhookRequest()

    # The canned response comes back to the caller unchanged.
    assert isinstance(response, webhook.Webhook)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.disabled is True
def test_get_webhook_empty_call():
    """A completely argument-less get_webhook() still issues an RPC.

    Coverage failsafe: request == None with no flattened fields must work.
    """
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the transport-level stub and invoke the method bare.
    with mock.patch.object(type(client.transport.get_webhook), "__call__") as stub:
        client.get_webhook()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request object is synthesized for the RPC.
        assert call_args[0] == webhook.GetWebhookRequest()
@pytest.mark.asyncio
async def test_get_webhook_async(
    transport: str = "grpc_asyncio", request_type=webhook.GetWebhookRequest
):
    """Async GetWebhook sends one RPC and relays the stubbed Webhook."""
    client = WebhooksAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level stub; the fake unary call resolves to the
    # canned Webhook when awaited.
    with mock.patch.object(type(client.transport.get_webhook), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            webhook.Webhook(
                name="name_value", display_name="display_name_value", disabled=True,
            )
        )
        response = await client.get_webhook(request)

        # At least one RPC, carrying an (empty) GetWebhookRequest.
        assert stub.mock_calls
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == webhook.GetWebhookRequest()

    # The canned response comes back to the caller unchanged.
    assert isinstance(response, webhook.Webhook)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.disabled is True
@pytest.mark.asyncio
async def test_get_webhook_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_get_webhook_async(request_type=dict)
def test_get_webhook_field_headers():
    """URI-bound request fields are propagated as routing metadata."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Set ``name`` (part of the HTTP/1.1 URI) to a non-empty value so it
    # must appear in the x-goog-request-params header.
    request = webhook.GetWebhookRequest()
    request.name = "name/value"

    # Patch the transport-level stub and fire the request.
    with mock.patch.object(type(client.transport.get_webhook), "__call__") as stub:
        stub.return_value = webhook.Webhook()
        client.get_webhook(request)

        # Exactly one RPC, carrying the request object untouched.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request

    # The routing header must echo the field value.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_webhook_field_headers_async():
    """URI-bound request fields are propagated as routing metadata (async)."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Set ``name`` (part of the HTTP/1.1 URI) to a non-empty value so it
    # must appear in the x-goog-request-params header.
    request = webhook.GetWebhookRequest()
    request.name = "name/value"

    # Patch the transport-level stub and fire the request.
    with mock.patch.object(type(client.transport.get_webhook), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(webhook.Webhook())
        await client.get_webhook(request)

        # At least one RPC, carrying the request object untouched.
        assert stub.mock_calls
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request

    # The routing header must echo the field value.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_get_webhook_flattened():
    """A flattened ``name`` kwarg is copied into the request object."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the transport-level stub and hand back a canned response.
    with mock.patch.object(type(client.transport.get_webhook), "__call__") as stub:
        stub.return_value = webhook.Webhook()

        # Invoke with a truthy value for each flattened field.
        client.get_webhook(name="name_value",)

        # Exactly one RPC, whose request must carry the flattened value.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_get_webhook_flattened_error():
    """Mixing a request object with flattened kwargs must raise."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying the request positionally together with the flattened
    # ``name`` keyword is ambiguous, so a ValueError is expected.
    with pytest.raises(ValueError):
        client.get_webhook(
            webhook.GetWebhookRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_webhook_flattened_async():
    """Flattened ``name`` kwarg is copied into the request (async client).

    Fix: removed the dead ``call.return_value = webhook.Webhook()``
    assignment that was immediately overwritten by the fake unary call.
    """
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_webhook), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(webhook.Webhook())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_webhook(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_webhook_flattened_error_async():
    """Mixing a request object with flattened kwargs must raise (async)."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying the request positionally together with the flattened
    # ``name`` keyword is ambiguous, so a ValueError is expected.
    with pytest.raises(ValueError):
        await client.get_webhook(
            webhook.GetWebhookRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [gcdc_webhook.CreateWebhookRequest, dict,])
def test_create_webhook(request_type, transport: str = "grpc"):
    """CreateWebhook sends one RPC and relays the stubbed Webhook back."""
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional at runtime and the API is mocked,
    # so an empty request of either accepted type is sufficient.
    request = request_type()

    # Patch the transport-level stub and hand it a canned response.
    with mock.patch.object(type(client.transport.create_webhook), "__call__") as stub:
        stub.return_value = gcdc_webhook.Webhook(
            name="name_value",
            display_name="display_name_value",
            disabled=True,
            generic_web_service=gcdc_webhook.Webhook.GenericWebService(uri="uri_value"),
        )
        response = client.create_webhook(request)

        # Exactly one RPC, carrying an (empty) CreateWebhookRequest.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == gcdc_webhook.CreateWebhookRequest()

    # The canned response comes back to the caller unchanged.
    assert isinstance(response, gcdc_webhook.Webhook)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.disabled is True
def test_create_webhook_empty_call():
    """A completely argument-less create_webhook() still issues an RPC.

    Coverage failsafe: request == None with no flattened fields must work.
    """
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the transport-level stub and invoke the method bare.
    with mock.patch.object(type(client.transport.create_webhook), "__call__") as stub:
        client.create_webhook()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request object is synthesized for the RPC.
        assert call_args[0] == gcdc_webhook.CreateWebhookRequest()
@pytest.mark.asyncio
async def test_create_webhook_async(
    transport: str = "grpc_asyncio", request_type=gcdc_webhook.CreateWebhookRequest
):
    """Async CreateWebhook sends one RPC and relays the stubbed Webhook."""
    client = WebhooksAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level stub; the fake unary call resolves to the
    # canned Webhook when awaited.
    with mock.patch.object(type(client.transport.create_webhook), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_webhook.Webhook(
                name="name_value", display_name="display_name_value", disabled=True,
            )
        )
        response = await client.create_webhook(request)

        # At least one RPC, carrying an (empty) CreateWebhookRequest.
        assert stub.mock_calls
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == gcdc_webhook.CreateWebhookRequest()

    # The canned response comes back to the caller unchanged.
    assert isinstance(response, gcdc_webhook.Webhook)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.disabled is True
@pytest.mark.asyncio
async def test_create_webhook_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_create_webhook_async(request_type=dict)
def test_create_webhook_field_headers():
    """URI-bound request fields are propagated as routing metadata."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Set ``parent`` (part of the HTTP/1.1 URI) to a non-empty value so it
    # must appear in the x-goog-request-params header.
    request = gcdc_webhook.CreateWebhookRequest()
    request.parent = "parent/value"

    # Patch the transport-level stub and fire the request.
    with mock.patch.object(type(client.transport.create_webhook), "__call__") as stub:
        stub.return_value = gcdc_webhook.Webhook()
        client.create_webhook(request)

        # Exactly one RPC, carrying the request object untouched.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request

    # The routing header must echo the field value.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_webhook_field_headers_async():
    """URI-bound request fields are propagated as routing metadata (async)."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Set ``parent`` (part of the HTTP/1.1 URI) to a non-empty value so it
    # must appear in the x-goog-request-params header.
    request = gcdc_webhook.CreateWebhookRequest()
    request.parent = "parent/value"

    # Patch the transport-level stub and fire the request.
    with mock.patch.object(type(client.transport.create_webhook), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_webhook.Webhook()
        )
        await client.create_webhook(request)

        # At least one RPC, carrying the request object untouched.
        assert stub.mock_calls
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request

    # The routing header must echo the field value.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_create_webhook_flattened():
    """Flattened ``parent`` and ``webhook`` kwargs populate the request."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the transport-level stub and hand back a canned response.
    with mock.patch.object(type(client.transport.create_webhook), "__call__") as stub:
        stub.return_value = gcdc_webhook.Webhook()

        # Invoke with a truthy value for each flattened field.
        client.create_webhook(
            parent="parent_value", webhook=gcdc_webhook.Webhook(name="name_value"),
        )

        # Exactly one RPC, whose request must carry the flattened values.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].parent == "parent_value"
        assert call_args[0].webhook == gcdc_webhook.Webhook(name="name_value")
def test_create_webhook_flattened_error():
    """Mixing a request object with flattened kwargs must raise."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying the request positionally together with the flattened
    # keywords is ambiguous, so a ValueError is expected.
    with pytest.raises(ValueError):
        client.create_webhook(
            gcdc_webhook.CreateWebhookRequest(),
            parent="parent_value",
            webhook=gcdc_webhook.Webhook(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_webhook_flattened_async():
    """Flattened kwargs populate the request (async client).

    Fix: removed the dead ``call.return_value = gcdc_webhook.Webhook()``
    assignment that was immediately overwritten by the fake unary call.
    """
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_webhook), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_webhook.Webhook()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_webhook(
            parent="parent_value", webhook=gcdc_webhook.Webhook(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].webhook
        mock_val = gcdc_webhook.Webhook(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_webhook_flattened_error_async():
    """Mixing a request object with flattened kwargs must raise (async)."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying the request positionally together with the flattened
    # keywords is ambiguous, so a ValueError is expected.
    with pytest.raises(ValueError):
        await client.create_webhook(
            gcdc_webhook.CreateWebhookRequest(),
            parent="parent_value",
            webhook=gcdc_webhook.Webhook(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [gcdc_webhook.UpdateWebhookRequest, dict,])
def test_update_webhook(request_type, transport: str = "grpc"):
    """UpdateWebhook sends one RPC and relays the stubbed Webhook back."""
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional at runtime and the API is mocked,
    # so an empty request of either accepted type is sufficient.
    request = request_type()

    # Patch the transport-level stub and hand it a canned response.
    with mock.patch.object(type(client.transport.update_webhook), "__call__") as stub:
        stub.return_value = gcdc_webhook.Webhook(
            name="name_value",
            display_name="display_name_value",
            disabled=True,
            generic_web_service=gcdc_webhook.Webhook.GenericWebService(uri="uri_value"),
        )
        response = client.update_webhook(request)

        # Exactly one RPC, carrying an (empty) UpdateWebhookRequest.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == gcdc_webhook.UpdateWebhookRequest()

    # The canned response comes back to the caller unchanged.
    assert isinstance(response, gcdc_webhook.Webhook)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.disabled is True
def test_update_webhook_empty_call():
    """A completely argument-less update_webhook() still issues an RPC.

    Coverage failsafe: request == None with no flattened fields must work.
    """
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the transport-level stub and invoke the method bare.
    with mock.patch.object(type(client.transport.update_webhook), "__call__") as stub:
        client.update_webhook()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request object is synthesized for the RPC.
        assert call_args[0] == gcdc_webhook.UpdateWebhookRequest()
@pytest.mark.asyncio
async def test_update_webhook_async(
    transport: str = "grpc_asyncio", request_type=gcdc_webhook.UpdateWebhookRequest
):
    """Async UpdateWebhook sends one RPC and relays the stubbed Webhook."""
    client = WebhooksAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level stub; the fake unary call resolves to the
    # canned Webhook when awaited.
    with mock.patch.object(type(client.transport.update_webhook), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_webhook.Webhook(
                name="name_value", display_name="display_name_value", disabled=True,
            )
        )
        response = await client.update_webhook(request)

        # At least one RPC, carrying an (empty) UpdateWebhookRequest.
        assert stub.mock_calls
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == gcdc_webhook.UpdateWebhookRequest()

    # The canned response comes back to the caller unchanged.
    assert isinstance(response, gcdc_webhook.Webhook)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.disabled is True
@pytest.mark.asyncio
async def test_update_webhook_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_update_webhook_async(request_type=dict)
def test_update_webhook_field_headers():
    """URI-bound request fields are propagated as routing metadata."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Set ``webhook.name`` (part of the HTTP/1.1 URI) to a non-empty value
    # so it must appear in the x-goog-request-params header.
    request = gcdc_webhook.UpdateWebhookRequest()
    request.webhook.name = "webhook.name/value"

    # Patch the transport-level stub and fire the request.
    with mock.patch.object(type(client.transport.update_webhook), "__call__") as stub:
        stub.return_value = gcdc_webhook.Webhook()
        client.update_webhook(request)

        # Exactly one RPC, carrying the request object untouched.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request

    # The routing header must echo the nested field value.
    assert ("x-goog-request-params", "webhook.name=webhook.name/value",) in call_kwargs[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_webhook_field_headers_async():
    """URI-bound request fields are propagated as routing metadata (async)."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Set ``webhook.name`` (part of the HTTP/1.1 URI) to a non-empty value
    # so it must appear in the x-goog-request-params header.
    request = gcdc_webhook.UpdateWebhookRequest()
    request.webhook.name = "webhook.name/value"

    # Patch the transport-level stub and fire the request.
    with mock.patch.object(type(client.transport.update_webhook), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_webhook.Webhook()
        )
        await client.update_webhook(request)

        # At least one RPC, carrying the request object untouched.
        assert stub.mock_calls
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request

    # The routing header must echo the nested field value.
    assert ("x-goog-request-params", "webhook.name=webhook.name/value",) in call_kwargs[
        "metadata"
    ]
def test_update_webhook_flattened():
    """Flattened ``webhook`` and ``update_mask`` kwargs populate the request."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the transport-level stub and hand back a canned response.
    with mock.patch.object(type(client.transport.update_webhook), "__call__") as stub:
        stub.return_value = gcdc_webhook.Webhook()

        # Invoke with a truthy value for each flattened field.
        client.update_webhook(
            webhook=gcdc_webhook.Webhook(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Exactly one RPC, whose request must carry the flattened values.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].webhook == gcdc_webhook.Webhook(name="name_value")
        assert call_args[0].update_mask == field_mask_pb2.FieldMask(
            paths=["paths_value"]
        )
def test_update_webhook_flattened_error():
    """Mixing a request object with flattened kwargs must raise."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying the request positionally together with the flattened
    # keywords is ambiguous, so a ValueError is expected.
    with pytest.raises(ValueError):
        client.update_webhook(
            gcdc_webhook.UpdateWebhookRequest(),
            webhook=gcdc_webhook.Webhook(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_webhook_flattened_async():
    """Flattened kwargs populate the request (async client).

    Fix: removed the dead ``call.return_value = gcdc_webhook.Webhook()``
    assignment that was immediately overwritten by the fake unary call.
    """
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_webhook), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_webhook.Webhook()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_webhook(
            webhook=gcdc_webhook.Webhook(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].webhook
        mock_val = gcdc_webhook.Webhook(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_webhook_flattened_error_async():
    """Mixing a request object with flattened kwargs must raise (async)."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying the request positionally together with the flattened
    # keywords is ambiguous, so a ValueError is expected.
    with pytest.raises(ValueError):
        await client.update_webhook(
            gcdc_webhook.UpdateWebhookRequest(),
            webhook=gcdc_webhook.Webhook(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [webhook.DeleteWebhookRequest, dict,])
def test_delete_webhook(request_type, transport: str = "grpc"):
    """DeleteWebhook sends one RPC and returns ``None``."""
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional at runtime and the API is mocked,
    # so an empty request of either accepted type is sufficient.
    request = request_type()

    # Patch the transport-level stub; delete RPCs yield no payload.
    with mock.patch.object(type(client.transport.delete_webhook), "__call__") as stub:
        stub.return_value = None
        response = client.delete_webhook(request)

        # Exactly one RPC, carrying an (empty) DeleteWebhookRequest.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == webhook.DeleteWebhookRequest()

    # Deletion produces no response payload.
    assert response is None
def test_delete_webhook_empty_call():
    """A completely argument-less delete_webhook() still issues an RPC.

    Coverage failsafe: request == None with no flattened fields must work.
    """
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the transport-level stub and invoke the method bare.
    with mock.patch.object(type(client.transport.delete_webhook), "__call__") as stub:
        client.delete_webhook()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An empty default request object is synthesized for the RPC.
        assert call_args[0] == webhook.DeleteWebhookRequest()
@pytest.mark.asyncio
async def test_delete_webhook_async(
    transport: str = "grpc_asyncio", request_type=webhook.DeleteWebhookRequest
):
    """Async DeleteWebhook sends one RPC and returns ``None``."""
    client = WebhooksAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the transport-level stub; the fake unary call resolves to None.
    with mock.patch.object(type(client.transport.delete_webhook), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_webhook(request)

        # At least one RPC, carrying an (empty) DeleteWebhookRequest.
        assert stub.mock_calls
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == webhook.DeleteWebhookRequest()

    # Deletion produces no response payload.
    assert response is None
@pytest.mark.asyncio
async def test_delete_webhook_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_delete_webhook_async(request_type=dict)
def test_delete_webhook_field_headers():
    """URI-bound request fields are propagated as routing metadata."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Set ``name`` (part of the HTTP/1.1 URI) to a non-empty value so it
    # must appear in the x-goog-request-params header.
    request = webhook.DeleteWebhookRequest()
    request.name = "name/value"

    # Patch the transport-level stub and fire the request.
    with mock.patch.object(type(client.transport.delete_webhook), "__call__") as stub:
        stub.return_value = None
        client.delete_webhook(request)

        # Exactly one RPC, carrying the request object untouched.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request

    # The routing header must echo the field value.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_webhook_field_headers_async():
    """URI-bound request fields are propagated as routing metadata (async)."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Set ``name`` (part of the HTTP/1.1 URI) to a non-empty value so it
    # must appear in the x-goog-request-params header.
    request = webhook.DeleteWebhookRequest()
    request.name = "name/value"

    # Patch the transport-level stub and fire the request.
    with mock.patch.object(type(client.transport.delete_webhook), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_webhook(request)

        # At least one RPC, carrying the request object untouched.
        assert stub.mock_calls
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request

    # The routing header must echo the field value.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_delete_webhook_flattened():
    """A flattened ``name`` kwarg is copied into the request object."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the transport-level stub; delete RPCs yield no payload.
    with mock.patch.object(type(client.transport.delete_webhook), "__call__") as stub:
        stub.return_value = None

        # Invoke with a truthy value for each flattened field.
        client.delete_webhook(name="name_value",)

        # Exactly one RPC, whose request must carry the flattened value.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_delete_webhook_flattened_error():
    """Mixing a request object with flattened kwargs must raise."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying the request positionally together with the flattened
    # ``name`` keyword is ambiguous, so a ValueError is expected.
    with pytest.raises(ValueError):
        client.delete_webhook(
            webhook.DeleteWebhookRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_webhook_flattened_async():
    """Flattened ``name`` kwarg is copied into the request (async client).

    Fix: removed the dead ``call.return_value = None`` assignment that was
    immediately overwritten by the fake unary call.
    """
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_webhook), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_webhook(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_webhook_flattened_error_async():
    """Mixing a request object with flattened kwargs must raise (async)."""
    client = WebhooksAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying the request positionally together with the flattened
    # ``name`` keyword is ambiguous, so a ValueError is expected.
    with pytest.raises(ValueError):
        await client.delete_webhook(
            webhook.DeleteWebhookRequest(), name="name_value",
        )
def test_credentials_transport_error():
    """Mutually-exclusive constructor arguments must each raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.WebhooksGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = WebhooksClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.WebhooksGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = WebhooksClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.WebhooksGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = WebhooksClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = WebhooksClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.WebhooksGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = WebhooksClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client constructed with an explicit transport must use it as-is."""
    custom_transport = transports.WebhooksGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The exact object handed in should be stored, not copied or rebuilt.
    assert WebhooksClient(transport=custom_transport).transport is custom_transport
def test_transport_get_channel():
    """Both the sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.WebhooksGrpcTransport,
        transports.WebhooksGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # A freshly constructed transport should already own a channel.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [transports.WebhooksGrpcTransport, transports.WebhooksGrpcAsyncIOTransport],
)
def test_transport_adc(transport_class):
    """ADC should supply credentials when none are given explicitly."""
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        # Constructing without credentials must trigger exactly one ADC lookup.
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """Clients default to the synchronous gRPC transport."""
    client = WebhooksClient(credentials=ga_credentials.AnonymousCredentials())
    assert isinstance(client.transport, transports.WebhooksGrpcTransport)
def test_webhooks_base_transport_error():
    """Supplying both a credentials object and a credentials file is rejected."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.WebhooksTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_webhooks_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.dialogflowcx_v3.services.webhooks.transports.WebhooksTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.WebhooksTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "list_webhooks",
        "get_webhook",
        "create_webhook",
        "update_webhook",
        "delete_webhook",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # Closing the abstract transport is likewise unimplemented.
    with pytest.raises(NotImplementedError):
        transport.close()
def test_webhooks_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.dialogflowcx_v3.services.webhooks.transports.WebhooksTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.WebhooksTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        # The loader must receive the file path, the service's default
        # scopes, and the quota project verbatim.
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
def test_webhooks_base_transport_with_adc():
    """With neither credentials nor a file, the base transport uses ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.dialogflowcx_v3.services.webhooks.transports.WebhooksTransport._prep_wrapped_messages"
    ) as prep:
        prep.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        # Construction alone should trigger exactly one ADC lookup.
        transports.WebhooksTransport()
        adc.assert_called_once()
def test_webhooks_auth_adc():
    """Client construction with no credentials must query ADC exactly once."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        WebhooksClient()
        # ADC must be asked with the service's default scopes and no
        # explicit scopes or quota project.
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.WebhooksGrpcTransport, transports.WebhooksGrpcAsyncIOTransport,],
)
def test_webhooks_transport_auth_adc(transport_class):
    """Explicit scopes and quota project are forwarded to google.auth.default."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.WebhooksGrpcTransport, grpc_helpers),
        (transports.WebhooksGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_webhooks_transport_create_channel(transport_class, grpc_helpers):
    """The transport passes host, scopes and message-size options to create_channel."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel factory must see the default endpoint, the ADC
        # credentials, both scope sets, and unlimited message sizes.
        create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            scopes=["1", "2"],
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.WebhooksGrpcTransport, transports.WebhooksGrpcAsyncIOTransport],
)
def test_webhooks_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS: explicit channel credentials win; otherwise the cert callback is used."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The callback's cert/key pair must be fed to grpc's SSL factory.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_webhooks_host_no_port():
    """An api_endpoint without an explicit port should default to :443."""
    opts = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com")
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == "dialogflow.googleapis.com:443"
def test_webhooks_host_with_port():
    """An api_endpoint carrying a port must be used verbatim."""
    opts = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com:8000")
    client = WebhooksClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_webhooks_grpc_transport_channel():
    """A caller-supplied channel is adopted verbatim by the gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.WebhooksGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: comparisons to None use identity, not `== None`.
    assert transport._ssl_channel_credentials is None
def test_webhooks_grpc_asyncio_transport_channel():
    """A caller-supplied aio channel is adopted verbatim by the async transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.WebhooksGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: comparisons to None use identity, not `== None`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.WebhooksGrpcTransport, transports.WebhooksGrpcAsyncIOTransport],
)
def test_webhooks_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated mTLS args: cert callback output must reach the channel factory."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated kwargs must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The mTLS endpoint, ADC creds and synthesized SSL credentials
            # must all reach the channel factory.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.WebhooksGrpcTransport, transports.WebhooksGrpcAsyncIOTransport],
)
def test_webhooks_transport_channel_mtls_with_adc(transport_class):
    """Deprecated mTLS args with no callback: ADC-derived SSL creds are used."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Using the deprecated kwargs must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # With no client cert source, the SSL credentials must come from
            # google.auth's SslCredentials (patched above).
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_service_path():
    """service_path() should build the canonical service resource name."""
    parts = ("squid", "clam", "whelk", "octopus")
    want = "projects/%s/locations/%s/namespaces/%s/services/%s" % parts
    assert WebhooksClient.service_path(*parts) == want
def test_parse_service_path():
    """parse_service_path() must invert service_path()."""
    want = {
        "project": "oyster",
        "location": "nudibranch",
        "namespace": "cuttlefish",
        "service": "mussel",
    }
    built = WebhooksClient.service_path(**want)
    # Check that the path construction is reversible.
    assert WebhooksClient.parse_service_path(built) == want
def test_webhook_path():
    """webhook_path() should build the canonical webhook resource name."""
    parts = ("winkle", "nautilus", "scallop", "abalone")
    want = "projects/%s/locations/%s/agents/%s/webhooks/%s" % parts
    assert WebhooksClient.webhook_path(*parts) == want
def test_parse_webhook_path():
    """parse_webhook_path() must invert webhook_path()."""
    want = {
        "project": "squid",
        "location": "clam",
        "agent": "whelk",
        "webhook": "octopus",
    }
    built = WebhooksClient.webhook_path(**want)
    # Check that the path construction is reversible.
    assert WebhooksClient.parse_webhook_path(built) == want
def test_common_billing_account_path():
    """common_billing_account_path() formats the billing-account name."""
    account = "oyster"
    assert (
        WebhooksClient.common_billing_account_path(account)
        == "billingAccounts/%s" % account
    )
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() must invert the builder."""
    want = {"billing_account": "nudibranch"}
    built = WebhooksClient.common_billing_account_path(**want)
    # Check that the path construction is reversible.
    assert WebhooksClient.parse_common_billing_account_path(built) == want
def test_common_folder_path():
    """common_folder_path() formats the folder resource name."""
    folder = "cuttlefish"
    assert WebhooksClient.common_folder_path(folder) == "folders/%s" % folder
def test_parse_common_folder_path():
    """parse_common_folder_path() must invert the builder."""
    want = {"folder": "mussel"}
    built = WebhooksClient.common_folder_path(**want)
    # Check that the path construction is reversible.
    assert WebhooksClient.parse_common_folder_path(built) == want
def test_common_organization_path():
    """common_organization_path() formats the organization resource name."""
    org = "winkle"
    assert WebhooksClient.common_organization_path(org) == "organizations/%s" % org
def test_parse_common_organization_path():
    """parse_common_organization_path() must invert the builder."""
    want = {"organization": "nautilus"}
    built = WebhooksClient.common_organization_path(**want)
    # Check that the path construction is reversible.
    assert WebhooksClient.parse_common_organization_path(built) == want
def test_common_project_path():
    """common_project_path() formats the project resource name."""
    proj = "scallop"
    assert WebhooksClient.common_project_path(proj) == "projects/%s" % proj
def test_parse_common_project_path():
    """parse_common_project_path() must invert the builder."""
    want = {"project": "abalone"}
    built = WebhooksClient.common_project_path(**want)
    # Check that the path construction is reversible.
    assert WebhooksClient.parse_common_project_path(built) == want
def test_common_location_path():
    """common_location_path() formats the project/location resource name."""
    proj, loc = "squid", "clam"
    want = "projects/%s/locations/%s" % (proj, loc)
    assert WebhooksClient.common_location_path(proj, loc) == want
def test_parse_common_location_path():
    """parse_common_location_path() must invert the builder."""
    want = {"project": "whelk", "location": "octopus"}
    built = WebhooksClient.common_location_path(**want)
    # Check that the path construction is reversible.
    assert WebhooksClient.parse_common_location_path(built) == want
def test_client_with_default_client_info():
    """Custom client_info must be forwarded to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Via the high-level client constructor...
    with mock.patch.object(
        transports.WebhooksTransport, "_prep_wrapped_messages"
    ) as prep:
        client = WebhooksClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # ...and via direct construction of the default transport class.
    with mock.patch.object(
        transports.WebhooksTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = WebhooksClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """The async client context manager closes the gRPC channel on exit."""
    client = WebhooksAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        # close() must only fire when the context manager exits.
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Closing the client must close the underlying channel exactly once."""
    # Map transport name -> attribute holding its channel object.
    channel_attrs = {"grpc": "_grpc_channel"}
    for transport_name, channel_attr in channel_attrs.items():
        client = WebhooksClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name,
        )
        channel_type = type(getattr(client.transport, channel_attr))
        with mock.patch.object(channel_type, "close") as close:
            # close() must only fire when the context manager exits.
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager closes its transport on exit."""
    for transport_name in ("grpc",):
        client = WebhooksClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name,
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (WebhooksClient, transports.WebhooksGrpcTransport),
        (WebhooksAsyncClient, transports.WebhooksGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is converted into API-key credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must be built with the synthesized credentials
            # and the default endpoint / client options.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-dialogflow-cx | tests/unit/gapic/dialogflowcx_v3/test_webhooks.py | Python | apache-2.0 | 93,229 | [
"Octopus"
] | e6504d5773acea97f2c65a9f569efd4bda7c05fc3c5e0bf798198c9375895c75 |
# coding: utf-8
from __future__ import unicode_literals
"""
Module containing class to create an ion
"""
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "Sai Jayaraman"
__email__ = "sjayaram@mit.edu"
__status__ = "Production"
__date__ = "Dec 10, 2012"
import re
import numpy as np
from pymatgen.core.composition import Composition
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.util.string_utils import formula_double_format
class Ion(PMGSONable):
    """
    Basic ion object. It is just a Composition object with an additional
    variable to store charge.

    The net charge can either be represented as Mn++, or Mn+2, or Mn[2+].
    Note the order of the sign and magnitude in each representation.
    """
    def __init__(self, composition, charge=0.0, properties=None):
        """
        Flexible Ion construction, similar to Composition.
        For more information, please see pymatgen.core.Composition

        Args:
            composition: Any input accepted by the Composition constructor.
            charge (float): Net charge of the ion. Defaults to 0.0.
            properties (dict): Optional extra attributes, exposed through
                attribute access (see __getattr__).
        """
        self._composition = Composition(composition)
        self._charge = charge
        self._properties = properties if properties else {}

    def __getattr__(self, a):
        # User-supplied properties take precedence; anything else is
        # delegated to the wrapped Composition.
        if a in self._properties:
            return self._properties[a]
        try:
            return getattr(self._composition, a)
        except Exception:
            # Narrowed from a bare ``except:`` so that SystemExit and
            # KeyboardInterrupt propagate; any normal lookup failure is
            # reported as the conventional AttributeError.
            raise AttributeError(a)

    @staticmethod
    def from_formula(formula):
        """
        Parse a formula string such as "Mn[2+]", "Mn++", "Mn+2" or
        "SO4[2-](aq)" into an Ion.

        Returns:
            Ion with the parsed composition and net charge.
        """
        charge = 0.0
        f = formula
        # Bracket notation, e.g. "[2+]" / "[+]" / "[0.5-]".
        m = re.search(r"\[([^\[\]]+)\]", f)
        if m:
            m_chg = re.search(r"([\.\d]*)([+-])", m.group(1))
            if m_chg:
                if m_chg.group(1) != "":
                    charge += float(m_chg.group(1)) * \
                        (float(m_chg.group(2) + "1"))
                else:
                    charge += float(m_chg.group(2) + "1")
            f = f.replace(m.group(), "", 1)
        # Strip an "(aq)" suffix (it carries no charge information).
        m = re.search(r"\(aq\)", f)
        if m:
            f = f.replace(m.group(), "", 1)
        # Trailing sign notation, e.g. "Mn++" or "Mn+2".
        for m_chg in re.finditer(r"([+-])([\.\d]*)", f):
            sign = m_chg.group(1)
            sgn = float(str(sign + "1"))
            if m_chg.group(2).strip() != "":
                charge += float(m_chg.group(2)) * sgn
            else:
                charge += sgn
            f = f.replace(m_chg.group(), "", 1)
        composition = Composition(f)
        return Ion(composition, charge)

    @property
    def formula(self):
        """
        Returns a formula string, with elements sorted by electronegativity,
        e.g., Li4 Fe4 P4 O16, followed by the signed net charge (e.g. " +2").
        """
        formula = self._composition.formula
        chg_str = ""
        if self._charge > 0:
            chg_str = " +" + formula_double_format(self._charge, False)
        elif self._charge < 0:
            chg_str = " " + formula_double_format(self._charge, False)
        return formula + chg_str

    @property
    def anonymized_formula(self):
        """
        An anonymized formula. Appends the integer charge (e.g. "+2"/"-1")
        to the end of the anonymized composition.
        """
        anon_formula = self._composition.anonymized_formula
        chg = self._charge
        chg_str = ""
        if chg > 0:
            chg_str += "{}{}".format('+', str(int(chg)))
        elif chg < 0:
            chg_str += "{}{}".format('-', str(int(np.abs(chg))))
        return anon_formula + chg_str

    @property
    def reduced_formula(self):
        """
        Returns a reduced formula string with appended bracketed charge,
        e.g. "Mn[2+]"; a zero-charge ion is suffixed with "(aq)".
        """
        reduced_formula = self._composition.reduced_formula
        # Scale the charge by the same factor used to reduce the
        # composition so formula and charge stay consistent.
        charge = self._charge / float(
            self._composition.get_reduced_composition_and_factor()[1])
        if charge > 0:
            if abs(charge) == 1:
                chg_str = "[+]"
            else:
                chg_str = "[" + formula_double_format(charge, False) + "+]"
        elif charge < 0:
            if abs(charge) == 1:
                chg_str = "[-]"
            else:
                chg_str = "[{}-]".format(formula_double_format(abs(charge),
                                                               False))
        else:
            chg_str = "(aq)"
        return reduced_formula + chg_str

    @property
    def alphabetical_formula(self):
        """
        Returns a formula string, with elements in alphabetical order,
        followed by the signed net charge.
        """
        alph_formula = self._composition.alphabetical_formula
        chg_str = ""
        if self._charge > 0:
            chg_str = " +" + formula_double_format(self._charge, False)
        elif self._charge < 0:
            chg_str = " " + formula_double_format(self._charge, False)
        return alph_formula + chg_str

    @property
    def charge(self):
        """
        Net charge of the ion.
        """
        return self._charge

    @property
    def composition(self):
        """
        The underlying Composition object.
        """
        return self._composition

    def as_dict(self):
        """
        Returns:
            dict with composition, as well as charge.
        """
        d = self._composition.as_dict()
        d['charge'] = self._charge
        return d

    @classmethod
    def from_dict(cls, d):
        """
        Generates an ion object from a dict created by as_dict().

        Args:
            d: {symbol: amount} dict, plus a "charge" key.
        """
        charge = d['charge']
        composition = Composition({i: d[i] for i in d if i != 'charge'})
        return Ion(composition, charge)

    @property
    def to_reduced_dict(self):
        """
        Returns:
            dict with element symbol and reduced amount e.g.,
            {"Fe": 2.0, "O": 3.0}, plus the (unreduced) charge.
        """
        reduced_formula = self._composition.reduced_formula
        c = Composition(reduced_formula)
        d = c.as_dict()
        d['charge'] = self._charge
        return d

    def __eq__(self, other):
        # Returning NotImplemented for foreign types lets Python fall back
        # gracefully (the old implementation raised AttributeError when
        # `other` was not an Ion).
        if not isinstance(other, Ion):
            return NotImplemented
        return (self.composition == other.composition
                and self.charge == other.charge)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __add__(self, other):
        """
        Addition of two ions: compositions and charges add.
        """
        return Ion(self.composition + other.composition,
                   self.charge + other.charge)

    def __sub__(self, other):
        """
        Subtraction of two ions: compositions and charges subtract.
        """
        return Ion(self.composition - other.composition,
                   self.charge - other.charge)

    def __mul__(self, other):
        """
        Multiplication of an Ion by a scalar factor.
        """
        return Ion(self.composition * other, self.charge * other)

    def __hash__(self):
        # For now, just use the composition hash code (charge is ignored,
        # mirroring the original behavior).
        return self._composition.__hash__()

    def __len__(self):
        return len(self._composition)

    def __str__(self):
        return self.formula

    def __repr__(self):
        return "Ion: " + self.formula

    def __getitem__(self, el):
        # Missing elements report an amount of 0 rather than raising.
        return self._composition.get(el, 0)
| Dioptas/pymatgen | pymatgen/core/ion.py | Python | mit | 7,247 | [
"pymatgen"
] | 73354b072970c87612de6ee6cf2dc69b2ee9fd643734db6906801ffe29821638 |
"""
Copyright (c) 2011-2015 Nathan Boley, Marcus Stoiber
This file is part of GRIT.
GRIT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GRIT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GRIT. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import os.path
import numpy
import pickle
import pysam
import math
from random import random
from collections import defaultdict
import tempfile
DEFAULT_QUALITY_SCORE = 'r'
DEFAULT_BASE = 'A'
DEFAULT_FRAG_LENGTH = 150
DEFAULT_READ_LENGTH = 100
DEFAULT_NUM_FRAGS = 100
NUM_NORM_SDS = 4
FREQ_GTF_STRINGS = [ 'freq', 'frac' ]
# add slide dir to sys.path and import frag_len mod
#sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), ".." ))
sys.path.insert(0, "/home/nboley/grit/grit/")
import grit.frag_len as frag_len
from grit.files.gtf import load_gtf
from grit.files.reads import clean_chr_name
def fix_chr_name(x):
    """Return the chromosome name normalized and prefixed with 'chr'."""
    return "chr{}".format(clean_chr_name(x))
def get_transcript_sequence(transcript, fasta):
    """get the mRNA sequence of the transcript from the gene seq

    Fetches each exon's genomic sequence (upper-cased) and concatenates
    them into the spliced transcript sequence.
    """
    exon_seqs = [
        fasta.fetch(fix_chr_name(transcript.chrm), start, stop + 1).upper()
        for start, stop in transcript.exons
    ]
    return "".join(exon_seqs)
def get_cigar( transcript, start, stop ):
    """loop through introns within the read and add #N to the cigar for each
    intron add #M for portions of read which map to exons

    ``start``/``stop`` are transcript-local coordinates (``stop`` exclusive);
    the return value is a SAM CIGAR string such as "40M120N60M".
    """
    def calc_len(interval):
        # length of an inclusive (start, stop) interval
        return interval[1]-interval[0]+1
    cigar = []
    # find the exon index of the start
    genome_start = transcript.genome_pos(start)
    start_exon = next(i for i, (e_start, e_stop) in enumerate(transcript.exons)
                  if genome_start >= e_start and genome_start <= e_stop)
    # find the exon index of the (inclusive) last base of the read
    genome_stop = transcript.genome_pos(stop-1)
    stop_exon = next(i for i, (e_start, e_stop) in enumerate(transcript.exons)
                  if genome_stop >= e_start and genome_stop <= e_stop)
    if start_exon == stop_exon:
        # read lies entirely within one exon: a single match segment
        return "%iM" % (stop-start)
    tl = 0
    # add the first overlap match
    skipped_bases = sum(calc_len(e) for e in transcript.exons[:start_exon+1])
    cigar.append("%iM" % (skipped_bases-start))
    tl += skipped_bases-start
    # add the first overlap intron
    cigar.append("%iN" % calc_len(transcript.introns[start_exon]))
    # add the internal exon and intron matches
    for i in xrange(start_exon+1, stop_exon):
        cigar.append("%iM" % calc_len(transcript.exons[i]))
        cigar.append("%iN" % calc_len(transcript.introns[i]))
        tl += calc_len(transcript.exons[i])
    # add the last overlap match
    skipped_bases = sum(e[1]-e[0]+1 for e in transcript.exons[:stop_exon])
    cigar.append("%iM" % (stop-skipped_bases))
    tl += stop - skipped_bases
    # sanity check: matched bases must cover exactly the requested span
    assert tl == (stop-start)
    return "".join(cigar)
def build_sam_line( transcript, read_len, offset, read_identifier, quality_string ):
    """Build a single-ended SAM formatted line with the given information.

    ``offset`` is the read start in transcript-local coordinates; the
    returned string is a complete newline-terminated SAM record.
    """
    # Set flag to indicate strandedness of read matching that of the
    # transcript.
    # NOTE(review): 0x10 is the SAM "reverse strand" bit, yet it is set for
    # '+' strand transcripts here -- presumably the simulated protocol is
    # antisense; confirm against the downstream consumer.
    flag = 0
    if transcript.strand == '+': flag += 16
    # adjust start position to correct genomic position
    start = transcript.genome_pos(offset)
    # set cigar string corresponding to transcript and read offset
    cigar = get_cigar( transcript, offset, (offset + read_len) )
    # calculate insert size by difference of genomic offset and
    # genomic offset+read_len
    insert_size = transcript.genome_pos(offset+read_len) - transcript.genome_pos(offset)
    # get slice of seq from transcript; identity comparison with None per
    # PEP 8 (was `!= None`, which relies on __eq__)
    seq = ( transcript.seq[ offset : (offset + read_len) ]
            if transcript.seq is not None else '*' )
    # assemble the tab-separated SAM fields (SAM positions are 1-based)
    sam_line = '\t'.join( (
        read_identifier, str( flag ), fix_chr_name(transcript.chrm),
        str(start+1),
        '255', cigar, "*", '0', str( insert_size ), seq, quality_string,
        "NM:i:0", "NH:i:1" ) ) + "\n"
    return sam_line
def build_sam_lines( transcript, read_len, frag_len, offset,
                     read_identifier, read_quals ):
    """Build paired-end SAM formatted lines with the given information.

    Returns a two-element list of newline-terminated SAM records (read1,
    read2) for a fragment of length ``frag_len`` starting at transcript
    local ``offset``.
    """
    # Copy the quality strings so the caller's list is not mutated (the
    # original aliased `read_quals` and reversed one entry in place).
    ordered_quals = list(read_quals)
    # determine whether read1 should be the 5' read or vice versa
    # and initialize attributes that are specific to a read number
    # instead of a 5' or 3' attribute
    if transcript.strand == '+':
        up_strm_read, dn_strm_read = (0, 1)
        flag = [ 99, 147 ]
        ordered_quals[1] = ordered_quals[1][::-1]
    else:
        up_strm_read, dn_strm_read = (1, 0)
        flag = [ 83, 163 ]
        ordered_quals[0] = ordered_quals[0][::-1]
    # get slice of seq from transcript; identity comparison with None per
    # PEP 8 (was `!= None`)
    seq = ['*', '*']
    if transcript.seq is not None:
        seq[ up_strm_read ] = transcript.seq[offset:(offset + read_len)]
        seq[ dn_strm_read ] = transcript.seq[
            (offset + frag_len - read_len):(offset + frag_len)]
    # adjust five and three prime read start positions to correct genomic positions
    start = [ transcript.start, transcript.start ]
    start[ up_strm_read ] = transcript.genome_pos(offset)
    start[ dn_strm_read ] = transcript.genome_pos(offset + frag_len - read_len)
    # set cigar string for five and three prime reads
    cigar = [ None, None ]
    cigar[ up_strm_read ] = get_cigar( transcript, offset, (offset+read_len) )
    cigar[ dn_strm_read ] = get_cigar(
        transcript, (offset+frag_len-read_len), (offset + frag_len))
    # calculate insert size by difference of the mapped start and end
    insert_size = (
        transcript.genome_pos(offset+read_len) - transcript.genome_pos(offset))
    insert_size = [ insert_size, insert_size ]
    insert_size[ dn_strm_read ] *= -1
    # initialize sam lines with read identifiers and then add appropriate fields
    sam_lines = [ read_identifier + '\t', read_identifier + '\t' ]
    for i in (0,1):
        other_i = 0 if i else 1
        sam_lines[i] += '\t'.join( (
            str( flag[i] ), fix_chr_name(transcript.chrm),
            str( start[i]+1 ),"255",
            cigar[i], "=", str( start[other_i]+1 ), str( insert_size[i] ),
            seq[i], ordered_quals[i], "NM:i:0", "NH:i:1" ) ) + "\n"
    return sam_lines
def write_fastq_lines( fp1, fp2, transcript, read_len, frag_len, offset,
                       read_identifier ):
    """STUB: write paired FASTQ records (to fp1/fp2) so simulated fragments
    can be run through an alignment pipeline. Not yet implemented.
    """
    pass
def simulate_reads( genes, fl_dist, fasta, quals, num_frags, single_end,
                    full_fragment, read_len, assay='RNAseq'):
    """Simulate `num_frags` fragments from `genes` and write a sorted,
    indexed BAM file named `<assay>.sorted.bam` in the current directory.

    Arguments:
    genes         -- iterable of gene objects with `.transcripts`
    fl_dist       -- fragment length distribution object, or an int for a
                     constant fragment length
    fasta         -- indexed fasta handle (pysam.Fastafile) or None; when
                     None, reads get '*' quality and no real sequence
    quals         -- array of quality strings to sample from (may be empty)
    num_frags     -- total number of fragments to simulate
    single_end    -- if True, produce single-end reads
    full_fragment -- if True, reads span the whole fragment
    read_len      -- read length in base pairs
    assay         -- one of 'RNAseq', 'CAGE', 'RAMPAGE', 'PASseq'
    """
    # global variable that stores the current read number, we use this to
    # generate a unique id for each read.
    global curr_read_index
    curr_read_index = 1

    def sample_fragment_length( fl_dist, transcript ):
        """Choose a random fragment length from fl_dist
        """
        # CAGE fragments are exactly one read long
        if assay == 'CAGE':
            return read_len
        # if the fl_dist is constant
        if isinstance( fl_dist, int ):
            assert fl_dist <= transcript.calc_length(), 'Transcript which ' + \
                'cannot contain a valid fragment was included in transcripts.'
            return fl_dist
        # Choose a valid fragment length from the distribution by inverse
        # transform sampling on the cumulative density
        while True:
            fl_index = fl_dist.fl_density_cumsum.searchsorted( random() ) - 1
            fl = fl_index + fl_dist.fl_min
            # if fragment_length is valid return it
            if fl <= transcript.calc_length():
                return fl
        assert False

    def sample_read_offset( transcript, fl ):
        """Choose a fragment start offset appropriate for the assay.

        5'-anchored assays (CAGE/RAMPAGE) pin the fragment to the TSS,
        3'-anchored assays (PASseq) to the poly-A site, and RNAseq samples
        uniformly. `assay` is validated upstream by argparse choices.
        """
        # calculate maximum offset
        max_offset = transcript.calc_length() - fl
        if assay in ('CAGE', 'RAMPAGE'):
            if transcript.strand == '+': return 0
            else: return max_offset
        elif assay == 'RNAseq':
            return int( random() * max_offset )
        elif assay == 'PASseq':
            if transcript.strand == '-': return 0
            else: return max_offset

    def get_random_qual_score( read_len ):
        """Return a quality string of exactly `read_len` characters."""
        # if no quality score were provided
        if not quals:
            return DEFAULT_QUALITY_SCORE * read_len
        # else return quality string from input quality file
        # scores are concatenated to match read_len if necessary
        else:
            qual_string = ''
            while len( qual_string ) < read_len:
                qual_string += str( quals[ int(random() * len(quals) ) ] )
            return qual_string[0:read_len]

    def get_random_read_pos( transcript ):
        """Sample a (fragment length, offset, unique identifier) triple."""
        while True:
            # find a valid fragment length
            fl = sample_fragment_length( fl_dist, transcript )
            if (fl >= read_len) or full_fragment: break
        # find a valid random read start position
        offset = sample_read_offset( transcript, fl )
        # get a unique string for this fragment
        global curr_read_index
        read_identifier = 'SIM:%015d:%s' % (curr_read_index, transcript.id)
        curr_read_index += 1
        return fl, offset, read_identifier

    def build_random_sam_line( transcript, read_len ):
        """build a random single ended sam line
        """
        fl, offset, read_identifier = get_random_read_pos( transcript )
        if full_fragment:
            read_len = fl
        # get a random quality scores; '*' marks "unavailable" in SAM
        if transcript.seq == None:
            read_qual = '*'
        else:
            read_qual = get_random_qual_score( read_len )
        # build the sam lines
        return build_sam_line(
            transcript, read_len, offset, read_identifier, read_qual )

    def build_random_sam_lines( transcript, read_len ):
        """build random paired end sam lines
        """
        fl, offset, read_identifier = get_random_read_pos( transcript )
        # adjust read length so that paired end read covers the entire fragment
        if full_fragment:
            read_len = int( math.ceil( fl / float(2) ) )
        # get two random quality scores
        if transcript.seq == None:
            read_quals = ['*', '*']
        else:
            read_quals = [ get_random_qual_score( read_len ),
                           get_random_qual_score( read_len ) ]
        sam_lines = build_sam_lines(
            transcript, read_len, fl, offset, read_identifier, read_quals )
        return sam_lines

    def get_fl_min():
        """Minimum fragment length supported by `fl_dist`."""
        if isinstance( fl_dist, int ):
            return fl_dist
        else:
            return fl_dist.fl_min

    def calc_scale_factor(t):
        """Relative sampling weight scale for transcript `t`.

        For RNAseq this is the expected number of distinct fragment start
        positions (length minus the mean valid fragment length); for
        5'/3'-anchored assays every transcript contributes equally.
        """
        if assay in ('RNAseq',):
            length = t.calc_length()
            if length < fl_dist.fl_min: return 0
            fl_min, fl_max = fl_dist.fl_min, min(length, fl_dist.fl_max)
            allowed_fl_lens = numpy.arange(fl_min, fl_max+1)
            weights = fl_dist.fl_density[
                fl_min-fl_dist.fl_min:fl_max-fl_dist.fl_min+1]
            mean_fl_len = float((allowed_fl_lens*weights).sum())
            return length - mean_fl_len
        elif assay in ('CAGE', 'RAMPAGE', 'PASseq'):
            return 1.0

    # initialize the transcript objects, and calculate their relative weights
    transcript_weights = []
    transcripts = []
    contig_lens = defaultdict(int)
    # NOTE(review): currently unused, kept for parity with earlier versions
    min_transcript_length = get_fl_min()
    for gene in genes:
        # pad the contig length past the gene end so headers are valid
        contig_lens[fix_chr_name(gene.chrm)] = max(
            gene.stop+1000, contig_lens[fix_chr_name(gene.chrm)])
        for transcript in gene.transcripts:
            if fasta != None:
                transcript.seq = get_transcript_sequence(transcript, fasta)
            else:
                transcript.seq = None
            if transcript.fpkm != None:
                weight = transcript.fpkm*calc_scale_factor(transcript)
            elif transcript.frac != None:
                assert len(genes) == 1
                weight = transcript.frac
            else:
                weight = 1./len(gene.transcripts)
                #assert False, "Transcript has neither an FPKM nor a frac"
            transcripts.append( transcript )
            transcript_weights.append( weight )

    assert len( transcripts ) > 0, "No valid trancripts."
    # normalize the transcript weights to be on 0,1
    transcript_weights = numpy.array(transcript_weights, dtype=float)
    transcript_weights = transcript_weights/transcript_weights.sum()
    transcript_weights_cumsum = transcript_weights.cumsum()

    # update the contig lens from the fasta file, if available
    if fasta != None:
        for name, length in zip(fasta.references, fasta.lengths):
            if fix_chr_name(name) in contig_lens:
                # BUG FIX: previously compared against contig_lens[name]
                # (the un-fixed key, always 0 in the defaultdict), which
                # could shrink the padded gene-derived length.
                contig_lens[fix_chr_name(name)] = max(
                    length, contig_lens[fix_chr_name(name)])

    # create the output directory
    bam_prefix = assay + ".sorted"
    with tempfile.NamedTemporaryFile( mode='w+' ) as sam_fp:
        # write out the header
        for contig, contig_len in contig_lens.iteritems():
            data = ["@SQ", "SN:%s" % contig, "LN:%i" % contig_len]
            sam_fp.write("\t".join(data) + "\n")
        while curr_read_index <= num_frags:
            # pick a transcript to randomly take a read from. Note that they
            # should be chosen in proportion to the *expected number of reads*,
            # not their relative frequencies.
            transcript_index = \
                transcript_weights_cumsum.searchsorted( random(), side='left' )
            transcript = transcripts[ transcript_index ]
            if single_end:
                sam_line_s = build_random_sam_line( transcript, read_len )
            else:
                sam_line_s = build_random_sam_lines( transcript, read_len )
            sam_fp.writelines( sam_line_s )
        # create sorted bam file and index it
        sam_fp.flush()
        call = 'samtools view -bS {} | samtools sort - {}'
        os.system( call.format( sam_fp.name, bam_prefix ) )
        os.system( 'samtools index {}.bam'.format( bam_prefix ) )
    return
def build_objs( gtf_fp, fl_dist_const,
                fl_dist_norm, full_fragment,
                read_len, fasta_fn, qual_fn ):
    """Load the input files and build the simulation inputs.

    Returns a tuple (genes, fl_dist, fasta, quals) where `fl_dist` is
    either a normal fragment length density or the constant int length,
    `fasta` is an indexed pysam handle or None, and `quals` is an array
    of quality strings (empty when no quality file was supplied).
    """
    genes = load_gtf( gtf_fp )
    gtf_fp.close()

    def build_normal_fl_dist( fl_mean, fl_sd ):
        # truncate the density NUM_NORM_SDS standard deviations either side
        fl_min = max( 0, fl_mean - (fl_sd * NUM_NORM_SDS) )
        fl_max = fl_mean + (fl_sd * NUM_NORM_SDS)
        fl_dist = frag_len.build_normal_density( fl_min, fl_max, fl_mean, fl_sd )
        return fl_dist

    if fl_dist_norm:
        fl_dist = build_normal_fl_dist( fl_dist_norm[0], fl_dist_norm[1] )
        assert fl_dist.fl_max > read_len or full_fragment, \
            'Invalid fragment length distribution and read length!!!'
    else:
        assert read_len < fl_dist_const or full_fragment, \
            'Invalid read length and constant fragment length!!!'
        fl_dist = fl_dist_const

    if fasta_fn:
        # create indexed fasta file handle object with pysam
        fasta = pysam.Fastafile( fasta_fn )
    else:
        fasta = None

    # if qual_fn is None, quals remains empty and reads will default to
    # all base qualities of DEFAULT_BASE_QUALITY_SCORE
    quals = []
    if qual_fn:
        # BUG FIX: this previously opened the undefined name `quals_fn`,
        # raising a NameError whenever a quality file was supplied.
        with open( qual_fn ) as quals_fp:
            for line in quals_fp:
                quals.append( line.strip() )
        quals = numpy.array( quals )

    return genes, fl_dist, fasta, quals
def parse_arguments():
    """Parse command-line options and return the tuple consumed by main().

    Returns (gtf_fp, fl_dist_const, fl_dist_norm, fasta_fn, qual_fn,
    num_frags, single_end, full_fragment, read_len, out_prefix, assay).
    Also sets the module-level VERBOSE flag.
    """
    import argparse
    parser = argparse.ArgumentParser(\
        description='Produce simulated reads in a perfectly aligned BAM file.' )
    # gtf is the only required argument
    parser.add_argument( 'gtf', type=file, \
        help='GTF file from which to produce simulated reads ' + \
        '(Note: Only the first transcript from this file will ' + \
        'be simulated)' )
    parser.add_argument(
        '--assay', choices=['RNAseq', 'RAMPAGE', 'CAGE', 'PASseq'],
        default='RNAseq', help='Which assay type to simulate from' )

    # fragment length distribution options
    parser.add_argument( '--fl-dist-const', type=int, default=DEFAULT_FRAG_LENGTH, \
        help='Constant length fragments. (default: ' + \
        '%(default)s)' )
    parser.add_argument( '--fl-dist-norm', \
        help='Mean and standard deviation (format "mn:sd") ' + \
        'used to create normally distributed fragment lengths.' )

    # files providing quality and sequence information
    parser.add_argument( '--fasta', '-f', \
        help='Fasta file from which to create reads ' + \
        '(default: all sequences are "' + DEFAULT_BASE + \
        '" * length of sequence)' )
    parser.add_argument( '--quality', '-q', \
        help='Flat file containing one FASTQ quality score ' + \
        'per line, created with get_quals.sh. (default: ' + \
        'quality strings are "' + str(DEFAULT_QUALITY_SCORE) + \
        '" * length of sequence.)' )

    # type and number of fragments requested
    parser.add_argument(
        '--num-frags', '-n', type=int, default=1000,
        help='Total number of fragments to create across all transcripts')
    parser.add_argument('--single-end', action='store_true', default=False,
                        help='Produce single-end reads.' )
    parser.add_argument('--paired-end', dest='single_end', action='store_false',
                        help='Produce paired-end reads. (default)' )
    # XXX not sure if this works
    #parser.add_argument(
    #    '--full-fragment', action='store_true', default=False,
    #    help='Produce reads spanning the entire fragment.')
    parser.add_argument( '--read-len', '-r', type=int, default=DEFAULT_READ_LENGTH, \
        help='Length of reads to produce in base pairs ' + \
        '(default: %(default)s)' )

    # output options
    parser.add_argument( '--out_prefix', '-o', default='simulated_reads', \
        help='Prefix for output FASTQ/BAM file ' + \
        '(default: %(default)s)' )
    parser.add_argument( '--verbose', '-v', default=False, action='store_true', \
        help='Print status information.' )
    args = parser.parse_args()

    # set to false, but we may want to bring this option back
    args.full_fragment = False

    global VERBOSE
    VERBOSE = args.verbose

    # CAGE reads are short single-ended reads anchored at the TSS
    if args.assay == 'CAGE':
        args.read_len = 28
        args.single_end = True

    # parse normal distribution argument; fall back to the constant
    # fragment length when the "mn:sd" string is malformed
    if args.fl_dist_norm:
        try:
            mean, sd = args.fl_dist_norm.split( ':' )
            args.fl_dist_norm = [ int( mean ), int( sd ) ]
        except ValueError:
            args.fl_dist_norm = None
            print >> sys.stderr, \
                "WARNING: User input mean and sd are not formatted correctly.\n"+\
                "\tUsing default values.\n"

    return ( args.gtf, args.fl_dist_const, args.fl_dist_norm,
             args.fasta, args.quality, args.num_frags,
             args.single_end, args.full_fragment,
             args.read_len, args.out_prefix, args.assay )
def main():
    """Entry point: parse options, prepare inputs, and simulate reads.

    Creates (or reuses) the output directory, refuses to overwrite an
    existing <assay>.sorted.bam, then chdirs into the directory so
    simulate_reads() writes its BAM there.
    """
    ( gtf_fp, fl_dist_const, fl_dist_norm, fasta_fn, qual_fn,
      num_frags, single_end, full_fragment, read_len, out_prefix, assay )\
      = parse_arguments()

    # mkdir raises OSError if the directory already exists; in that case
    # only bail out if the target BAM is already present
    try: os.mkdir(out_prefix)
    except OSError:
        ofname = os.path.join(out_prefix, assay + '.sorted.bam')
        if os.path.isfile(ofname):
            raise OSError, "File '%s' already exists" % ofname
    os.chdir(out_prefix)

    genes, fl_dist, fasta, quals = build_objs(
        gtf_fp, fl_dist_const,
        fl_dist_norm, full_fragment, read_len,
        fasta_fn, qual_fn )

    # debugging snippet kept by the original author as an inert string
    # literal (never executed)
    """
    for gene in genes:
        for t in gene.transcripts:
            t.chrm = "chr" + t.chrm
            print t.build_gtf_lines(gene.id, {})
    assert False
    """

    simulate_reads( genes, fl_dist, fasta, quals, num_frags, single_end,
                    full_fragment, read_len, assay=assay )

if __name__ == "__main__":
    main()
| nboley/grit | simulator/reads_simulator.py | Python | gpl-3.0 | 21,253 | [
"pysam"
] | 71bdac6736b4bab6af3fbc25adfc62e4ba6519b73817f81c11c439f437a88df9 |
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import django.db.transaction
import tldap.transaction
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that purges stale application/applicant rows."""

    help = "Regular cleanup of application db models."

    @django.db.transaction.atomic
    @tldap.transaction.commit_on_success
    def handle(self, **options):
        """Delete expired/abandoned applications and orphaned applicants."""
        from django.db.models import Count
        from ...models import Application, Applicant
        import datetime

        now = datetime.datetime.now()
        verbose = int(options.get('verbosity'))

        # Applications past their expiry that were never submitted.
        expired = Application.objects.filter(
            expires__lte=now, submitted_date__isnull=True)
        for app in expired:
            if verbose >= 1:
                print(
                    "Deleted expired unsubmitted application #%s"
                    % app.id)
            app.delete()

        one_month_ago = now - datetime.timedelta(days=30)

        # Unsubmitted applications created more than a month ago.
        abandoned = Application.objects.filter(
            created_date__lte=one_month_ago, submitted_date__isnull=True)
        for app in abandoned:
            if verbose >= 1:
                print("Deleted unsubmitted application #%s" % app.id)
            app.delete()

        # Applications complete/declined for at least a month.
        finished = Application.objects.filter(
            complete_date__isnull=False, complete_date__lte=one_month_ago)
        for app in finished:
            if verbose >= 1:
                print("Deleted completed application #%s" % app.id)
            app.delete()

        # Applicants with no remaining applications are orphans.
        orphans = Applicant.objects.annotate(
            cc=Count('application')).filter(cc=0)
        for applicant in orphans:
            if verbose >= 1:
                print("Deleted orphaned applicant #%s" % applicant.id)
            applicant.delete()
| brianmay/karaage | karaage/plugins/kgapplications/management/commands/application_cleanup.py | Python | gpl-3.0 | 2,645 | [
"Brian"
] | b0fc3a4ce6b9dd1efd1ea19b486b6b3ea0f4c906e6ff206363afb7d3152ba966 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
hist.py |github|
----------------
Histograms of the occultation events as a function of mean longitude, duration,
and impact parameter for each of the seven TRAPPIST-1 planets, as well as a
marginalized histogram of the total number of potentially detectable
planet-planet occultations in one Earth year.
.. note:: When I sample too many times from the prior in a single run, the \
code often hangs. There may be a memory leak somewhere, but I \
haven't been able to find it yet. If you want to run large \
ensembles, I recommend using the parallelization scheme I \
implemented below. Alternatively, a brain-dead \
way of doing it is to instantiate a bunch of **screen** \
sessions: \
:py:obj:`screen -dm python -c "import hist; hist.Compute(nsamp = 100)"`
The figures below show the distributions for the eyeball planet case (left)
and the limb-darkened planet case (right). Click on them to enlarge.
TRAPPIST-1b
~~~~~~~~~~~
.. image:: /b_corner_eyeball.jpg
:width: 49 %
.. image:: /b_corner_limbdark.jpg
:width: 49 %
TRAPPIST-1c
~~~~~~~~~~~
.. image:: /c_corner_eyeball.jpg
:width: 49 %
.. image:: /c_corner_limbdark.jpg
:width: 49 %
TRAPPIST-1d
~~~~~~~~~~~
.. image:: /d_corner_eyeball.jpg
:width: 49 %
.. image:: /d_corner_limbdark.jpg
:width: 49 %
TRAPPIST-1e
~~~~~~~~~~~
.. image:: /e_corner_eyeball.jpg
:width: 49 %
.. image:: /e_corner_limbdark.jpg
:width: 49 %
TRAPPIST-1f
~~~~~~~~~~~
.. image:: /f_corner_eyeball.jpg
:width: 49 %
.. image:: /f_corner_limbdark.jpg
:width: 49 %
TRAPPIST-1g
~~~~~~~~~~~
.. image:: /g_corner_eyeball.jpg
:width: 49 %
.. image:: /g_corner_limbdark.jpg
:width: 49 %
TRAPPIST-1h
~~~~~~~~~~~
.. image:: /h_corner_eyeball.jpg
:width: 49 %
.. image:: /h_corner_limbdark.jpg
:width: 49 %
Marginal distributions
~~~~~~~~~~~~~~~~~~~~~~
.. image:: /hist_eyeball.jpg
:width: 49 %
.. image:: /hist_limbdark.jpg
:width: 49 %
.. image:: /snr_eyeball.jpg
:width: 49 %
.. image:: /snr_limbdark.jpg
:width: 49 %
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/scripts/hist.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
import os
import subprocess
import planetplanet
from planetplanet import jwst
from planetplanet import Trappist1
from planetplanet.constants import *
from planetplanet.pool import Pool
import matplotlib
import matplotlib.pyplot as pl
from matplotlib.ticker import FuncFormatter
import numpy as np
import corner
from tqdm import tqdm
from scipy.stats import norm
# Directory layout: <package root>/scripts holds the analysis scripts and
# <package root>/scripts/data holds the saved histogram samples.
_package_root = os.path.dirname(
    os.path.dirname(os.path.abspath(planetplanet.__file__)))
histpath = os.path.join(_package_root, 'scripts')
datapath = os.path.join(histpath, 'data')
if not os.path.exists(datapath):
    os.makedirs(datapath)
def _test():
'''
This routine is too expensive to test on Travis, so I'm
bypassing it for now.
'''
pass
def Submit(queue = None, email = None, walltime = 8, nodes = 5, ppn = 12,
           mpn = None, nsamp = 50000, eyeball = True,
           batch_size = 100, nproc = None):
    '''
    Submits a PBS cluster job to run :py:func:`Compute` in parallel.

    :param str queue: The name of the queue to submit to. \
           Default :py:obj:`None`
    :param str email: The email to send job status notifications to. \
           Default :py:obj:`None`
    :param int walltime: The number of hours to request. Default `8`
    :param int nodes: The number of nodes to request. Default `5`
    :param int ppn: The number of processors per node to request. Default `12`
    :param int nsamp: The number of prior samples to draw. Default `50,000`
    :param bool eyeball: Use the radiative equilibrium surface map? \
           Default :py:obj:`True`
    :param int batch_size: Size of each batch used in the parallelization. \
           Default `100`
    :param int mpn: Memory per node in gb to request. Default no setting.
    :param int nproc: Number of processes to spawn. Default is the number of \
           core.
    '''
    if nproc is None:
        nproc = ppn * nodes
    # PBS resource strings
    str_w = 'walltime=%d:00:00' % walltime
    if mpn is not None:
        str_n = 'nodes=%d:ppn=%d,feature=%dcore,mem=%dgb' % \
                (nodes, ppn, ppn, mpn * nodes)
    else:
        str_n = 'nodes=%d:ppn=%d,feature=%dcore' % (nodes, ppn, ppn)
    # Environment variables consumed by hist.pbs
    str_v = 'NPROC=%d,HISTPATH=%s,NSAMP=%d,EYEBALL=%d,BATCHSZ=%d' % \
            (nproc, histpath, nsamp, int(eyeball), batch_size)
    str_name = 'planetplanet'
    str_out = 'hist.log'
    qsub_args = ['qsub', 'hist.pbs',
                 '-v', str_v,
                 '-o', str_out,
                 '-j', 'oe',
                 '-N', str_name,
                 '-l', str_n,
                 '-l', str_w]
    if email is not None:
        # BUG FIX: this previously `append`ed the list itself, producing a
        # nested list inside argv and crashing `subprocess.call`.
        qsub_args += ['-M', email, '-m', 'ae']
    if queue is not None:
        qsub_args += ['-q', queue]
    print("Submitting the job...")
    subprocess.call(qsub_args)
class _FunctionWrapper(object):
'''
A simple function wrapper class. Stores :py:obj:`args` and :py:obj:`kwargs`
and allows an arbitrary function to be called via :py:func:`map`.
Used internally.
'''
def __init__(self, f, *args, **kwargs):
'''
'''
self.f = f
self.args = args
self.kwargs = kwargs
def __call__(self, x):
'''
'''
return self.f(*self.args, **self.kwargs)
def _Parallelize(nsamp, eyeball, batch_size):
    '''
    Fan the sampling work out over a process pool. Used internally.

    Each pool task runs one :py:func:`Compute` batch of `batch_size`
    samples with progress bars disabled.
    '''
    # Freeze the per-batch keyword arguments into a picklable callable
    task = _FunctionWrapper(Compute, nsamp = batch_size,
                            eyeball = eyeball, progress_bar = False)
    # Number of batches required to cover `nsamp` samples
    n_batches = int(np.ceil(nsamp / batch_size))
    with Pool() as pool:
        pool.map(task, range(n_batches))
def histogram(system, tstart, tend, dt = 0.0001):
    '''
    Computes statistical properties of planet-planet occultations that
    occur over a given interval. Computes the frequency of occultations as
    a function of orbital phase, duration, and signal-to-noise ratio, as well
    as the fully marginalized occultation frequency for each planet in the
    system. Occultations by the star are not included, nor are occultations
    occuring behind the star, which are not visible to the observer.

    :param system: A system instance.
    :type system: :py:obj:`planetplanet.structs.System`
    :param float tstart: The integration start time (BJD − 2,450,000)
    :param float tend: The integration end time (BJD − 2,450,000)
    :param float dt: The time resolution in days. Occultations shorter \
           than this will not be registered.

    :returns: A list of \
        :py:obj:`(phase, impact, duration, signal, noise, snr)` tuples \
        for each planet in the system. The phase angle is measured \
        in degrees and the duration is measured in days. The signal and \
        noise are measured in ppm.

    .. warning:: This routine computes the **orbital phase angle**, which \
                 is measured from **transit**. This is different from the \
                 mean longitude by :math:`\pi/2`
    '''
    # Reset
    system._reset()
    time = np.arange(tstart, tend, dt)
    # Compute the wavelength grid. We are hard-coding the
    # 15 micron JWST MIRI band here.
    lambda1 = 12.5
    lambda2 = 17.5
    R = 50
    # Build a grid at constant resolving power R = lambda / dlambda
    wav = [lambda1]
    while(wav[-1] < lambda2):
        wav.append(wav[-1] + wav[-1] / R)
    wavelength = np.array(wav)
    # Compute all limb darkening coefficients
    for body in system.bodies:
        body.u = [None for ld in body.limbdark]
        for n, ld in enumerate(body.limbdark):
            if callable(ld):
                # wavelength-dependent coefficient
                body.u[n] = ld(wavelength)
            elif not hasattr(ld, '__len__'):
                # scalar: broadcast across the wavelength grid
                body.u[n] = ld * np.ones_like(wavelength)
            else:
                raise Exception("Limb darkening coefficients must be "
                                + "provided as a list of scalars or "
                                + "as a list of functions.")
        body.u = np.array(body.u)
        # HACK: Disable phase curves. The code would take *way*
        # too long to run, and they don't affect these statistics.
        body.phasecurve = False
    # Convert from microns to meters
    wavelength *= 1e-6
    # No oversampling
    time_hr = np.array(time)
    # Continuum flux
    system._continuum = np.zeros(len(time_hr) * len(wavelength))
    # Allocate memory
    system._malloc(len(time_hr), len(wavelength))
    # Call the light curve routine (C backend via ctypes)
    err = system._Flux(len(time_hr), np.ctypeslib.as_ctypes(time_hr),
                       len(wavelength),
                       np.ctypeslib.as_ctypes(wavelength),
                       np.ctypeslib.as_ctypes(system._continuum),
                       len(system.bodies), system._ptr_bodies,
                       system.settings)
    assert err <= 0, "Error in C routine `Flux` (%d)." % err
    # A histogram of the distribution of phases,
    # impact parameters, and durations
    hist = [[] for body in system.bodies[1:]]
    for k, body in enumerate(system.bodies[1:]):
        # Simulate an observation w/ JWST at 15 microns
        # Same syntax as in `observe()`
        # NOTE: `filter` shadows the builtin; kept for file consistency
        w = jwst.get_miri_filter_wheel()
        filter = w[np.argmax([f.name.lower() == 'f1500w' for f in w])]
        filter.compute_lightcurve(time, body.flux,
                                  system.continuum,
                                  system.wavelength,
                                  stack = 1,
                                  atel = 25.,
                                  thermal = True,
                                  quiet = True)
        # Identify the different planet-planet events: `occultor` is a
        # bitmask of occulting bodies; gaps in `inds` separate events
        inds = np.where(body.occultor > 0)[0]
        difs = np.where(np.diff(inds) > 1)[0]
        # Total body photons
        total_body_photons = np.nanmedian(filter.lightcurve.Nsys)
        # Loop over individual ones
        for i in inds[difs]:
            # Loop over possible occultors
            for occ in range(1, len(system.bodies)):
                # Is body `occ` occulting (but not behind the star)?
                # (bit 0 set would mean the star is involved)
                if (body.occultor[i] & 2 ** occ) and \
                   (body.occultor[i] & 1 == 0):
                    # Note that `i` is the last index of the occultation;
                    # scan backwards for the first sample without bit `occ`
                    duration = np.argmax(body.occultor[:i][::-1]
                                         & 2 ** occ == 0)
                    if duration > 0:
                        # Orbital phase, **measured from transit**
                        # At transit, phase = 0; at secondary, phase = 180.
                        phase = np.arctan2(body.x[i],
                                           -body.z[i]) * 180 / np.pi
                        # Indices of the occultation
                        idx = range(i - duration, i + 1)
                        # Compute the minimum impact parameter,
                        # normalized to the sum of the two radii
                        impact = np.min(np.sqrt((system.bodies[occ].x[idx]
                                                 - body.x[idx]) ** 2 +
                                                (system.bodies[occ].y[idx]
                                                 - body.y[idx]) ** 2)) \
                                                / (system.bodies[occ]._r
                                                   + body._r)
                        # Convert duration to log (of minutes)
                        duration = np.log10(duration * dt * 1440)
                        # Planet, background, and star photons
                        Nplan = filter.lightcurve.Nsys[idx]
                        Nback = filter.lightcurve.Nback[idx]
                        Nstar = filter.lightcurve.Ncont[idx]
                        # Compute the number of photons *missing*
                        # NOTE: There was a BUG in the previous version,
                        # where we did
                        # >>> Nplan = np.nanmedian(Nplan) - Nplan
                        # which gets the wrong baseline for the planet's
                        # continuum. This led to low SNR in the previous
                        # versions of the plots!
                        Nplan = total_body_photons - Nplan
                        # Compute signal of and noise on the event
                        # in parts per million
                        norm = 1.e6 / np.sum(Nstar + Nback)
                        signal = norm * np.sum(np.fabs(Nplan))
                        noise = norm * np.sqrt(np.sum(Nstar + Nback))
                        # Compute the actual SNR on event. Note that this
                        # is NOT the sum of the signals divided by the sum
                        # of the noises! We need to add the SNR of each
                        # *datapoint* individually in quadrature.
                        snr = np.sqrt(np.sum((Nplan) ** 2 / (Nstar + Nback)))
                        # Running list
                        hist[k].append((phase, impact, duration,
                                        signal, noise, snr))
        # Make into array
        hist[k] = np.array(hist[k])
    return hist
def Compute(nsamp = 300, minsnr = 1.0, nbody = True,
            progress_bar = True, eyeball = True, **kwargs):
    '''
    Compute occultation histograms by drawing `nsamp` draws from the
    system prior. Saves the results to `data/histXXX.npz`.

    :param int nsamp: The number of prior samples to draw. Default `300`
    :param float minsnr: The minimum SNR to include in the tally. Default `1.`
    :param bool nbody: Use the N-Body solver? Default :py:obj:`True`
    :param bool progress_bar: Display a progress bar? Default :py:obj:`True`
    :param bool eyeball: Assume eyeball planets? Default :py:obj:`True`. If
           :py:obj:`False`, uses the limb darkened surface map.
    '''
    # The dataset name
    name = 'hist_' + ('e' if eyeball else 'l')
    # Draw samples from the prior; one histogram list and one per-sample
    # event count array for each of the seven planets
    hist = [[] for k in range(7)]
    count = [np.zeros(nsamp) for k in range(7)]
    if progress_bar:
        wrap = tqdm
    else:
        wrap = lambda x: x
    for n in wrap(range(nsamp)):
        # Choose the correct radiance map
        if eyeball:
            radiancemap = planetplanet.RadiativeEquilibriumMap()
        else:
            radiancemap = planetplanet.LimbDarkenedMap()
        # Instantiate the Trappist-1 system
        system = Trappist1(sample = True, nbody = nbody,
                           quiet = True, radiancemap = radiancemap, **kwargs)
        system.settings.timestep = 1. / 24.
        # Run!
        try:
            h = histogram(system, OCTOBER_08_2016, OCTOBER_08_2016 + 365)
        except Exception as e:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate; report what actually failed.
            print("ERROR in routine `hist.Compute()`: %s" % e)
            continue
        # Loop over the planets
        for k in range(7):
            # Count the number of events with SNR higher than `minsnr`
            if len(h[k]):
                count[k][n] = len(np.where(h[k][:,5] >= minsnr)[0])
            # Append to cumulative histogram
            hist[k].extend(list(h[k]))
    # Convert to numpy arrays
    for k in range(7):
        hist[k] = np.array(hist[k])
    # Save to the next unused file index
    n = 0
    while os.path.exists(os.path.join(datapath, '%s%03d.npz' % (name, n))):
        n += 1
    np.savez(os.path.join(datapath, '%s%03d.npz' % (name, n)),
             hist = hist, count = count)
def MergeFiles():
    '''
    Merge all the `npz` savesets into a single one for faster plotting.

    Valid savesets are removed as they are read and the merged result is
    written back to index 000. Corrupt files are removed and skipped.
    '''
    # Loop over both dataset types
    for name in ['hist_e', 'hist_l']:
        # Load
        print("Loading %s..." % name)
        hist = None
        count = None
        n = 0
        for n in tqdm(range(1000)):
            path = os.path.join(datapath, '%s%03d.npz' % (name, n))
            if not os.path.exists(path):
                break
            data = np.load(path)
            # Validate before deleting so a corrupt file can't leave us
            # with partial state; skip it either way
            try:
                data['hist'][0]
                data['count']
            except Exception:
                os.remove(path)
                continue
            os.remove(path)
            if hist is None:
                # BUG FIX: the original seeded `hist`/`count` only when
                # n == 0, so a corrupt file 000 caused a NameError on the
                # first valid file. Seed on the first *valid* file instead.
                hist = data['hist']
                count = data['count']
            else:
                for k in range(7):
                    hist[k] = np.vstack((hist[k], data['hist'][k]))
                count = np.hstack((count, data['count']))
        # Save as one big file (only if something was merged)
        if hist is not None and n > 0:
            print("Saving %s..." % name)
            np.savez(os.path.join(datapath,'%s%03d.npz' % (name, 0)),
                     hist = hist, count = count)
def Plot(eyeball = True, zorder = [6,5,4,3,2,1,0]):
    '''
    Plots the results of a `Compute()` run and returns several figures.

    :param bool eyeball: Plot the eyeball-planet dataset (otherwise the \
           limb-darkened one). Default :py:obj:`True`
    :param list zorder: Per-planet draw order for the marginal histogram. \
           Default `[6,5,4,3,2,1,0]`

    :returns: :py:obj:`(fig_corner, fig_snr, fig_hist)` — a list of seven \
        corner figures (one per planet), the cumulative SNR figure, and \
        the marginal occultation-count figure.
    '''
    # NOTE(review): mutable default argument for `zorder`; it is only read
    # here so this is harmless, but worth confirming it is never mutated.
    # The dataset name
    name = 'hist_' + ('e' if eyeball else 'l')
    # Instantiate a dummy system (only used for planet names/colors)
    system = Trappist1(sample = True, nbody = False, quiet = True)
    # Load every saveset and concatenate
    print("Loading...")
    for n in tqdm(range(1000)):
        if os.path.exists(os.path.join(datapath, '%s%03d.npz' % (name, n))):
            data = np.load(os.path.join(datapath, '%s%03d.npz' % (name, n)))
        else:
            if n == 0:
                raise Exception("Please run `Compute()` first.")
            break
        if n == 0:
            hist = data['hist']
            count = data['count']
        else:
            for k in range(7):
                hist[k] = np.vstack((hist[k], data['hist'][k]))
            count = np.hstack((count, data['count']))
    # For reference, total number of systems instantiated (samples)
    nyears = len(count[0])
    print("Total number of samples: %d" % nyears)
    # For reference, the average number of occultations *per day* is
    occ_day = np.sum([hist[n].shape[0]
                      for n in range(7)]) / count.shape[1] / 365
    print("Average number of occultations per day: %.2f" % occ_day)
    # I get 1.1 (!) These are occultations at all impact parameters
    # and durations, so most are grazing / not really detectable.
    # Corner plots
    fig_corner = [None for i in range(7)]
    for k, planet in enumerate(['b', 'c', 'd', 'e', 'f', 'g', 'h']):
        # Probability that there's no SNR > 0.5 occultation of this
        # planet in any given year
        print("Planet %s P(SNR !> 1.0): %.3f" %
              (planet, 1 - np.count_nonzero(count[k]) / count[k].shape[0]))
        # The samples for the corner plot: phase, impact, duration, SNR
        samples = np.vstack((hist[k][:,0], hist[k][:,1], hist[k][:,2], hist[k][:,5])).T
        # But first check if we have enough samples
        if len(samples) == 0 or (samples.shape[0] <= samples.shape[1]):
            fig_corner[k] = pl.figure()
            continue
        fig_corner[k] = corner.corner(samples, plot_datapoints = False,
                                      range = [(-180,180), (0, 1), (0, 3), (0, 2)],
                                      labels = ["Longitude [deg]",
                                                "Impact parameter",
                                                "Duration [min]",
                                                "SNR"], bins = 30)
        # Bold axis labels and enlarge tick labels on every panel
        for i, ax in enumerate(fig_corner[k].axes):
            ax.set_xlabel(ax.get_xlabel(), fontsize = 14, fontweight = 'bold')
            ax.set_ylabel(ax.get_ylabel(), fontsize = 14, fontweight = 'bold')
            for tick in ax.get_xticklabels() + ax.get_yticklabels():
                tick.set_fontsize(12)
        for i in [0,4,8,12]:
            # IMPORTANT: The `histogram()` method returns the
            # **orbital phase angle**, which is
            # measured from *transit* (phase = 0 deg). The mean longitude
            # is measured from *quadrature*, so there's a 90 deg offset we
            # must apply. Order is secondary eclipse, quadrature left,
            # transit, quadrature right, secondary eclipse
            fig_corner[k].axes[i].set_xticks([-180, -90, 0, 90, 180])
        fig_corner[k].axes[12].set_xticklabels([r"$+$90", r"$\pm$180",
                                                r"$-$90", "0", r"$+$90"])
        # Duration axes are log10(minutes); relabel in linear minutes
        fig_corner[k].axes[8].set_yticks([np.log10(3), 1, np.log10(30), 2,
                                          np.log10(300)])
        fig_corner[k].axes[14].set_xticks([np.log10(3), 1, np.log10(30), 2,
                                           np.log10(300)])
        fig_corner[k].axes[8].set_yticklabels([3, 10, 30, 100, 300])
        fig_corner[k].axes[14].set_xticklabels([3, 10, 30, 100, 300])
        fig_corner[k].axes[15].set_xticks([0, 0.5, 1.0, 1.5, 2.0])
        fig_corner[k].axes[12].set_yticks([0, 0.5, 1.0, 1.5, 2.0])
    # Frequency plot (1): reverse-cumulative SNR histogram per planet
    matplotlib.rcParams['mathtext.fontset'] = 'cm'
    fig_snr = pl.figure(figsize = (7, 7))
    fig_snr.subplots_adjust(hspace = 0.075)
    ax = pl.subplot2grid((1, 1), (0, 0))
    for k, planet in enumerate(system.bodies[1:]):
        if not len(hist[k]):
            continue
        snr = hist[k][:,5]
        color = planet.color
        # Average total SNR / year
        tot_snr = np.sqrt(np.sum(snr ** 2) / nyears)
        # Average total SNR / year from high SNR (> 1) occultations
        tot_snr1 = np.sqrt(np.sum(snr[snr >= 1] ** 2) / nyears)
        label = r"$\mathbf{%s}: %.2f\ (%.2f)$" % (planet.name, tot_snr, tot_snr1)
        ax.hist(snr, cumulative = -1,
                weights = np.ones_like(snr) / len(count[0]),
                color = color, edgecolor = 'none',
                alpha = 0.5, histtype = 'stepfilled',
                bins = 51, range = (0, 5), label = label)
        ax.hist(snr, cumulative = -1,
                weights = np.ones_like(snr) / len(count[0]),
                color = color, histtype = 'step',
                lw = 2, bins = 51, range = (0, 5))
    ax.set_xlabel('SNR', fontsize = 16, fontweight = 'bold')
    ax.set_ylabel(r'Cumulative occultations per year', fontsize = 14,
                  fontweight = 'bold')
    ax.set_yscale('log')
    ax.set_ylim(1e-2, 200)
    ax.set_xlim(-0.1, 5.1)
    ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%s' % x))
    # Reference line at the SNR = 1 detectability threshold
    ax.axvline(1.0, color = 'k', lw = 1, alpha = 1, ls = '--')
    leg = ax.legend(loc = 'upper right', fontsize = 14,
                    title = 'SNR / yr')
    leg.get_title().set_fontweight('bold')
    leg.get_title().set_fontsize(13)
    if eyeball:
        ax.set_title('Eyeball', fontweight = 'bold', fontsize = 16)
    else:
        ax.set_title('Uniform', fontweight = 'bold', fontsize = 16)
    for tick in ax.get_xticklabels() + ax.get_yticklabels():
        tick.set_fontsize(12)
    # Frequency plot (2): per-planet distribution of yearly event counts,
    # drawn twice (main panel `ax` plus broken-axis top panel `axt`)
    fig_hist = pl.figure(figsize = (7, 7))
    fig_hist.subplots_adjust(hspace = 0.075)
    ax = pl.subplot2grid((5, 1), (1, 0), rowspan = 4)
    axt = pl.subplot2grid((5, 1), (0, 0), rowspan = 1,
                          sharex = ax, zorder = -99)
    for k, planet in enumerate(system.bodies[1:]):
        # Fit a gaussian to the count distribution for the legend label
        mu, sig = norm.fit(count[k])
        mu = '%.1f' % mu
        # Pad single-digit means so the legend columns line up
        if len(mu) == 3:
            label = r"$\mathbf{%s}: \ \ %s \pm %3.1f\ \mathrm{yr}^{-1}$" \
                    % (planet.name, mu, sig)
        else:
            label = r"$\mathbf{%s}: %s \pm %3.1f\ \mathrm{yr}^{-1}$" \
                    % (planet.name, mu, sig)
        # Plot!
        # NOTE(review): `normed=True` was removed in matplotlib >= 3.1;
        # confirm the pinned matplotlib version or switch to `density`.
        for axis in [ax, axt]:
            axis.hist(count[k], color = planet.color, edgecolor = 'none',
                      alpha = 0.25, histtype = 'stepfilled', normed = True,
                      range = (0,55), zorder = zorder[k], label = label,
                      bins = 56)
            axis.hist(count[k], color = planet.color, histtype = 'step',
                      normed = True, range = (0,55),
                      zorder = zorder[k] + 0.5, lw = 2, bins = 56)
    leg = ax.legend(loc = 'upper right', fontsize = 18,
                    bbox_to_anchor = (0.89, 0.865),
                    bbox_transform = fig_hist.transFigure,
                    title = 'Occultations with SNR > 1')
    leg.get_title().set_fontweight('bold')
    leg.get_title().set_fontsize(13)
    ax.set_xlabel('Occultations per year with SNR > 1', fontsize = 16,
                  fontweight = 'bold')
    ax.set_ylabel('Probability', fontsize = 16, fontweight = 'bold')
    ax.yaxis.set_label_coords(-0.15, 0.6)
    if eyeball:
        axt.set_title('Eyeball', fontweight = 'bold', fontsize = 16)
    else:
        axt.set_title('Uniform', fontweight = 'bold', fontsize = 16)
    for tick in ax.get_xticklabels() + ax.get_yticklabels() \
            + axt.get_yticklabels():
        tick.set_fontsize(12)
    # HACK: Force a broken axis for the outer planets
    if eyeball:
        ax.set_ylim(0, 0.42)
        axt.set_ylim(0.73, 1.05)
    else:
        ax.set_ylim(0, 0.21)
        axt.set_ylim(0.75, 1.05)
    axt.spines['bottom'].set_visible(False)
    ax.spines['top'].set_visible(False)
    axt.tick_params(bottom='off',labelbottom='off')
    if eyeball:
        axt.set_yticks([0.8, 1.0])
        axt.set_yticklabels(["0.80", "1.00"])
    else:
        axt.set_yticks([0.85, 1.0])
        axt.set_yticklabels(["0.850", "1.000"])
    # Diagonal "cut" marks joining the two panels of the broken axis
    d = .015
    kwargs = dict(transform=axt.transAxes, color='k', clip_on=False, lw = 1)
    axt.plot((-d, +d), (-d, +d), **kwargs)
    axt.plot((1 - d, 1 + d), (-d, +d), **kwargs)
    kwargs.update(transform=ax.transAxes)
    ax.plot((-d, +d), (1 - 0.25 * d, 1 + 0.25 * d), **kwargs)
    ax.plot((1 - d, 1 + d), (1 - 0.25 * d, 1 + 0.25 * d), **kwargs)
    return fig_corner, fig_snr, fig_hist
def MakeFigures(jpg = False):
    '''
    Plots all histogram figures for the paper.

    :param jpg: if True, save figures as JPG instead of PDF.
    '''
    ext = 'jpg' if jpg else 'pdf'
    for kind in ('eyeball', 'limbdark'):
        # Regenerate all three figure sets for this prior, then save each.
        fig_corner, fig_snr, fig_hist = Plot(eyeball=(kind == 'eyeball'))
        for k, planet in enumerate('bcdefgh'):
            fname = '%s_corner_%s.%s' % (planet, kind, ext)
            fig_corner[k].savefig(fname, bbox_inches='tight')
        fig_snr.savefig('snr_%s.%s' % (kind, ext), bbox_inches='tight')
        fig_hist.savefig('hist_%s.%s' % (kind, ext), bbox_inches='tight')
pl.close() | rodluger/planetplanet | scripts/hist.py | Python | gpl-3.0 | 27,529 | [
"Gaussian"
] | 03519b7d6d060582b4998747c47857e9d5a7e8392b587afc307ae6d9a1e5fe03 |
"""
Align V and J starting from an adaptive CSV file
"""
import collections
import contextlib
import csv
import functools
import itertools
import logging
import operator
import tempfile
import pysam
from .. import imgt, util
from . import align_fastq
# Module-level logger for this subcommand.
log = logging.getLogger('vdjalign')

# BAM tag names used to annotate each aligned read (values taken from the
# adaptive CSV columns in action() below).
TAG_COUNT = 'XC'        # abundance, from the configurable count column
TAG_CDR3_LENGTH = 'XL'  # from the 'cdr3Length' column
TAG_STATUS = 'XS'       # from the 'sequenceStatus' column
def add_tags(reads, rows):
    """Tags should be the length of reads, in same order"""
    # Reads arrive grouped by query name (one group per template, possibly
    # several alignments each); `rows` yields (name, tags) pairs in the same
    # order.  izip_longest (Python 2) ensures a length mismatch raises
    # instead of silently truncating.
    reads = itertools.groupby(reads, operator.attrgetter('qname'))
    for (name, tags), (g, v) in itertools.izip_longest(rows, reads):
        # The tag row and the read group must refer to the same template.
        assert name == g, (g, name)
        for read in v:
            # Extend the read's tag list and write it back through the
            # .tags property so pysam picks up the change.
            t = read.tags
            t.extend(tags.iteritems())
            read.tags = t
            yield read
def or_none(fn):
    """Wrap *fn* so that falsy input yields None instead of being converted."""
    @functools.wraps(fn)
    def apply_or_none(value):
        return fn(value) if value else None
    return apply_or_none
def build_parser(p):
    # Attach this subcommand's CLI options to argparse parser `p`.
    p.add_argument('csv_file', type=util.opener('rU'))
    p.add_argument('-d', '--delimiter', default='\t',
                   help="""Delimiter [default: tab]""")
    p.add_argument('-c', '--count-column', default='n_sources', help="""Column
                   containing (integer) counts [default: %(default)s]""")
    # Shared alignment options (threads, scoring, locus/segment subsets, ...)
    # defined once in align_fastq and reused here.
    align_fastq.fill_targets_alignment_options(p)
    p.set_defaults(func=action)
def action(a):
    # Entry point: read the adaptive CSV, align against V (plus J, and D for
    # IGH) references, and write a tagged BAM to a.out_bamfile.
    with a.csv_file as ifp:
        # Drop comment lines before handing the stream to DictReader.
        csv_lines = (i for i in ifp if not i.startswith('#'))
        r = csv.DictReader(csv_lines, delimiter=a.delimiter)
        int_or_none = or_none(int)
        Row = collections.namedtuple('Row', ['name', 'sequence', 'v_index',
                                             'j_index', 'tags'])
        log.info('Loading sequences.')
        # Fall back to the row number when the CSV lacks an explicit name.
        rows = (Row(name=row.get('name', str(i)),
                    sequence=row['nucleotide'],
                    v_index=int_or_none(row.get('vIndex')),
                    j_index=int_or_none(row.get('jIndex')),
                    tags={TAG_COUNT: int_or_none(row[a.count_column]),
                          TAG_CDR3_LENGTH: int_or_none(row['cdr3Length']),
                          TAG_STATUS: int(row['sequenceStatus'])})
                for i, row in enumerate(r))
        sequences = []
        tags = []
        for i in rows:
            sequences.append((i.name, i.sequence))
            tags.append((i.name, i.tags))
        log.info('Done: read %d', len(sequences))
    # Bind all alignment parameters once; only the reference/query paths
    # vary per call below.
    align = functools.partial(align_fastq.sw_to_bam, n_threads=a.threads,
                              read_group=a.read_group,
                              match=a.match,
                              mismatch=a.mismatch,
                              gap_open=a.gap_open,
                              gap_extend=a.gap_extend,
                              min_score=a.min_score,
                              bandwidth=a.bandwidth,
                              max_drop=a.max_drop)
    closing = contextlib.closing
    # Temporary FASTA files for the germline references and the query
    # sequences; the D reference only exists for the IGH locus.
    with imgt.temp_fasta(a.locus, 'v', a.v_subset) as vf, \
            imgt.temp_fasta(a.locus, 'j', a.j_subset) as jf, \
            util.with_if(a.locus == 'IGH', imgt.temp_fasta, a.locus, 'd',
                         collection=a.d_subset) as df, \
            tempfile.NamedTemporaryFile(suffix='.bam') as tbam, \
            tempfile.NamedTemporaryFile(suffix='.fasta') as tf:
        for name, sequence in sequences:
            tf.write('>{0}\n{1}\n'.format(name, sequence))
        tf.flush()
        # df is None for non-IGH loci (see util.with_if above).
        ex_refs = [i for i in [df, jf] if i is not None]
        align(vf, tf.name, tbam.name, extra_ref_paths=ex_refs)
        # Copy the aligned reads into the final BAM, attaching the per-read
        # tags collected from the CSV.
        with closing(pysam.Samfile(tbam.name, 'rb')) as in_sam, \
                closing(pysam.Samfile(a.out_bamfile, 'wb', template=in_sam)) as out_sam:
            for read in add_tags(in_sam, tags):
                out_sam.write(read)
| cmccoy/ighutil | python/vdjalign/subcommands/adaptive_tsv.py | Python | gpl-3.0 | 3,850 | [
"pysam"
] | 200a9c6ab4c2bb9d7f7cc473436ae8fbc49ba1c8dcc2bcad6b3ba87bf098111c |
# -*- coding: utf-8 -*-
#
# test_stdp_triplet_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the stdp_triplet_synapse in NEST.
import nest
import unittest
from math import exp
import numpy as np
@nest.ll_api.check_stack
class STDPTripletSynapseTestCase(unittest.TestCase):
    """Check stdp_triplet_synapse model properties."""

    def setUp(self):
        """Build a minimal pre -> post parrot-neuron circuit connected by
        one stdp_triplet_synapse with known parameters."""
        nest.set_verbosity('M_WARNING')
        nest.ResetKernel()

        # settings
        self.dendritic_delay = 1.0
        self.decay_duration = 5.0
        self.synapse_model = "stdp_triplet_synapse"
        self.syn_spec = {
            "synapse_model": self.synapse_model,
            "delay": self.dendritic_delay,
            # set receptor 1 postsynaptically, to not generate extra spikes
            "receptor_type": 1,
            "weight": 5.0,
            "tau_plus": 16.8,
            "tau_plus_triplet": 101.0,
            "Aplus": 0.1,
            "Aminus": 0.1,
            "Aplus_triplet": 0.1,
            "Aminus_triplet": 0.1,
            "Kplus": 0.0,
            "Kplus_triplet": 0.0,
            "Wmax": 100.0,
        }
        self.post_neuron_params = {
            "tau_minus": 33.7,
            "tau_minus_triplet": 125.0,
        }

        # setup basic circuit
        self.pre_neuron = nest.Create("parrot_neuron")
        self.post_neuron = nest.Create(
            "parrot_neuron", 1, params=self.post_neuron_params)
        nest.Connect(self.pre_neuron, self.post_neuron, syn_spec=self.syn_spec)

    def generateSpikes(self, neuron, times):
        """Trigger spike to given neuron at specified times."""
        delay = 1.
        gen = nest.Create("spike_generator", 1, {
            "spike_times": [t - delay for t in times]})
        nest.Connect(gen, neuron, syn_spec={"delay": delay})

    def status(self, which):
        """Get synapse parameter status."""
        stats = nest.GetConnections(
            self.pre_neuron, synapse_model=self.synapse_model)
        return stats.get(which) if len(stats) == 1 else stats.get(which)[0]

    def decay(self, time, Kplus, Kplus_triplet, Kminus, Kminus_triplet):
        """Decay variables."""
        Kplus *= exp(-time / self.syn_spec["tau_plus"])
        Kplus_triplet *= exp(-time / self.syn_spec["tau_plus_triplet"])
        Kminus *= exp(-time / self.post_neuron_params["tau_minus"])
        Kminus_triplet *= exp(-time /
                              self.post_neuron_params["tau_minus_triplet"])
        return (Kplus, Kplus_triplet, Kminus, Kminus_triplet)

    def facilitate(self, w, Kplus, Kminus_triplet):
        """Facilitate weight."""
        # sign(Wmax)/abs(w) keeps the rule symmetric for inhibitory
        # (negative-weight) synapses, see STDPTripletInhTestCase.
        Wmax = self.status("Wmax")
        return np.sign(Wmax) * (abs(w) + Kplus * (
            self.syn_spec["Aplus"] +
            self.syn_spec["Aplus_triplet"] * Kminus_triplet)
        )

    def depress(self, w, Kminus, Kplus_triplet):
        """Depress weight."""
        Wmax = self.status("Wmax")
        return np.sign(Wmax) * (abs(w) - Kminus * (
            self.syn_spec["Aminus"] +
            self.syn_spec["Aminus_triplet"] * Kplus_triplet)
        )

    def assertAlmostEqualDetailed(self, expected, given, message):
        """Improve assertAlmostEqual with detailed message."""
        # Fixed: message previously had an unbalanced parenthesis.
        messageWithValues = "%s (expected: `%s` was: `%s`)" % (
            message, str(expected), str(given))
        self.assertAlmostEqual(given, expected, msg=messageWithValues)

    def test_badPropertiesSetupsThrowExceptions(self):
        """Check that exceptions are thrown when setting bad parameters."""
        def setupProperty(property):
            bad_syn_spec = self.syn_spec.copy()
            bad_syn_spec.update(property)
            nest.Connect(self.pre_neuron, self.post_neuron, syn_spec=bad_syn_spec)

        def badPropertyWith(content, parameters):
            msg = "BadProperty(.+)" + content
            self.assertRaisesRegex(nest.kernel.NESTError, msg, setupProperty, parameters)

        badPropertyWith("Kplus", {"Kplus": -1.0})
        badPropertyWith("Kplus_triplet", {"Kplus_triplet": -1.0})

    def test_varsZeroAtStart(self):
        """Check that pre- and postsynaptic variables are zero at start."""
        self.assertAlmostEqualDetailed(
            0.0, self.status("Kplus"), "Kplus should be zero")
        self.assertAlmostEqualDetailed(0.0, self.status(
            "Kplus_triplet"), "Kplus_triplet should be zero")

    def test_preVarsIncreaseWithPreSpike(self):
        """Check that pre-synaptic variables (Kplus, Kplus_triplet) increase
        after each pre-synaptic spike."""
        self.generateSpikes(self.pre_neuron, [2.0])

        Kplus = self.status("Kplus")
        Kplus_triplet = self.status("Kplus_triplet")

        nest.Simulate(20.0)
        self.assertAlmostEqualDetailed(
            Kplus + 1.0,
            self.status("Kplus"),
            "Kplus should have increased by 1")
        self.assertAlmostEqualDetailed(
            Kplus_triplet + 1.0,
            self.status("Kplus_triplet"),
            "Kplus_triplet should have increased by 1")

    def test_preVarsDecayAfterPreSpike(self):
        """Check that pre-synaptic variables (Kplus, Kplus_triplet) decay
        after each pre-synaptic spike."""
        self.generateSpikes(self.pre_neuron, [2.0])
        # trigger computation
        self.generateSpikes(self.pre_neuron, [2.0 + self.decay_duration])

        (Kplus, Kplus_triplet, _, _) = self.decay(
            self.decay_duration, 1.0, 1.0, 0.0, 0.0)
        Kplus += 1.0
        Kplus_triplet += 1.0

        nest.Simulate(20.0)
        self.assertAlmostEqualDetailed(
            Kplus, self.status("Kplus"), "Kplus should have decay")
        self.assertAlmostEqualDetailed(Kplus_triplet, self.status(
            "Kplus_triplet"), "Kplus_triplet should have decay")

    def test_preVarsDecayAfterPostSpike(self):
        """Check that pre-synaptic variables (Kplus, Kplus_triplet) decay
        after each postsynaptic spike."""
        self.generateSpikes(self.pre_neuron, [2.0])
        self.generateSpikes(self.post_neuron, [3.0, 4.0])
        # trigger computation
        self.generateSpikes(self.pre_neuron, [2.0 + self.decay_duration])

        # Post spikes do not affect the presynaptic traces: same expected
        # values as in test_preVarsDecayAfterPreSpike.
        (Kplus, Kplus_triplet, _, _) = self.decay(
            self.decay_duration, 1.0, 1.0, 0.0, 0.0)
        Kplus += 1.0
        Kplus_triplet += 1.0

        nest.Simulate(20.0)
        self.assertAlmostEqualDetailed(
            Kplus, self.status("Kplus"), "Kplus should have decay")
        self.assertAlmostEqualDetailed(Kplus_triplet, self.status(
            "Kplus_triplet"), "Kplus_triplet should have decay")

    def test_weightChangeWhenPrePostSpikes(self):
        """Check that weight changes whenever a pre-post spike pair happen."""
        self.generateSpikes(self.pre_neuron, [2.0])
        self.generateSpikes(self.post_neuron, [4.0])
        self.generateSpikes(self.pre_neuron, [6.0])  # trigger computation

        Kplus = self.status("Kplus")
        Kplus_triplet = self.status("Kplus_triplet")
        Kminus = 0.0
        Kminus_triplet = 0.0
        weight = self.status("weight")

        # Replay the expected trace/weight updates step by step.
        (Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
            2.0, Kplus, Kplus_triplet, Kminus, Kminus_triplet)
        weight = self.depress(weight, Kminus, Kplus_triplet)
        Kplus += 1.0
        Kplus_triplet += 1.0

        (Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
            2.0 + self.dendritic_delay, Kplus, Kplus_triplet,
            Kminus, Kminus_triplet
        )
        weight = self.facilitate(weight, Kplus, Kminus_triplet)
        Kminus += 1.0
        Kminus_triplet += 1.0

        (Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
            2.0 - self.dendritic_delay, Kplus, Kplus_triplet,
            Kminus, Kminus_triplet
        )
        weight = self.depress(weight, Kminus, Kplus_triplet)

        nest.Simulate(20.0)
        self.assertAlmostEqualDetailed(weight, self.status(
            "weight"), "weight should have decreased")

    def test_weightChangeWhenPrePostPreSpikes(self):
        """Check that weight changes whenever a pre-post-pre spike triplet
        happen."""
        self.generateSpikes(self.pre_neuron, [2.0, 6.0])
        self.generateSpikes(self.post_neuron, [4.0])
        self.generateSpikes(self.pre_neuron, [8.0])  # trigger computation

        Kplus = self.status("Kplus")
        Kplus_triplet = self.status("Kplus_triplet")
        Kminus = 0.0
        Kminus_triplet = 0.0
        weight = self.status("weight")

        (Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
            2.0, Kplus, Kplus_triplet, Kminus, Kminus_triplet)
        weight = self.depress(weight, Kminus, Kplus_triplet)
        Kplus += 1.0
        Kplus_triplet += 1.0

        (Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
            2.0 + self.dendritic_delay, Kplus, Kplus_triplet,
            Kminus, Kminus_triplet
        )
        weight = self.facilitate(weight, Kplus, Kminus_triplet)
        Kminus += 1.0
        Kminus_triplet += 1.0

        (Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
            2.0 - self.dendritic_delay, Kplus, Kplus_triplet,
            Kminus, Kminus_triplet
        )
        weight = self.depress(weight, Kminus, Kplus_triplet)
        Kplus += 1.0
        Kplus_triplet += 1.0

        (Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
            2.0, Kplus, Kplus_triplet, Kminus, Kminus_triplet)
        weight = self.depress(weight, Kminus, Kplus_triplet)

        nest.Simulate(20.0)
        self.assertAlmostEqualDetailed(weight, self.status(
            "weight"), "weight should have decreased")

    def test_maxWeightStaturatesWeight(self):
        """Check that setting maximum weight property keep weight limited."""
        limited_weight = self.status("weight") + 1e-10
        limited_syn_spec = self.syn_spec.copy()
        limited_syn_spec.update({"Wmax": limited_weight})
        nest.Connect(self.pre_neuron, self.post_neuron,
                     syn_spec=limited_syn_spec)

        self.generateSpikes(self.pre_neuron, [2.0])
        self.generateSpikes(self.pre_neuron, [3.0])  # trigger computation

        nest.Simulate(20.0)
        self.assertAlmostEqualDetailed(limited_weight, self.status(
            "weight"), "weight should have been limited")
@nest.ll_api.check_stack
class STDPTripletInhTestCase(STDPTripletSynapseTestCase):
    # Inhibitory variant of the triplet-STDP tests: identical circuit and
    # test methods, but with negative weight and Wmax.  The sign handling in
    # facilitate()/depress() (np.sign(Wmax) * abs(w)) makes the inherited
    # expectations valid here too.

    def setUp(self):
        nest.set_verbosity('M_WARNING')
        nest.ResetKernel()

        # settings
        self.dendritic_delay = 1.0
        self.decay_duration = 5.0
        self.synapse_model = "stdp_triplet_synapse"
        self.syn_spec = {
            "synapse_model": self.synapse_model,
            "delay": self.dendritic_delay,
            # set receptor 1 postsynaptically, to not generate extra spikes
            "receptor_type": 1,
            "weight": -5.0,
            "tau_plus": 16.8,
            "tau_plus_triplet": 101.0,
            "Aplus": 0.1,
            "Aminus": 0.1,
            "Aplus_triplet": 0.1,
            "Aminus_triplet": 0.1,
            "Kplus": 0.0,
            "Kplus_triplet": 0.0,
            "Wmax": -100.0,
        }
        self.post_neuron_params = {
            "tau_minus": 33.7,
            "tau_minus_triplet": 125.0,
        }

        # setup basic circuit
        self.pre_neuron = nest.Create("parrot_neuron")
        self.post_neuron = nest.Create("parrot_neuron", 1,
                                       params=self.post_neuron_params)
        nest.Connect(self.pre_neuron, self.post_neuron, syn_spec=self.syn_spec)
def suite_inh():
    """Return the inhibitory triplet-STDP test suite.

    unittest.makeSuite is deprecated (removed in Python 3.13); TestLoader
    with the default 'test' prefix is the equivalent replacement.
    """
    return unittest.TestLoader().loadTestsFromTestCase(STDPTripletInhTestCase)
def suite():
    """Return the excitatory triplet-STDP test suite.

    unittest.makeSuite is deprecated (removed in Python 3.13); TestLoader
    with the default 'test' prefix is the equivalent replacement.
    """
    return unittest.TestLoader().loadTestsFromTestCase(STDPTripletSynapseTestCase)
def run():
    """Run both test suites (excitatory and inhibitory) verbosely."""
    runner = unittest.TextTestRunner(verbosity=2)
    for build_suite in (suite, suite_inh):
        runner.run(build_suite())


if __name__ == "__main__":
    run()
| nest/nest-simulator | testsuite/pytests/test_stdp_triplet_synapse.py | Python | gpl-2.0 | 12,739 | [
"NEURON"
] | 746c818ba3f3e685f60673bfd209254e8b4b8215bdc9302014b8c621d7773572 |
"""
MultiRenderWidget
:Authors:
Berend Klein Haneveld
"""
from vtk import vtkOpenGLGPUMultiVolumeRayCastMapper
from vtk import vtkRenderer
from vtk import vtkInteractorStyleTrackballCamera
from vtk import vtkImagePlaneWidget
from vtk import vtkVolume
from vtk import vtkImageData
from vtk import vtkColorTransferFunction
from vtk import vtkPiecewiseFunction
from vtk import vtkVolumeProperty
from vtk import VTK_FLOAT
from PySide.QtGui import QWidget
from PySide.QtGui import QGridLayout
from PySide.QtCore import Signal
from PySide.QtCore import Slot
from ui.transformations import TransformationList
from ui.transformations import ClippingBox
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from core.vtkDrawing import CreateBounds
from core.vtkDrawing import CreateOrientationGrid
class MultiRenderWidget(QWidget):
    """
    MultiRenderWidget is a widget that can display two datasets: fixed and
    moving dataset.
    It uses the given volume property to derive how the volumes should be
    displayed. This widget also has its own controls that define how the
    volumes from the other widgets will be mixed into one visualization.

    The hard thing is to find out how to share volumes / volume properties /
    resources between widgets while still being linked together. So for
    instance when a volume is clipped in one of the single views it should
    be immediately visible in this widget. And the problem with the volume
    properties is that the volume property for this widget should be linked
    to the other widgets so that when they update their volume properties, this
    volume property will also be updated. But it can't be the same...

    There can be a few visualization modes:

    * 'simple' mix mode
    * colorized mix mode

    Simple mix mode is a mode that displays both datasets in the same way as
    they are visualized in the other views. Two controls are given to provide
    a way of setting the opacity of both volumes so that the user can mix the
    datasets to a nice visualization.

    Colorized mix mode makes grayscale visualizations of the
    """

    dataChanged = Signal()
    updated = Signal()

    def __init__(self):
        super(MultiRenderWidget, self).__init__()

        # Default volume renderer (layer 0, interactive)
        self.renderer = vtkRenderer()
        self.renderer.SetBackground2(0.4, 0.4, 0.4)
        self.renderer.SetBackground(0.1, 0.1, 0.1)
        self.renderer.SetGradientBackground(True)
        self.renderer.SetInteractive(1)
        self.renderer.SetLayer(0)

        # Overlay renderer which is synced with the default renderer
        self.rendererOverlay = vtkRenderer()
        self.rendererOverlay.SetLayer(1)
        self.rendererOverlay.SetInteractive(0)
        # Keep the overlay camera identical to the main camera at all times.
        self.renderer.GetActiveCamera().AddObserver("ModifiedEvent", self._syncCameras)

        self.rwi = QVTKRenderWindowInteractor(parent=self)
        self.rwi.SetInteractorStyle(vtkInteractorStyleTrackballCamera())
        self.rwi.GetRenderWindow().AddRenderer(self.renderer)
        self.rwi.GetRenderWindow().AddRenderer(self.rendererOverlay)
        self.rwi.GetRenderWindow().SetNumberOfLayers(2)
        self.rwi.SetDesiredUpdateRate(0)

        # Three orthogonal slice widgets; enabled/disabled via setSlices().
        self._imagePlaneWidgets = [vtkImagePlaneWidget() for i in range(3)]
        for index in range(3):
            self._imagePlaneWidgets[index].DisplayTextOn()
            self._imagePlaneWidgets[index].SetInteractor(self.rwi)

        self.mapper = vtkOpenGLGPUMultiVolumeRayCastMapper()
        self.mapper.SetBlendModeToComposite()
        self.volume = vtkVolume()
        self.volume.SetMapper(self.mapper)
        self.renderer.AddViewProp(self.volume)

        self.fixedGridItems = []
        self.movingGridItems = []
        self.orientationGridItems = []

        # Create two empty datasets: the multi-volume mapper always needs
        # two inputs, so dummies stand in until real data arrives.
        self.fixedImageData = CreateEmptyImageData()
        self.movingImageData = CreateEmptyImageData()
        self.fixedVolumeProperty = vtkVolumeProperty()
        self.movingVolumeProperty = vtkVolumeProperty()
        color, opacityFunction = CreateEmptyFunctions()
        self.fixedVolumeProperty.SetColor(color)
        self.fixedVolumeProperty.SetScalarOpacity(opacityFunction)
        self.movingVolumeProperty.SetColor(color)
        self.movingVolumeProperty.SetScalarOpacity(opacityFunction)
        self.visualization = None  # MultiVolumeVisualization

        self.clippingBox = ClippingBox()
        self.clippingBox.setWidget(self)

        self.mapper.SetInputData(0, self.fixedImageData)
        self.mapper.SetInputData(1, self.movingImageData)

        self._transformations = TransformationList()
        self._transformations.transformationChanged.connect(self.updateTransformation)
        self._shouldResetCamera = False

        self.setMinimumWidth(340)
        self.setMinimumHeight(340)

        layout = QGridLayout(self)
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.rwi, 0, 0)
        self.setLayout(layout)

    def render(self):
        # Camera reset is deferred to the first render after new data.
        if self._shouldResetCamera:
            self.renderer.ResetCamera()
            self._shouldResetCamera = False
        self.rwi.Render()
        # Prevent warning messages on OSX by not asking to render
        # when the render window has never rendered before
        if not self.rwi.GetRenderWindow().GetNeverRendered():
            self.rwi.GetRenderWindow().Render()

    @Slot(object)
    def setFixedData(self, imageData):
        """Set (or clear, with None) the fixed dataset."""
        self._cleanUpGrids()

        self.fixedImageData = imageData
        # Replace missing datasets by dummies so the mapper keeps 2 inputs.
        if self.fixedImageData is None:
            self.fixedImageData = CreateEmptyImageData()
        if self.movingImageData is None:
            self.movingImageData = CreateEmptyImageData()

        self.mapper.SetInputData(0, self.fixedImageData)
        self.mapper.SetInputData(1, self.movingImageData)

        # Slice widgets follow the fixed dataset.
        for index in range(3):
            self._imagePlaneWidgets[index].SetInputData(self.fixedImageData)
            self._imagePlaneWidgets[index].SetPlaneOrientation(index)

        self._updateGrids()
        self._createClippingBox()
        self._shouldResetCamera = True

    @Slot(object)
    def setMovingData(self, imageData):
        """Set (or clear, with None) the moving dataset."""
        self._cleanUpGrids()

        self.movingImageData = imageData
        if self.movingImageData is None:
            self.movingImageData = CreateEmptyImageData()
        if self.fixedImageData is None:
            self.fixedImageData = CreateEmptyImageData()

        self.mapper.SetInputData(0, self.fixedImageData)
        self.mapper.SetInputData(1, self.movingImageData)

        self._updateGrids()
        self._shouldResetCamera = True

    def setVolumeVisualization(self, visualization):
        """Adopt the volume properties from a MultiVolumeVisualization, or
        fall back to fully transparent properties when None."""
        self.visualization = visualization

        if self.visualization is None:
            color, opacityFunction = CreateEmptyFunctions()
            self.fixedVolumeProperty = vtkVolumeProperty()
            self.fixedVolumeProperty.SetColor(color)
            self.fixedVolumeProperty.SetScalarOpacity(opacityFunction)
            self.movingVolumeProperty = vtkVolumeProperty()
            self.movingVolumeProperty.SetColor(color)
            self.movingVolumeProperty.SetScalarOpacity(opacityFunction)
        else:
            self.fixedVolumeProperty = self.visualization.fixedVolProp
            self.movingVolumeProperty = self.visualization.movingVolProp
            self.visualization.setMapper(self.mapper)
            # Mapper slot 1 = fixed volume, slot 2 = moving volume.
            if self.visualization.fixedVisualization:
                self._updateMapper(self.visualization.fixedVisualization, 1)
            if self.visualization.movingVisualization:
                self._updateMapper(self.visualization.movingVisualization, 2)

        self._updateVolumeProperties()

    def _updateGrids(self):
        # NOTE(review): only movingGridItems is populated here;
        # fixedGridItems stays empty — confirm whether bounds for the fixed
        # volume were intentionally dropped.
        if not self._hasImageData():
            return
        if self._hasMovingImageData():
            self.movingGridItems = CreateBounds(self.movingImageData.GetBounds())
        boundsFixed = self.fixedImageData.GetBounds()
        boundsMoving = self.movingImageData.GetBounds()
        # Element-wise maximum of the two bounds (Python 2 two-sequence map).
        maxBounds = map(lambda x, y: max(x, y), boundsFixed, boundsMoving)
        self.orientationGridItems = CreateOrientationGrid(maxBounds, self.renderer.GetActiveCamera())
        for item in (self.movingGridItems + self.fixedGridItems + self.orientationGridItems):
            self.renderer.AddViewProp(item)

    def _cleanUpGrids(self):
        # Remove every grid prop from the renderer and forget them.
        for item in (self.fixedGridItems + self.movingGridItems + self.orientationGridItems):
            self.renderer.RemoveViewProp(item)
        self.fixedGridItems = []
        self.movingGridItems = []
        self.orientationGridItems = []

    def _createClippingBox(self):
        # Attach the clipping box to whichever real dataset is available.
        if not self._hasImageData():
            self.clippingBox.showClippingBox(False)
        else:
            if self._hasFixedImageData():
                self.clippingBox.setImageData(self.fixedImageData)
            elif self._hasMovingImageData():
                self.clippingBox.setImageData(self.movingImageData)
            else:
                self.clippingBox.enable(False)

    def _hasImageData(self):
        # True when at least one real (non-dummy) dataset is loaded.
        return self._hasFixedImageData() or self._hasMovingImageData()

    def _hasFixedImageData(self):
        return self._isActualImageData(self.fixedImageData)

    def _hasMovingImageData(self):
        return self._isActualImageData(self.movingImageData)

    def _isActualImageData(self, imageData):
        # Dummy datasets from CreateEmptyImageData() are exactly 3x3x3.
        dimensions = imageData.GetDimensions()
        return dimensions != (3, 3, 3)

    # Properties

    @property
    def transformations(self):
        return self._transformations

    @transformations.setter
    def transformations(self, value):
        # Copy instead of rebinding so the change signal stays connected.
        self._transformations.copyFromTransformations(value)

    # Slots

    @Slot(object)
    def setSlices(self, slices):
        """Enable/disable the three slice widgets from a list of booleans."""
        for sliceIndex in range(len(slices)):
            if slices[sliceIndex]:
                self._imagePlaneWidgets[sliceIndex].On()
            else:
                self._imagePlaneWidgets[sliceIndex].Off()

    def showClippingBox(self, show):
        self.clippingBox.showClippingBox(show)
        self.render()

    def showClippingPlanes(self, show):
        self.clippingBox.showClippingPlanes(show)
        self.render()

    def resetClippingBox(self):
        self.clippingBox.resetClippingBox()
        self.render()

    @Slot()
    def updateTransformation(self):
        # Apply the combined user transform to the moving volume and its
        # bounds grid, then re-render.
        transform = self._transformations.completeTransform()
        self.mapper.SetSecondInputUserTransform(transform)
        for item in self.movingGridItems:
            item.SetUserTransform(transform)
        self.render()

    # Private methods

    def _updateMapper(self, volVis, volNr):
        # Push shader type and (for MIP/MIDA) normalized scalar bounds and
        # brightness into the mapper for volume slot `volNr` (1 or 2).
        shaderType = volVis.shaderType()
        if volNr == 1:
            self.mapper.SetShaderType1(shaderType)
            if shaderType == 2:  # MIDA
                lowerBound = (volVis.lowerBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
                upperBound = (volVis.upperBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
                self.mapper.SetLowerBound1(lowerBound)
                self.mapper.SetUpperBound1(upperBound)
                self.mapper.SetBrightness1(volVis.brightness / 100.0)
            if shaderType == 1:  # MIP
                lowerBound = (volVis.lowerBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
                upperBound = (volVis.upperBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
                self.mapper.SetLowerBound1(lowerBound)
                self.mapper.SetUpperBound1(upperBound)
        else:
            self.mapper.SetShaderType2(shaderType)
            if shaderType == 2:  # MIDA
                lowerBound = (volVis.lowerBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
                upperBound = (volVis.upperBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
                self.mapper.SetLowerBound2(lowerBound)
                self.mapper.SetUpperBound2(upperBound)
                self.mapper.SetBrightness2(volVis.brightness / 100.0)
            if shaderType == 1:  # MIP
                lowerBound = (volVis.lowerBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
                upperBound = (volVis.upperBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
                self.mapper.SetLowerBound2(lowerBound)
                self.mapper.SetUpperBound2(upperBound)

    def _updateVolumeProperties(self):
        """
        Private method to update the volume properties.
        """
        if self.volume.GetProperty() != self.fixedVolumeProperty:
            self.volume.SetProperty(self.fixedVolumeProperty)
        if self.mapper.GetProperty2() != self.movingVolumeProperty:
            self.mapper.SetProperty2(self.movingVolumeProperty)
        self.render()

    def _syncCameras(self, camera, ev):
        """
        Camera modified event callback. Copies the parameters of
        the renderer camera into the camera of the overlay so they
        stay synced at all times.
        """
        self.rendererOverlay.GetActiveCamera().ShallowCopy(camera)
# Helper methods
def CreateEmptyImageData():
    """
    Create an empty image data object. The multi volume mapper expects two
    inputs, so if there is only one dataset loaded, a dummy dataset can be
    created using this method. Be sure to also set a dummy volume property
    (CreateVolumeVisualizationInvisible) so that the volume does not show up in
    the renderer.

    :rtype: vtkImageData
    """
    dimensions = [3, 3, 3]
    imageData = vtkImageData()
    imageData.Initialize()
    imageData.SetDimensions(dimensions)
    imageData.SetSpacing(1, 1, 1)
    imageData.SetOrigin(10, 10, 0)
    imageData.AllocateScalars(VTK_FLOAT, 1)
    # AllocateScalars may not zero the buffer, so clear every voxel
    # explicitly. The previous loops used range(0, dim - 1) and therefore
    # left the last slice/row/column uninitialized; iterate the full extent.
    for z in range(dimensions[2]):
        for y in range(dimensions[1]):
            for x in range(dimensions[0]):
                imageData.SetScalarComponentFromDouble(x, y, z, 0, 0.0)
    return imageData
def CreateEmptyFunctions():
    """
    Build a fully transparent, black transfer-function pair covering the
    scalar range [-1000, 1000].

    :rtype: vtkColorTransferFunction, vtkPiecewiseFunction
    """
    # Transfer functions and properties
    colorFunction = vtkColorTransferFunction()
    opacityFunction = vtkPiecewiseFunction()
    for scalar in (-1000, 1000):
        colorFunction.AddRGBPoint(scalar, 0.0, 0.0, 0.0)
        opacityFunction.AddPoint(scalar, 0.0)
    return colorFunction, opacityFunction
| berendkleinhaneveld/Registrationshop | ui/widgets/MultiRenderWidget.py | Python | mit | 12,737 | [
"VTK"
] | aa88733036368474152fa995b916ca01a0c4f9de59b9b91fe8abe4b937b409ce |
# -*- coding: utf-8 -*-
import os
import moojoos as mj
import numpy as np
from pylab import *
from PIL import Image
from scipy.ndimage import filters
# target files for comparison
files = [ 'rena_sharp.jpg', 'rena_gaussian_2.jpg' ]

# targetting rena's lips?
r = 260  # image row used for all 1-D edge profiles below

cd = os.path.dirname(os.path.abspath(__file__))

# gray-scaled images
test1 = np.array(Image.open(os.path.join(cd, files[0])).convert('L'))
test2 = np.array(Image.open(os.path.join(cd, files[1])).convert('L'))

# median-filtered: does noise-reduction help later calculations???
t1_med = filters.median_filter(test1, 3)
t2_med = filters.median_filter(test2, 3)

# Sobel filters
t1_sob, t1_sobm = np.zeros(test1.shape), np.zeros(test1.shape)
t2_sob, t2_sobm = np.zeros(test2.shape), np.zeros(test2.shape)
filters.sobel(test1, 1, t1_sob)  # Sobel, vert. on orig. gray-scale image
filters.sobel(test2, 1, t2_sob)
filters.sobel(t1_med, 1, t1_sobm)  # Sobel, vert. on median-filtered image
filters.sobel(t2_med, 1, t2_sobm)

# Edge markers @ r: merge the maxima and minima locations of each Sobel
# response along row r into a single de-duplicated peak list.
t1s_mx, t1s_mn = mj.signal.peak_detect(t1_sob[r], lookahead=1, minpeak=4.0)
t1s_peaks = list(set(t1s_mx[0]+t1s_mn[0]))
t1sm_mx, t1sm_mn = mj.signal.peak_detect(t1_sobm[r], lookahead=1, minpeak=4.0)
t1sm_peaks = list(set(t1sm_mx[0]+t1sm_mn[0]))
t2s_mx, t2s_mn = mj.signal.peak_detect(t2_sob[r], lookahead=1, minpeak=4.0)
t2s_peaks = list(set(t2s_mx[0]+t2s_mn[0]))
t2sm_mx, t2sm_mn = mj.signal.peak_detect(t2_sobm[r], lookahead=1, minpeak=4.0)
t2sm_peaks = list(set(t2sm_mx[0]+t2sm_mn[0]))

figure()
gray()

# Each 3-panel row below shows: image, Sobel response with edge start/end
# markers (blue +/green +) at row r, and the intensity + Sobel profiles
# along row r with detected peaks (red +).
###############################################################################
subplot(4,3,1)
title('rena, sharp')
imshow(test1)
axis('off')

subplot(4,3,2)
imshow(t1_sob)
xlim(0,test1.shape[1])
ylim(test1.shape[0],0)
xticks(range(0,350,100))
for p in t1s_peaks:
    s,e = mj.signal.find_edge_startend(t1_sob[r], p)
    plot(s, r, 'b+')
    plot(e, r, 'g+')

subplot(4,3,3)
ylim(-600,1000)
plot(range(len(test1[r])), test1[r])
plot(range(len(t1_sob[r])), t1_sob[r])
axhline(y=0, color='green', ls='--')
plot(t1s_peaks, t1_sob[r][t1s_peaks], 'r+')

###############################################################################
subplot(4,3,4)
title('rena, sharp - med.filt.')
imshow(t1_med)
axis('off')

subplot(4,3,5)
imshow(t1_sobm)
xlim(0,t1_sobm.shape[1])
ylim(t1_sobm.shape[0],0)
xticks(range(0,350,100))
for p in t1sm_peaks:
    s,e = mj.signal.find_edge_startend(t1_sobm[r], p)
    plot(s, r, 'b+')
    plot(e, r, 'g+')

subplot(4,3,6)
ylim(-600,1000)
plot(range(len(test1[r])), test1[r])
plot(range(len(t1_sobm[r])), t1_sobm[r])
axhline(y=0, color='green', ls='--')
plot(t1sm_peaks, t1_sobm[r][t1sm_peaks], 'r+')

###############################################################################
subplot(4,3,7)
title('rena, blurred')
imshow(test2)
axis('off')

subplot(4,3,8)
imshow(t2_sob)
xlim(0,t2_sob.shape[1])
ylim(t2_sob.shape[0],0)
xticks(range(0,350,100))
for p in t2s_peaks:
    s,e = mj.signal.find_edge_startend(t2_sob[r], p)
    plot(s, r, 'b+')
    plot(e, r, 'g+')

subplot(4,3,9)
ylim(-600,1000)
plot(range(len(test2[r])), test2[r])
plot(range(len(t2_sob[r])), t2_sob[r])
axhline(y=0, color='green', ls='--')
plot(t2s_peaks, t2_sob[r][t2s_peaks], 'r+')

###############################################################################
subplot(4,3,10)
title('rena, blurred - med.filt.')
imshow(t2_med)
axis('off')

subplot(4,3,11)
imshow(t2_sobm)
xlim(0,test2.shape[1])
ylim(test2.shape[0],0)
xticks(range(0,350,100))
for p in t2sm_peaks:
    s,e = mj.signal.find_edge_startend(t2_sobm[r], p)
    plot(s, r, 'b+')
    plot(e, r, 'g+')

subplot(4,3,12)
ylim(-600,1000)
plot(range(len(test2[r])), test2[r])
plot(range(len(t2_sobm[r])), t2_sobm[r])
axhline(y=0, color='green', ls='--')
plot(t2sm_peaks, t2_sobm[r][t2sm_peaks], 'r+')

#
#imx0 = zeros(imr.shape)
#filters.sobel(imr, 1, imx0)
#
#imx2 = zeros(imr.shape)
#filters.sobel(img2, 1, imx2)
#
#imx3 = zeros(imr.shape)
#filters.sobel(img3, 1, imx3)
#
#imx4 = zeros(imr.shape)
#filters.sobel(img4, 1, imx4)
#
#subplot(241)
#imshow(imx0)
#title('Untouched')
#subplot(245)
#plot(range(len(imr[r])), imr[r])
#plot(range(len(imx0[r])), imx0[r])
#
#subplot(242)
#imshow(imx2)
#title(r'Gaussian, $\sigma=2$')
#subplot(246)
#plot(range(len(img2[r])), img2[r])
#plot(range(len(imx2[r])), imx2[r])
#
#subplot(243)
#imshow(imx3)
#title(r'Gaussian, $\sigma=3$')
#subplot(247)
#plot(range(len(img3[r])), img3[r])
#plot(range(len(imx3[r])), imx3[r])
#
#subplot(244)
#imshow(imx4)
#title(r'Gaussian, $\sigma=4$')
#subplot(248)
#plot(range(len(img4[r])), img4[r])
#plot(range(len(imx4[r])), imx4[r])
#
show() | buruzaemon/moojoos | src/blur_estimate.py | Python | bsd-2-clause | 4,805 | [
"Gaussian"
] | 524e74206aaddc27a2db04d663c15cf44f2fac46f39fb2ed4de47d8c328e43a9 |
"""
Objects with No values
"""
from galaxy.datatypes.metadata import MetadataCollection
from galaxy.datatypes.registry import Registry
class RecursiveNone:
    """A None-like placeholder: prints as "None", is falsy, and hands out
    (and caches) another RecursiveNone for any attribute access."""

    def __str__(self):
        return "None"

    def __repr__(self):
        return str(self)

    def __getattr__(self, name):
        # Cache the child so repeated access returns the same object.
        value = RecursiveNone()
        setattr(self, name, value)
        return value

    def __nonzero__(self):
        return False

    # Python 3 spelling of __nonzero__; keeps instances falsy on both
    # interpreters (without this, bool(obj) is True under Python 3).
    __bool__ = __nonzero__
class NoneDataset( RecursiveNone ):
    # Stand-in for a missing dataset: ext/dbkey/datatype/metadata behave
    # normally, while every other attribute reads as the string "None".
    def __init__( self, datatypes_registry = None, ext = 'data', dbkey = '?' ):
        self.ext = self.extension = ext
        self.dbkey = dbkey
        if datatypes_registry is None:
            # Default Value Required for unit tests
            datatypes_registry = Registry()
            datatypes_registry.load_datatypes()
        self.datatype = datatypes_registry.get_datatype_by_extension( ext )
        # _metadata must exist before MetadataCollection is attached, since
        # the collection is constructed around this dataset instance.
        self._metadata = None
        self.metadata = MetadataCollection( self )

    def __getattr__( self, name ):
        # Unlike the base class, unknown attributes resolve to the *string*
        # "None" rather than another recursive placeholder.
        return "None"

    def missing_meta( self ):
        return False
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/util/none_like.py | Python | gpl-3.0 | 1,064 | [
"Galaxy"
] | 22c715c634511eeb1d5594fba86139cd7c03e2d49c5a68369c734fbeafe81636 |
import time
import sys
import os
import numpy
import math
from past.utils import old_div
try:
import cosmology
import extras
import astrometry
import aux
except ImportError:
sys.path.append(f'{os.environ["HOME"]}/Projects/'
'planckClusters/MOSAICpipe/pipe_utils')
import cosmology
import extras
import astrometry
import aux
# get the utils from the parent directory
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import (p_BCG, mklogarray, histo, bin_data, color)
# Short module-level handles used throughout this file.
sout = sys.stderr  # all progress/status messages are written to stderr
land = numpy.logical_and  # alias: element-wise logical AND
lor = numpy.logical_or  # alias: element-wise logical OR
##################################################################
# Define the sub-sample for BCGs candidates around a position
##################################################################
def get_BCG_candidates(self, Mr_limit=-22.71, p_lim=1e-4):
    """Select Brightest Cluster Galaxy (BCG) candidates from the catalog.

    Builds a combined selection mask (``self.mask_BCG``) from apparent
    magnitude, type, CLASS_STAR and BCG-probability cuts, evaluates the
    elliptical-SED model colors on ``self.zx``, and copies the selected
    objects' properties into the ``self.*_BCG`` attributes.

    Parameters
    ----------
    Mr_limit : float
        r-band absolute-magnitude limit for BCGs at z=0.1.
    p_lim : float
        Nominal minimum BCG probability.  NOTE(review): this argument is
        overwritten below with ``max(self.p) * 0.8`` and therefore has no
        effect as passed; kept for backward compatibility.
    """
    t0 = time.time()
    sout.write("# Computing p_BCG probabilities...\n")

    # The Abs mag limit @ z=0.1 in the i-band, obtained from the r-band
    # limit for an elliptical (El_Benitez2003) SED.
    Mi_limit = cosmology.reobs(
        'El_Benitez2003',
        m=Mr_limit,
        oldfilter="r_MOSAICII",
        newfilter="i_MOSAICII")

    # Evolution-corrected limits at each object's photo-z, referenced to
    # z=0.1.  BUG FIX: these were previously computed only inside the
    # `if not self.BCG_probs:` branch below, so a second call (with
    # self.BCG_probs already True) raised NameError at the mask_br /
    # mask_bi cuts.  Computing them unconditionally yields the same
    # values on the first call and fixes repeat calls.
    Mr_BCG_limit = Mr_limit + self.ev_r - self.evf['r'](0.1)  # + self.DM_factor
    Mi_BCG_limit = Mi_limit + self.ev_i - self.evf['i'](0.1)  # + self.DM_factor

    # Evaluate the generic BCG probability function only once per catalog.
    if not self.BCG_probs:
        self.p = p_BCG(self.Mr, Mr_BCG_limit)
        self.BCG_probs = True

    i_lim = 25.0  # i-band faint apparent-magnitude limit
    star_lim = self.starlim
    # Overrides the p_lim argument (see docstring note).
    p_lim = max(self.p) * 0.8
    sout.write("\tAvoiding BCG_prob < %.3f in BGCs\n" % p_lim)

    # Probability, magnitude and type cuts (1 = keep, 0 = drop).
    mask_p = numpy.where(self.p >= p_lim, 1, 0)
    mask_g = numpy.where(self.g < i_lim + 5, 1, 0)
    mask_r = numpy.where(self.r < i_lim + 2, 1, 0)
    mask_i = numpy.where(self.i < i_lim, 1, 0)
    mask_z = numpy.where(self.z < i_lim + 1, 1, 0)
    mask_t = numpy.where(self.type <= 2.0, 1, 0)

    # Avoid freakishly bright objects, 2.5 mags brighter than the
    # M_BCG_limit
    mask_br = numpy.where(self.Mr > Mr_BCG_limit - 2.5, 1, 0)
    mask_bi = numpy.where(self.Mi > Mi_BCG_limit - 2.5, 1, 0)

    # Put a more strict cut in class_star for bcg candidates
    sout.write("\tAvoiding CLASS_STAR > %s in BGCs\n" % star_lim)
    mask_star = numpy.where(self.class_star <= star_lim, 1, 0)

    # Construct the final mask now
    self.mask_BCG = (mask_t * mask_g * mask_r * mask_i * mask_z *
                     mask_br * mask_bi * mask_p * mask_star)
    self.BCG_masked = True

    # Elliptical-SED model colors vs redshift, evaluated on self.zx
    # (assumed set elsewhere on the instance -- TODO confirm).
    self.gr_model = cosmology.color_z(
        sed='El_Benitez2003',
        filter_new='g_MOSAICII',
        filter_old='r_MOSAICII',
        z=self.zx,
        calibration='AB')
    self.ri_model = cosmology.color_z(
        sed='El_Benitez2003',
        filter_new='r_MOSAICII',
        filter_old='i_MOSAICII',
        z=self.zx,
        calibration='AB')
    self.iz_model = cosmology.color_z(
        sed='El_Benitez2003',
        filter_new='i_MOSAICII',
        filter_old='z_MOSAICII',
        z=self.zx,
        calibration='AB')
    sout.write(" \tDone: %s\n" % extras.elapsed_time_str(t0))

    # Select the candidates now
    idx = numpy.where(self.mask_BCG == 1)

    # And pass up to to class
    self.idx_BCG = idx
    self.id_BCG = self.id[idx]
    self.ra_BCG = self.ra[idx]
    self.dec_BCG = self.dec[idx]
    self.p_BCG = self.p[idx]
    self.z_BCG = self.z_ph[idx]
    self.t_BCG = self.type[idx]
    self.N_BCG = len(idx[0])
    self.Mi_BCG = self.Mi[idx]
    self.Mr_BCG = self.Mr[idx]
    self.DM_BCG = self.DM[idx]  # distance modulus
    self.dang_BCG = self.dang[idx]  # NOTE(review): original comment said "distance modulus"; presumably angular distance -- confirm
    self.zml_BCG = self.z_ml[idx]
    self.tml_BCG = self.t_ml[idx]
    self.zb_BCG = self.z_b[idx]
    self.tb_BCG = self.t_b[idx]
    self.class_BCG = self.class_star[idx]
    self.a_BCG = self.a_image[idx]
    self.b_BCG = self.b_image[idx]
    self.theta_BCG = self.theta[idx]
    # r,i-band stuff
    self.r_BCG = self.r[idx]
    self.i_BCG = self.i[idx]
    # Get the 1-sigma intervals
    self.z1_BCG = self.z1[idx]
    self.z2_BCG = self.z2[idx]
    # The r-band Luminosity of the BCGs
    self.LBCG = self.Lr[idx]

    # Report the number of candidates found (green if any, red blink if none).
    if self.N_BCG:
        sout.write(color("\tFound %s BCG candidates\n" % self.N_BCG, 36, 1))
    else:
        sout.write(color("\tFound %s BCG candidates\n" % self.N_BCG, 31, 5))
    return
########################################################
# Modified/updated from find_clusters_ext_auto.py
# Select galaxies around ID galaxy un redshift range
########################################################
def select_members_radius(self, i, Mi_lim=-20.25, radius=500.0, zo=None):
    """Select probable cluster members around the i-th BCG candidate.

    Applies projected-radius, photo-z shell and luminosity cuts, then
    iteratively 3-sigma clips in g-r and r-i color using the core
    (r/2) galaxies.  The selection is stored on ``self`` (iRadius, Ngal,
    z_cl, ...) and the error-weighted cluster redshift and its rms are
    returned as ``(z_cl, z_clrms)``.
    """
    # Width of the redshift shell
    dz = self.dz
    t0 = time.time()
    sout.write("# Selecting Cluster members... Ngal, N200, R200 \n")
    # Get the relevant info for ith BCG
    ra0 = self.ra[i]
    dec0 = self.dec[i]
    Mi_BCG = self.Mi[i]
    #DM = self.DM[i]
    ID_BCG = self.id[i]
    if zo:
        print("# Will use z:%.3f for cluster -- from user!" % zo)
    else:
        zo = self.z_ph[i]
        print("# Will use z:%.3f for cluster -- from data!" % zo)
    # 1 - Select in position around ra0,dec0
    # Define radius in degress @ zo
    R = radius  # in kpc
    r = astrometry.kpc2arc(zo, R, self.cosmo) / 3600.  # in degrees.
    rcore = r / 2.0  # inner "core" radius used for the color clipping
    dist = astrometry.circle_distance(ra0,
                                      dec0,
                                      self.ra,
                                      self.dec,
                                      units='deg')
    mask_R = numpy.where(dist <= r, 1, 0)
    mask_rcore = numpy.where(dist <= rcore, 1, 0)
    arcmin2Mpc = astrometry.arc2kpc(
        zo, 60.0, self.cosmo) / 1000.0  # scale between arcmin and Mpc
    # 2 - Select in redshift
    z1 = zo - dz
    z2 = zo + dz
    mask_z = numpy.where(land(self.z_ph >= z1, self.z_ph <= z2), 1, 0)
    # 3 - Select in brightness
    Mi_lim_zo = Mi_lim + self.evf['i'](zo) - self.evf['i'](0.1)
    mask_L1 = numpy.where(self.Mi <= Mi_lim_zo, 1, 0)  # Faint cut > 0.4L*
    mask_L2 = numpy.where(self.Mi >= Mi_BCG, 1, 0)  # Bright cut < L_BCG
    # The final selection mask, position x redshift x Luminosity
    #idx = numpy.where(mask_R * mask_L1 * mask_L2 * mask_z == 1)[0]
    idc = numpy.where(mask_rcore * mask_L1 * mask_L2 * mask_z == 1)[0]
    # Shot versions handles
    gr = self.gr
    ri = self.ri
    # Some simple 3-sigma clipping defined using r< rcore
    Nsigma = 3.0
    loop = 1
    converge = False
    # Iterate until no core galaxy deviates > Nsigma in either color.
    while not converge:
        # The conditions to apply
        c1 = numpy.abs(gr[idc] - gr[idc].mean()) > Nsigma * numpy.std(
            gr[idc], ddof=1)
        c2 = numpy.abs(ri[idc] - ri[idc].mean()) > Nsigma * numpy.std(
            ri[idc], ddof=1)
        iclip = numpy.where(lor(
            c1, c2))[0]  # where any of the conditions fails
        if len(iclip) > 0:
            idc = numpy.delete(idc, iclip)  # Removed failed ones
            converge = False
        else:
            converge = True
        loop += 1
    # Get the average redshift within the core:
    #z_cl = self.z_ph[idc].mean()
    #z_clrms = self.z_ph[idc].std()
    #print(idc)
    #print(self.z_ph[idc])
    # Compute the weighted average and rms
    # NOTE: `dz` is re-used here -- from this point it holds each core
    # galaxy's photo-z half-width, not the shell width set at the top.
    dz = 0.5 * numpy.abs(self.z2[idc] - self.z1[idc])
    # Fix zeros
    dz[dz == 0] = 1e-5
    z_cl, z_clrms = aux.statsw(self.z_ph[idc], weight=1.0 / dz)
    sout.write(" \t Done: %s\n" % extras.elapsed_time_str(t0))
    # Or we can make a new mask where the condition's are true
    c1 = numpy.abs(self.gr - gr[idc].mean()) > Nsigma * numpy.std(
        gr[idc], ddof=1)
    c2 = numpy.abs(self.ri - ri[idc].mean()) > Nsigma * numpy.std(
        ri[idc], ddof=1)
    mask_cm = numpy.where(lor(c1, c2), 0, 1)  # where condition fails
    iRadius = numpy.where(
        mask_R * mask_L1 * mask_L2 * mask_z * mask_cm == 1)
    iRadius_all = numpy.where(mask_L1 * mask_L2 * mask_z * mask_cm == 1)
    Ngal = len(iRadius[0])
    sout.write(color("# Total: %s objects selected in %s [kpc] around %s\n" %
                     (Ngal, radius, self.ID), 36, 1))
    # Pass up
    self.iRadius = iRadius
    self.arcmin2Mpc = arcmin2Mpc
    self.dist2BCG = dist
    self.Lsum = self.Lr[iRadius].sum()
    self.Ngal = Ngal
    self.z_cl = z_cl
    self.z_clerr = z_clrms
    self.rdeg = r  # in degress
    self.r1Mpc = r  # naming fix for background estimates
    self.idc = idc  # galaxies used for mean redshift
    self.ID_BCG = ID_BCG
    # Sort indices radially for galaxies < N*R1Mpc, will be used later
    i = numpy.argsort(self.dist2BCG[iRadius_all])
    self.ix_radial = iRadius_all[0][i]
    return z_cl, z_clrms
##########################################
# Compute the Background for the clusters
##########################################
def background(self):
    """Estimate the galaxy background and correct the member count.

    Builds a log-binned radial surface-density profile around the BCG,
    measures the mean background density in an annulus of 4-9 r1Mpc
    (shrunk if it exceeds the image), and stores the corrected count
    ``self.Ngal_c`` and its uncertainty ``self.d_Ngal_c``.
    """
    ixr = self.ix_radial
    # No back substraction
    if self.Ngal <= 2:
        self.Ngal_c = self.Ngal
        print(color('Background -- Not enough galaxies found in cluster', 31, 5))
        return
    # Store radially ordered
    r = self.dist2BCG[ixr] * 60.0  # in arcmin
    Lr = self.Lr[ixr]  # We do in the r-band as Reyes et al
    # Bin the Ngal/Lum data in log spacing
    n = 10
    rbin = mklogarray(0.0, r.max(), n)
    Nbin, rcenter = histo(r, rbin, center='yes')
    Lbin, rcenter = bin_data(r, Lr, rbin, center='yes')
    # Compute the area in each shell
    ir = numpy.indices(rbin.shape)[0]
    ir1 = ir[:-1]
    ir2 = ir[1:]
    r1 = rbin[ir1]
    r2 = rbin[ir2]
    abin = math.pi * (r2**2 - r1**2)
    PN = old_div(Nbin, abin)  # Number Surface density
    # Compute the background median density both in Lum and Ngal
    # Between 4.0 - 9.0 r1Mpc
    R1 = 4.0 * self.r1Mpc * 60.0
    R2 = 9.0 * self.r1Mpc * 60.0
    print("# Estimating Background between R1,R2 %.2f--%2.f[arcmin]" %
          (R1, R2))
    if R2 >= r.max():
        # The nominal annulus falls off the image; shrink it to the
        # outermost 2*r1Mpc of the available data.
        print(color('\tBackground R2 > image limits! -- recomputing', 31, 0))
        R2 = r2.max()
        R1 = R2 - 2.0 * self.r1Mpc * 60.0
        print("# Estimating Background between R1,R2 %.2f--%2.f[arcmin]" %
              (R1, R2))
    PN_bgr = PN[land(rcenter > R1, rcenter < R2)]
    # Get the mean values for the Ngal and Lr profiles, which will
    # be the correction per arcmin^2
    PN_mean = numpy.mean(PN_bgr)
    print('\tmean number of BG galaxies -- {}'.format(PN_mean))
    # Total number in area
    N_bgr = PN_bgr.sum()
    area_bgr = math.pi * (R2**2 - R1**2)
    # Get the correction for Number of galaxies and Luminosoty
    # For R200 we need to recompute R200 and N200 based on new
    # R200 value.
    area_r1Mpc = math.pi * (self.r1Mpc * 60.)**2  # in arcmin2
    self.Ngal_c = self.Ngal - PN_mean * area_r1Mpc
    if self.Ngal_c < 0:
        self.Ngal_c = 0.0
    # NOTE(review): leftover debugging output; requires self.iclose,
    # self.x_image, self.y_image to be set -- confirm before relying on
    # this method in production paths.
    print('---- test stuff -----')
    print(self.iclose)
    print(self.x_image[self.iclose], self.y_image[self.iclose])
    # print(self.Ngal)
    # print(PN)
    # print(r1)
    # print(rcenter)
    # print(R1,R2)
    # print(r.min(),r.max())
    # print("PN_mean",PN_mean)
    # print(PN_bgr)
    # print(area_r1Mpc)
    # print("Ngal ", self.Ngal)
    # print("Ngal_c", self.Ngal_c)
    #print("r200_c",self.r200_c)
    #print("R200_c",self.R200_c)
    self.d_Ngal_c2 = self.Ngal_c + (
        (old_div(area_r1Mpc, area_bgr))**2) * N_bgr
    # Avoid sqrt of negative number
    if self.d_Ngal_c2 < 0:
        self.d_Ngal_c = 0
    else:
        self.d_Ngal_c = math.sqrt(self.Ngal_c + (
            (old_div(area_r1Mpc, area_bgr))**2) * N_bgr)
    return
##########################################
# Compute the Background for the clusters
##########################################
def background_map(self):
    """Background estimate using an image mask for the background area.

    Like ``background()``, but measures the background beyond 3*r1Mpc
    and computes the background area by masking the cluster region on a
    pixel grid the size of ``self.jpg_array``.  Stores ``self.Ngal_c``
    and ``self.d_Ngal_c``.
    """
    ixr = self.ix_radial
    # bcg index
    i = self.iclose
    # No back substraction
    if self.Ngal <= 2:
        self.Ngal_c = self.Ngal
        print(color('Background -- Not enough galaxies found in cluster', 31, 5))
        return
    # Store radially ordered
    r = self.dist2BCG[ixr] * 60.0  # in arcmin
    Lr = self.Lr[ixr]  # We do in the r-band as Reyes et al
    # Bin the Ngal/Lum data in log spacing
    n = 10
    rbin = mklogarray(0.0, r.max(), n)
    Nbin, rcenter = histo(r, rbin, center='yes')
    Lbin, rcenter = bin_data(r, Lr, rbin, center='yes')
    # Compute the area in each shell
    ir = numpy.indices(rbin.shape)[0]
    ir1 = ir[:-1]
    ir2 = ir[1:]
    r1 = rbin[ir1]
    r2 = rbin[ir2]
    abin = math.pi * (r2**2 - r1**2)
    PN = old_div(Nbin, abin)  # Number Surface density
    # Compute the background median density both in Lum and Ngal
    # Here's is where we are going to make a couple of maps to compute the areas
    # for the background
    R1 = 3.0 * self.r1Mpc * 60.0
    R2 = r.max()  # go all the way out
    print("# Estimating Background @ r > 3mpc -- %.2f - %.2f [arcmin]" % (R1, R2))
    PN_bgr = PN[rcenter > R1]
    # Get the mean values for the Ngal and Lr profiles, which will
    # be the correction per arcmin^2
    PN_mean = numpy.mean(PN_bgr)
    print('\tmean number of BG galaxies -- {}'.format(PN_mean))
    # Total number in background area
    N_bgr = PN_bgr.sum()
    # get the area of the background. We'll make a 'blank' image the same size as
    # our input image and then sum over the pixels that are either in or out of
    # the cluster region.
    # cluster location
    a, b = round(self.x_image[self.iclose]), round(self.y_image[self.iclose])
    # size of the image (assumes a square image -- TODO confirm)
    n = self.jpg_array.shape[0]
    # cluster radius in arcseconds converted to pixels.
    r = R1 * 60 / self.pixscale
    # create pixel grid
    y,x = numpy.ogrid[-a:n-a, -b:n-b]
    # mask the cluster region
    mask = x*x + y*y <= r*r
    # create new 'bool' image
    img_array = numpy.ones((n, n), dtype='bool')
    # the cluster region becomes 'false' or zero
    img_array[mask] = False
    # sum the background region gives the number of pixels. Multiply by the pixel
    # scale to get the total area. Convert to arcminutes.
    # NOTE(review): pixscale/60 is a *length* conversion; an area in
    # arcmin^2 would need pixscale**2 / 3600 -- confirm pixscale units
    # and the intended units of area_bgr.
    area_bgr = img_array.sum() * self.pixscale / 60
    # Get the correction for Number of galaxies and Luminosoty
    # For R200 we need to recompute R200 and N200 based on new
    # R200 value.
    area_r1Mpc = math.pi * (self.r1Mpc * 60.)**2  # in arcmin2
    # use the inverse of the cluster mask to find the cluster area
    # NOTE(review): area_r1mpc (lowercase) is computed but never used;
    # the analytic area_r1Mpc above is what is actually applied below.
    area_r1mpc = (n**2 - img_array.sum()) * self.pixscale / 60
    self.Ngal_c = self.Ngal - PN_mean * area_r1Mpc
    if self.Ngal_c < 0:
        self.Ngal_c = 0.0
    self.d_Ngal_c2 = self.Ngal_c + (
        (old_div(area_r1Mpc, area_bgr))**2) * N_bgr
    # Avoid sqrt of negative number
    if self.d_Ngal_c2 < 0:
        self.d_Ngal_c = 0
    else:
        self.d_Ngal_c = math.sqrt(self.Ngal_c + (
            (old_div(area_r1Mpc, area_bgr))**2) * N_bgr)
    return
| boada/planckClusters | CLUSTERpipe/plugins/_galaxies.py | Python | mit | 15,299 | [
"Galaxy"
] | 52e64fa7b21d2fb03128c14c66677ea49cb7f6c9946ef1791bbb24be72c203c0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import ldap
import re
import sys
import urllib
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false
import desktop.conf
from desktop.lib.django_test_util import make_logged_in_client
from django.contrib.auth.models import User, Group
from django.utils.encoding import smart_unicode
from django.core.urlresolvers import reverse
from django.test.client import Client
from useradmin.models import HuePermission, GroupPermission, UserProfile
from useradmin.models import get_profile, get_default_user_group
import useradmin.conf
import useradmin.ldap_access
from hadoop import pseudo_hdfs4
from useradmin.password_policy import reset_password_policy
def reset_all_users():
    """Delete every User so each test starts from a clean slate."""
    for existing_user in User.objects.all():
        existing_user.delete()
def reset_all_groups():
    """Delete every Group and clear the default-user-group override."""
    useradmin.conf.DEFAULT_USER_GROUP.set_for_testing(None)
    for existing_group in Group.objects.all():
        existing_group.delete()
class LdapTestConnection(object):
    """
    Test class which mimics the behaviour of LdapConnection (from ldap_access.py).
    It also includes functionality to fake modifications to an LDAP server,
    so tests can add/remove (posix) group memberships in memory.

    This class assumes uid is the user_name_attr.

    NOTE(review): despite being described as a singleton in the original,
    each LdapTestConnection() builds its own fresh Data instance, so
    changes do not actually persist across separate connections.
    """
    def __init__(self):
        self._instance = LdapTestConnection.Data()

    def add_user_group_for_test(self, user, group):
        self._instance.groups[group]['members'].append(user)

    def remove_user_group_for_test(self, user, group):
        self._instance.groups[group]['members'].remove(user)

    def add_posix_user_group_for_test(self, user, group):
        self._instance.groups[group]['posix_members'].append(user)

    def remove_posix_user_group_for_test(self, user, group):
        self._instance.groups[group]['posix_members'].remove(user)

    def find_users(self, username_pattern, search_attr=None, user_name_attr=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
        """ Returns info for a particular user via a case insensitive search """
        if find_by_dn:
            data = filter(lambda attrs: attrs['dn'] == username_pattern, self._instance.users.values())
        else:
            # Translate the LDAP wildcard pattern into an anchored,
            # case-insensitive regex over the user names.
            username_pattern = "^%s$" % username_pattern.replace('.','\\.').replace('*', '.*')
            username_fsm = re.compile(username_pattern, flags=re.I)
            usernames = filter(lambda username: username_fsm.match(username), self._instance.users.keys())
            data = [self._instance.users.get(username) for username in usernames]
        return data

    def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
        """ Return all groups in the system with parents and children """
        if find_by_dn:
            data = filter(lambda attrs: attrs['dn'] == groupname_pattern, self._instance.groups.values())
            # SCOPE_SUBTREE means we return all sub-entries of the desired entry along with the desired entry.
            if data and scope == ldap.SCOPE_SUBTREE:
                sub_data = filter(lambda attrs: attrs['dn'].endswith(data[0]['dn']), self._instance.groups.values())
                data.extend(sub_data)
        else:
            # Case-sensitive (re.match) pattern search over group names.
            groupname_pattern = "^%s$" % groupname_pattern.replace('.','\\.').replace('*', '.*')
            groupnames = filter(lambda username: re.match(groupname_pattern, username), self._instance.groups.keys())
            data = [self._instance.groups.get(groupname) for groupname in groupnames]
        return data

    def find_members_of_group(self, dn, search_attr, ldap_filter, scope=ldap.SCOPE_SUBTREE):
        """ Return user and group entries that are members of the group with the given dn. """
        # BUG FIX: the original iterated over the dicts themselves, which
        # yields key *strings*, so group_info['dn'] raised TypeError.
        # Iterate over .values() like the other finder methods do.
        members = []
        for group_info in self._instance.groups.values():
            if group_info['dn'] == dn:
                members.extend(group_info['members'])

        members = set(members)
        users = []
        for user_info in self._instance.users.values():
            if user_info['dn'] in members:
                users.append(user_info)

        groups = []
        for group_info in self._instance.groups.values():
            if group_info['dn'] in members:
                groups.append(group_info)

        return users + groups

    def find_users_of_group(self, dn):
        """ Return the user entries that are direct members of the group with the given dn. """
        members = []
        for group_info in self._instance.groups.values():
            if group_info['dn'] == dn:
                members.extend(group_info['members'])
        members = set(members)
        users = []
        for user_info in self._instance.users.values():
            if user_info['dn'] in members:
                users.append(user_info)
        return users

    def find_groups_of_group(self, dn):
        """ Return the group entries that are direct members of the group with the given dn. """
        members = []
        for group_info in self._instance.groups.values():
            if group_info['dn'] == dn:
                members.extend(group_info['members'])
        groups = []
        for group_info in self._instance.groups.values():
            if group_info['dn'] in members:
                groups.append(group_info)
        return groups

    class Data:
        # In-memory fixture: users keyed by uid, groups keyed by cn.
        def __init__(self):
            self.users = {'moe': {'dn': 'uid=moe,ou=People,dc=example,dc=com', 'username':'moe', 'first':'Moe', 'email':'moe@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com']},
                          'lårry': {'dn': 'uid=lårry,ou=People,dc=example,dc=com', 'username':'lårry', 'first':'Larry', 'last':'Stooge', 'email':'larry@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com', 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
                          'curly': {'dn': 'uid=curly,ou=People,dc=example,dc=com', 'username':'curly', 'first':'Curly', 'last':'Stooge', 'email':'curly@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com', 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
                          'Rock': {'dn': 'uid=Rock,ou=People,dc=example,dc=com', 'username':'Rock', 'first':'rock', 'last':'man', 'email':'rockman@stooges.com', 'groups': ['cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
                          'nestedguy': {'dn': 'uid=nestedguy,ou=People,dc=example,dc=com', 'username':'nestedguy', 'first':'nested', 'last':'guy', 'email':'nestedguy@stooges.com', 'groups': ['cn=NestedGroup,ou=Groups,dc=example,dc=com']},
                          'otherguy': {'dn': 'uid=otherguy,ou=People,dc=example,dc=com', 'username':'otherguy', 'first':'Other', 'last':'Guy', 'email':'other@guy.com'},
                          'posix_person': {'dn': 'uid=posix_person,ou=People,dc=example,dc=com', 'username': 'posix_person', 'first': 'pos', 'last': 'ix', 'email': 'pos@ix.com'},
                          'posix_person2': {'dn': 'uid=posix_person2,ou=People,dc=example,dc=com', 'username': 'posix_person2', 'first': 'pos', 'last': 'ix', 'email': 'pos@ix.com'},
                          'user with space': {'dn': 'uid=user with space,ou=People,dc=example,dc=com', 'username': 'user with space', 'first': 'user', 'last': 'space', 'email': 'user@space.com'},
                          'spaceless': {'dn': 'uid=user without space,ou=People,dc=example,dc=com', 'username': 'spaceless', 'first': 'user', 'last': 'space', 'email': 'user@space.com'},}

            self.groups = {'TestUsers': {
                              'dn': 'cn=TestUsers,ou=Groups,dc=example,dc=com',
                              'name':'TestUsers',
                              'members':['uid=moe,ou=People,dc=example,dc=com','uid=lårry,ou=People,dc=example,dc=com','uid=curly,ou=People,dc=example,dc=com'],
                              'posix_members':[]},
                           'Test Administrators': {
                              'dn': 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com',
                              'name':'Test Administrators',
                              'members':['uid=Rock,ou=People,dc=example,dc=com','uid=lårry,ou=People,dc=example,dc=com','uid=curly,ou=People,dc=example,dc=com'],
                              'posix_members':[]},
                           'OtherGroup': {
                              'dn': 'cn=OtherGroup,cn=TestUsers,ou=Groups,dc=example,dc=com',
                              'name':'OtherGroup',
                              'members':[],
                              'posix_members':[]},
                           'NestedGroups': {
                              'dn': 'cn=NestedGroups,ou=Groups,dc=example,dc=com',
                              'name':'NestedGroups',
                              'members':['cn=NestedGroup,ou=Groups,dc=example,dc=com'],
                              'posix_members':[]
                           },
                           'NestedGroup': {
                              'dn': 'cn=NestedGroup,ou=Groups,dc=example,dc=com',
                              'name':'NestedGroup',
                              'members':['uid=nestedguy,ou=People,dc=example,dc=com'],
                              'posix_members':[]
                           },
                           'NestedPosixGroups': {
                              'dn': 'cn=NestedPosixGroups,ou=Groups,dc=example,dc=com',
                              'name':'NestedPosixGroups',
                              'members':['cn=PosixGroup,ou=Groups,dc=example,dc=com'],
                              'posix_members':[]
                           },
                           'PosixGroup': {
                              'dn': 'cn=PosixGroup,ou=Groups,dc=example,dc=com',
                              'name':'PosixGroup',
                              'members':[],
                              'posix_members':['posix_person','lårry']},
                           'PosixGroup1': {
                              'dn': 'cn=PosixGroup1,cn=PosixGroup,ou=Groups,dc=example,dc=com',
                              'name':'PosixGroup1',
                              'members':[],
                              'posix_members':['posix_person2']},
                          }
def test_invalid_username():
    """Creating a user with an illegal username must be rejected."""
    client = make_logged_in_client(username="test", is_superuser=True)
    for bad in ('-foo', 'foo:o', 'foo o', ' foo'):
        assert_true(client.get('/useradmin/users/new'))
        resp = client.post('/useradmin/users/new',
                           dict(username=bad, password1="test", password2="test"))
        assert_true('not allowed' in resp.context["form"].errors['username'][0])
def test_group_permissions():
    """App access is granted and revoked via group -> HuePermission links."""
    reset_all_users()
    reset_all_groups()
    # Get ourselves set up with a user and a group
    c = make_logged_in_client(username="test", is_superuser=True)
    Group.objects.create(name="test-group")
    test_user = User.objects.get(username="test")
    test_user.groups.add(Group.objects.get(name="test-group"))
    test_user.save()
    # Make sure that a superuser can always access applications
    response = c.get('/useradmin/users')
    assert_true('Hue Users' in response.content)
    assert_true(len(GroupPermission.objects.all()) == 0)
    # Grant the useradmin 'access' permission to the group.
    c.post('/useradmin/groups/edit/test-group',
           dict(name="test-group",
                members=[User.objects.get(username="test").pk],
                permissions=[HuePermission.objects.get(app='useradmin',action='access').pk],
                save="Save"), follow=True)
    assert_true(len(GroupPermission.objects.all()) == 1)
    # Now test that we have limited access
    c1 = make_logged_in_client(username="nonadmin", is_superuser=False)
    response = c1.get('/useradmin/users')
    assert_true('You do not have permission to access the Useradmin application.' in response.content)
    # Add the non-admin to a group that should grant permissions to the app
    test_user = User.objects.get(username="nonadmin")
    test_user.groups.add(Group.objects.get(name='test-group'))
    test_user.save()
    # Check that we have access now
    response = c1.get('/useradmin/users')
    assert_true(get_profile(test_user).has_hue_permission('access','useradmin'))
    assert_true('Hue Users' in response.content)
    # Make sure we can't modify permissions
    response = c1.get('/useradmin/permissions/edit/useradmin/access')
    assert_true('must be a superuser to change permissions' in response.content)
    # And revoke access from the group
    c.post('/useradmin/permissions/edit/useradmin/access',
           dict(app='useradmin',
                priv='access',
                groups=[],
                save="Save"), follow=True)
    assert_true(len(GroupPermission.objects.all()) == 0)
    assert_false(get_profile(test_user).has_hue_permission('access','useradmin'))
    # We should no longer have access to the app
    response = c1.get('/useradmin/users')
    assert_true('You do not have permission to access the Useradmin application.' in response.content)
def test_default_group():
    """The configured default user group cannot be deleted while active."""
    reset_all_users()
    reset_all_groups()

    def group_exists(name):
        return Group.objects.filter(name=name).exists()

    useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default')
    get_default_user_group()
    client = make_logged_in_client(username='test', is_superuser=True)

    # The default group is created on demand.
    assert_true(group_exists('test_default'))

    # Deleting the active default group must be refused.
    assert_true(group_exists('test_default'))
    response = client.post('/useradmin/groups/delete', {'group_names': ['test_default']})
    assert_true('default user group may not be deleted' in response.content)
    assert_true(group_exists('test_default'))

    # Once a different default is configured, the old group can be deleted.
    useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('new_default')
    response = client.post('/useradmin/groups/delete' , {'group_names': ['test_default']})
    assert_false(group_exists('test_default'))
    assert_true(group_exists('new_default'))
def test_get_profile():
    """A UserProfile row is lazily created by the first get_profile() call."""
    reset_all_users()
    reset_all_groups()
    client = make_logged_in_client(username='test', password='test', is_superuser=True)
    assert_equal(0, UserProfile.objects.count())
    profile = get_profile(User.objects.get(username='test'))
    assert_equal(1, UserProfile.objects.count())
def test_group_admin():
    """End-to-end group CRUD, plus superuser-only enforcement for non-admins."""
    reset_all_users()
    reset_all_groups()
    c = make_logged_in_client(username="test", is_superuser=True)
    response = c.get('/useradmin/groups')
    # No groups just yet
    assert_true(len(response.context["groups"]) == 0)
    assert_true("Hue Groups" in response.content)
    # Create a group
    response = c.get('/useradmin/groups/new')
    assert_equal('/useradmin/groups/new', response.context['action'])
    c.post('/useradmin/groups/new', dict(name="testgroup"))
    # We should have an empty group in the DB now
    assert_true(len(Group.objects.all()) == 1)
    assert_true(Group.objects.filter(name="testgroup").exists())
    assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 0)
    # And now, just for kicks, let's try adding a user
    response = c.post('/useradmin/groups/edit/testgroup',
                      dict(name="testgroup",
                           members=[User.objects.get(username="test").pk],
                           save="Save"), follow=True)
    assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 1)
    assert_true(Group.objects.get(name="testgroup").user_set.filter(username="test").exists())
    # Test some permissions
    c2 = make_logged_in_client(username="nonadmin", is_superuser=False)
    # Need to give access to the user for the rest of the test
    group = Group.objects.create(name="access-group")
    perm = HuePermission.objects.get(app='useradmin', action='access')
    GroupPermission.objects.create(group=group, hue_permission=perm)
    test_user = User.objects.get(username="nonadmin")
    test_user.groups.add(Group.objects.get(name="access-group"))
    test_user.save()
    # Make sure non-superusers can't do bad things
    response = c2.get('/useradmin/groups/new')
    assert_true("You must be a superuser" in response.content)
    response = c2.get('/useradmin/groups/edit/testgroup')
    assert_true("You must be a superuser" in response.content)
    response = c2.post('/useradmin/groups/new', dict(name="nonsuperuser"))
    assert_true("You must be a superuser" in response.content)
    response = c2.post('/useradmin/groups/edit/testgroup',
                       dict(name="nonsuperuser",
                            members=[User.objects.get(username="test").pk],
                            save="Save"), follow=True)
    assert_true("You must be a superuser" in response.content)
    # Should be one group left, because we created the other group
    response = c.post('/useradmin/groups/delete', {'group_names': ['testgroup']})
    assert_true(len(Group.objects.all()) == 1)
    # Group names may contain spaces.
    group_count = len(Group.objects.all())
    response = c.post('/useradmin/groups/new', dict(name="with space"))
    assert_equal(len(Group.objects.all()), group_count + 1)
def test_user_admin_password_policy():
    """First-ever login, password change and user creation must all honor
    the configured password-policy regex, hint and error message."""
    reset_all_users()
    reset_all_groups()
    # Set up password policy
    password_hint = password_error_msg = ("The password must be at least 8 characters long, "
                                          "and must contain both uppercase and lowercase letters, "
                                          "at least one number, and at least one special character.")
    password_rule = "^(?=.*?[A-Z])(?=(.*[a-z]){1,})(?=(.*[\d]){1,})(?=(.*[\W_]){1,}).{8,}$"
    useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(True)
    useradmin.conf.PASSWORD_POLICY.PWD_RULE.set_for_testing(password_rule)
    useradmin.conf.PASSWORD_POLICY.PWD_HINT.set_for_testing(password_hint)
    useradmin.conf.PASSWORD_POLICY.PWD_ERROR_MESSAGE.set_for_testing(password_error_msg)
    reset_password_policy()
    # Test first-ever login with password policy enabled
    c = Client()
    response = c.get('/accounts/login/')
    assert_equal(200, response.status_code)
    assert_true(response.context['first_login_ever'])
    # A weak password is rejected on first login.
    response = c.post('/accounts/login/', dict(username="test_first_login", password="foo"))
    assert_true(response.context['first_login_ever'])
    assert_equal([password_error_msg], response.context["form"]["password"].errors)
    # A compliant password creates the first (super)user.
    response = c.post('/accounts/login/', dict(username="test_first_login", password="foobarTest1["), follow=True)
    assert_equal(200, response.status_code)
    assert_true(User.objects.get(username="test_first_login").is_superuser)
    assert_true(User.objects.get(username="test_first_login").check_password("foobarTest1["))
    c.get('/accounts/logout')
    # Test changing a user's password
    c = make_logged_in_client('superuser', is_superuser=True)
    # Test password hint is displayed
    response = c.get('/useradmin/users/edit/superuser')
    assert_true(password_hint in response.content)
    # Password is less than 8 characters
    response = c.post('/useradmin/users/edit/superuser',
                      dict(username="superuser",
                           is_superuser=True,
                           password1="foo",
                           password2="foo"))
    assert_equal([password_error_msg], response.context["form"]["password1"].errors)
    # Password is more than 8 characters long but does not have a special character
    response = c.post('/useradmin/users/edit/superuser',
                      dict(username="superuser",
                           is_superuser=True,
                           password1="foobarTest1",
                           password2="foobarTest1"))
    assert_equal([password_error_msg], response.context["form"]["password1"].errors)
    # Password1 and Password2 are valid but they do not match
    response = c.post('/useradmin/users/edit/superuser',
                      dict(username="superuser",
                           is_superuser=True,
                           password1="foobarTest1??",
                           password2="foobarTest1?",
                           password_old="foobarTest1[",
                           is_active=True))
    assert_equal(["Passwords do not match."], response.context["form"]["password2"].errors)
    # Password is valid now
    c.post('/useradmin/users/edit/superuser',
           dict(username="superuser",
                is_superuser=True,
                password1="foobarTest1[",
                password2="foobarTest1[",
                password_old="test",
                is_active=True))
    assert_true(User.objects.get(username="superuser").is_superuser)
    assert_true(User.objects.get(username="superuser").check_password("foobarTest1["))
    # Test creating a new user
    response = c.get('/useradmin/users/new')
    assert_true(password_hint in response.content)
    # Password is too short and lacks the required character classes
    response = c.post('/useradmin/users/new',
                      dict(username="test_user",
                           is_superuser=False,
                           password1="foo",
                           password2="foo"))
    assert_equal({'password1': [password_error_msg], 'password2': [password_error_msg]},
                 response.context["form"].errors)
    # Password is more than 8 characters long but does not have a special character
    response = c.post('/useradmin/users/new',
                      dict(username="test_user",
                           is_superuser=False,
                           password1="foobarTest1",
                           password2="foobarTest1"))
    assert_equal({'password1': [password_error_msg], 'password2': [password_error_msg]},
                 response.context["form"].errors)
    # Password1 and Password2 are valid but they do not match
    response = c.post('/useradmin/users/new',
                      dict(username="test_user",
                           is_superuser=False,
                           password1="foobarTest1[",
                           password2="foobarTest1?"))
    assert_equal({'password2': ["Passwords do not match."]}, response.context["form"].errors)
    # Password is valid now
    c.post('/useradmin/users/new',
           dict(username="test_user",
                is_superuser=False,
                password1="foobarTest1[",
                password2="foobarTest1[", is_active=True))
    assert_false(User.objects.get(username="test_user").is_superuser)
    assert_true(User.objects.get(username="test_user").check_password("foobarTest1["))
def test_user_admin():
    """End-to-end exercise of the user admin views: editing, password
    changes, user creation, group membership, permission checks,
    deactivation and (bulk) deletion."""
    FUNNY_NAME = '~`!@#$%^&*()_-+={}[]|\;"<>?/,.'
    FUNNY_NAME_QUOTED = urllib.quote(FUNNY_NAME)

    reset_all_users()
    reset_all_groups()
    useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default')
    useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(False)
    reset_password_policy()

    c = make_logged_in_client('test', is_superuser=True)
    user = User.objects.get(username='test')

    # Basic listing output.
    response = c.get('/useradmin/')
    assert_true(len(response.context["users"]) > 0)
    assert_true("Hue Users" in response.content)

    # Editing a superuser: just check that the edit page comes back.
    response = c.get('/useradmin/users/edit/test')

    # Edit it, to add a first and last name.
    response = c.post('/useradmin/users/edit/test',
                      dict(username="test",
                           first_name=u"Inglés",
                           last_name=u"Español",
                           is_superuser="True",
                           is_active="True"),
                      follow=True)
    assert_true("User information updated" in response.content,
                "Notification should be displayed in: %s" % response.content)

    # The username itself must not be editable.
    response = c.post('/useradmin/users/edit/test',
                      dict(username="test2",
                           first_name=u"Inglés",
                           last_name=u"Español",
                           is_superuser="True",
                           is_active="True"),
                      follow=True)
    assert_true("You cannot change a username" in response.content)

    # The name changes above must have been materialized.
    response = c.get('/useradmin/users/edit/test')
    assert_equal(smart_unicode("Inglés"), response.context["form"].instance.first_name)
    assert_true("Español" in response.content)

    # Shouldn't be able to demote the last superuser.
    response = c.post('/useradmin/users/edit/test',
                      dict(username="test",
                           first_name=u"Inglés",
                           last_name=u"Español",
                           is_superuser=False,
                           is_active=True))
    assert_true("You cannot remove" in response.content,
                "Shouldn't be able to remove the last superuser")

    # Shouldn't be able to delete oneself.
    response = c.post('/useradmin/users/delete', {u'user_ids': [user.id]})
    assert_true("You cannot remove yourself" in response.content,
                "Shouldn't be able to delete the last superuser")

    # Mismatched new passwords are rejected.
    response = c.post('/useradmin/users/edit/test',
                      dict(username="test", first_name="Tom", last_name="Tester",
                           is_superuser=True, password1="foo", password2="foobar"))
    assert_equal(["Passwords do not match."],
                 response.context["form"]["password2"].errors,
                 "Should have complained about mismatched password")

    # The old password must be confirmed.
    response = c.post('/useradmin/users/edit/test',
                      dict(username="test", first_name="Tom", last_name="Tester",
                           password1="foo", password2="foo",
                           is_active=True, is_superuser=True))
    assert_equal(["The old password does not match the current password."],
                 response.context["form"]["password_old"].errors,
                 "Should have complained about old password")

    # Good now.
    response = c.post('/useradmin/users/edit/test',
                      dict(username="test", first_name="Tom", last_name="Tester",
                           password1="foo", password2="foo", password_old="test",
                           is_active=True, is_superuser=True))
    assert_true(User.objects.get(username="test").is_superuser)
    assert_true(User.objects.get(username="test").check_password("foo"))

    # Change it back!
    response = c.post('/useradmin/users/edit/test',
                      dict(username="test", first_name="Tom", last_name="Tester",
                           password1="test", password2="test", password_old="foo",
                           is_active="True", is_superuser="True"))
    assert_true(User.objects.get(username="test").check_password("test"))
    assert_true(make_logged_in_client(username="test", password="test"),
                "Check that we can still login.")

    # The new-user form pre-selects the default group.
    group = get_default_user_group()
    response = c.get('/useradmin/users/new')
    assert_true(response)
    assert_true(('<option value="1" selected="selected">%s</option>' % group) in str(response))

    # Create a new regular user (duplicate name).
    response = c.post('/useradmin/users/new',
                      dict(username="test", password1="test", password2="test"))
    assert_equal({'username': ["User with this Username already exists."]},
                 response.context["form"].errors)

    # Create a new regular user (for real).
    response = c.post('/useradmin/users/new',
                      dict(username=FUNNY_NAME,
                           password1="test",
                           password2="test",
                           is_active="True"))
    response = c.get('/useradmin/')
    assert_true(FUNNY_NAME_QUOTED in response.content)
    assert_true(len(response.context["users"]) > 1)
    assert_true("Hue Users" in response.content)
    # Validate profile is created.
    assert_true(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())

    # Need to give access to the user for the rest of the test.
    group = Group.objects.create(name="test-group")
    perm = HuePermission.objects.get(app='useradmin', action='access')
    GroupPermission.objects.create(group=group, hue_permission=perm)

    # Verify that we can modify user groups through the user admin pages.
    response = c.post('/useradmin/users/new',
                      dict(username="group_member", password1="test",
                           password2="test", groups=[group.pk]))
    User.objects.get(username='group_member')
    assert_true(User.objects.get(username='group_member').groups.filter(name='test-group').exists())
    response = c.post('/useradmin/users/edit/group_member',
                      dict(username="group_member", groups=[]))
    assert_false(User.objects.get(username='group_member').groups.filter(name='test-group').exists())

    # Check permissions by logging in as the new user.
    c_reg = make_logged_in_client(username=FUNNY_NAME, password="test")
    test_user = User.objects.get(username=FUNNY_NAME)
    test_user.groups.add(Group.objects.get(name="test-group"))
    test_user.save()

    # Regular user should be able to modify oneself.
    response = c_reg.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
                          dict(username=FUNNY_NAME,
                               first_name="Hello",
                               is_active=True,
                               groups=[group.id for group in test_user.groups.all()]),
                          follow=True)
    assert_equal(response.status_code, 200)
    response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,), follow=True)
    assert_equal(response.status_code, 200)
    assert_equal("Hello", response.context["form"].instance.first_name)
    funny_user = User.objects.get(username=FUNNY_NAME)

    # Can't edit other people.
    response = c_reg.post("/useradmin/users/delete", {u'user_ids': [funny_user.id]})
    assert_true("You must be a superuser" in response.content,
                "Regular user can't edit other people")

    # Revert to regular "test" user, that has superuser powers.
    c_su = make_logged_in_client()

    # Inactivate FUNNY_NAME.
    c_su.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
              dict(username=FUNNY_NAME,
                   first_name="Hello",
                   is_active=False))

    # Now make sure FUNNY_NAME can't log back in.
    response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,))
    assert_true(response.status_code == 302 and "login" in response["location"],
                "Inactivated user gets redirected to login page")

    # Delete that regular user; the profile must go with it.
    funny_profile = get_profile(test_user)
    response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
    assert_equal(302, response.status_code)
    assert_false(User.objects.filter(username=FUNNY_NAME).exists())
    assert_false(UserProfile.objects.filter(id=funny_profile.id).exists())

    # Bulk delete users.
    u1 = User.objects.create(username='u1', password="u1")
    u2 = User.objects.create(username='u2', password="u2")
    assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 2)
    response = c_su.post('/useradmin/users/delete', {u'user_ids': [u1.id, u2.id]})
    assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 0)

    # Make sure that user deletion works if the user has never performed a request.
    funny_user = User.objects.create(username=FUNNY_NAME, password='test')
    assert_true(User.objects.filter(username=FUNNY_NAME).exists())
    assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
    response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
    assert_equal(302, response.status_code)
    assert_false(User.objects.filter(username=FUNNY_NAME).exists())
    assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())

    # You shouldn't be able to create a user without a password.
    response = c_su.post('/useradmin/users/new', dict(username="test"))
    assert_true("You must specify a password when creating a new user." in response.content)
@attr('requires_hadoop')
def test_ensure_home_directory():
    """Check that the `ensure_home_directory` flag creates an HDFS home
    directory on user creation and on user edit.

    Currently disabled via SkipTest.
    """
    raise SkipTest

    reset_all_users()
    reset_all_groups()
    useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(False)
    reset_password_policy()

    # Cluster and client for home directory creation.
    cluster = pseudo_hdfs4.shared_cluster()
    c = make_logged_in_client(cluster.superuser, is_superuser=True, groupname='test1')
    cluster.fs.setuser(cluster.superuser)

    # Create a user with a home directory.
    assert_false(cluster.fs.exists('/user/test1'))
    response = c.post('/useradmin/users/new',
                      dict(username="test1", password1='test', password2='test',
                           ensure_home_directory=True))
    assert_true(cluster.fs.exists('/user/test1'))
    dir_stat = cluster.fs.stats('/user/test1')
    assert_equal('test1', dir_stat.user)
    assert_equal('test1', dir_stat.group)
    assert_equal('40755', '%o' % dir_stat.mode)

    # Create a user, then add their home directory on a later edit.
    assert_false(cluster.fs.exists('/user/test2'))
    response = c.post('/useradmin/users/new',
                      dict(username="test2", password1='test', password2='test'))
    assert_false(cluster.fs.exists('/user/test2'))
    response = c.post('/useradmin/users/edit/%s' % "test2",
                      dict(username="test2", password1='test', password2='test',
                           password_old="test", ensure_home_directory=True))
    assert_true(cluster.fs.exists('/user/test2'))
    dir_stat = cluster.fs.stats('/user/test2')
    assert_equal('test2', dir_stat.user)
    assert_equal('test2', dir_stat.group)
    assert_equal('40755', '%o' % dir_stat.mode)
def test_list_for_autocomplete():
    """Every user sees the other users and all groups through the
    autocomplete endpoint, regardless of group membership."""
    reset_all_users()
    reset_all_groups()

    # Now the autocomplete has access to all the users and groups.
    c1 = make_logged_in_client('test_list_for_autocomplete',
                               is_superuser=False,
                               groupname='test_list_for_autocomplete')
    c2_same_group = make_logged_in_client('test_list_for_autocomplete2',
                                          is_superuser=False,
                                          groupname='test_list_for_autocomplete')
    c3_other_group = make_logged_in_client('test_list_for_autocomplete3',
                                           is_superuser=False,
                                           groupname='test_list_for_autocomplete_other_group')

    # c1 is in the same group as c2.
    response = c1.get(reverse('useradmin.views.list_for_autocomplete'),
                      HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    content = json.loads(response.content)
    users = [entry['username'] for entry in content['users']]
    groups = [entry['name'] for entry in content['groups']]
    assert_equal(['test_list_for_autocomplete2', 'test_list_for_autocomplete3'], users)
    assert_true('test_list_for_autocomplete' in groups, groups)
    assert_true('test_list_for_autocomplete_other_group' in groups, groups)

    # c2 is in the same group as c1.
    response = c2_same_group.get(reverse('useradmin.views.list_for_autocomplete'),
                                 HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    content = json.loads(response.content)
    users = [entry['username'] for entry in content['users']]
    groups = [entry['name'] for entry in content['groups']]
    assert_equal(['test_list_for_autocomplete', 'test_list_for_autocomplete3'], users)
    assert_true('test_list_for_autocomplete' in groups, groups)
    assert_true('test_list_for_autocomplete_other_group' in groups, groups)

    # c3 is alone except for groups.
    response = c3_other_group.get(reverse('useradmin.views.list_for_autocomplete'),
                                  HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    content = json.loads(response.content)
    users = [entry['username'] for entry in content['users']]
    groups = [entry['name'] for entry in content['groups']]
    assert_equal(['test_list_for_autocomplete', 'test_list_for_autocomplete2'], users)
    assert_true('test_list_for_autocomplete' in groups, groups)
    assert_true('test_list_for_autocomplete_other_group' in groups, groups)
class MockLdapConnection(object):
    """Stand-in for ``useradmin.ldap_access.LdapConnection`` that simply
    records its constructor arguments instead of opening a connection."""

    def __init__(self, ldap_config, ldap_url, username, password, ldap_cert):
        # Keep every argument as a same-named attribute for inspection.
        for name, value in (('ldap_config', ldap_config),
                            ('ldap_url', ldap_url),
                            ('username', username),
                            ('password', password),
                            ('ldap_cert', ldap_cert)):
            setattr(self, name, value)
def test_get_connection_bind_password():
    """get_connection_from_server() should pick up the bind password from
    the default LDAP config, or from the named server entry."""
    # Unfortunately our tests leak a cached test ldap connection across
    # functions, so we need to clear it out.
    useradmin.ldap_access.CACHED_LDAP_CONN = None

    # Monkey patch the LdapConnection class as we don't want to make a
    # real connection.
    OriginalLdapConnection = useradmin.ldap_access.LdapConnection
    reset = [
        desktop.conf.LDAP.LDAP_URL.set_for_testing('default.example.com'),
        desktop.conf.LDAP.BIND_PASSWORD.set_for_testing('default-password'),
        desktop.conf.LDAP.LDAP_SERVERS.set_for_testing({
            'test': {
                'ldap_url': 'test.example.com',
                'bind_password': 'test-password',
            }
        })
    ]
    try:
        useradmin.ldap_access.LdapConnection = MockLdapConnection

        # No server name: the default config's password is used.
        connection = useradmin.ldap_access.get_connection_from_server()
        assert_equal(connection.password, 'default-password')

        # Named server: its own password wins.
        connection = useradmin.ldap_access.get_connection_from_server('test')
        assert_equal(connection.password, 'test-password')
    finally:
        useradmin.ldap_access.LdapConnection = OriginalLdapConnection
        for f in reset:
            f()
def test_get_connection_bind_password_script():
    """get_connection_from_server() should obtain the bind password by
    running the configured password script (default and per-server).

    The scripts print a padded password; the loader is expected to strip
    the surrounding whitespace/newlines down to the padded core.
    """
    # Unfortunately our tests leak a cached test ldap connection across
    # functions, so we need to clear it out.
    useradmin.ldap_access.CACHED_LDAP_CONN = None

    # Monkey patch the LdapConnection class as we don't want to make a
    # real connection.
    OriginalLdapConnection = useradmin.ldap_access.LdapConnection
    reset = [
        desktop.conf.LDAP.LDAP_URL.set_for_testing('default.example.com'),
        desktop.conf.LDAP.BIND_PASSWORD_SCRIPT.set_for_testing(
            '%s -c "print \'\\n default password \\n\'"' % sys.executable
        ),
        desktop.conf.LDAP.LDAP_SERVERS.set_for_testing({
            'test': {
                'ldap_url': 'test.example.com',
                'bind_password_script':
                    '%s -c "print \'\\n test password \\n\'"' % sys.executable,
            }
        })
    ]
    try:
        useradmin.ldap_access.LdapConnection = MockLdapConnection

        # No server name: the default script supplies the password.
        connection = useradmin.ldap_access.get_connection_from_server()
        assert_equal(connection.password, ' default password ')

        # Named server: its own script supplies the password.
        connection = useradmin.ldap_access.get_connection_from_server('test')
        assert_equal(connection.password, ' test password ')
    finally:
        useradmin.ldap_access.LdapConnection = OriginalLdapConnection
        for f in reset:
            f()
| kalahbrown/HueBigSQL | apps/useradmin/src/useradmin/tests.py | Python | apache-2.0 | 37,759 | [
"MOE"
] | 098a3a33e0b588466db6067aa00d821dbf7ebafbae36cf25006abf6070e464d9 |
../../../../../../../share/pyshared/orca/scripts/apps/Thunderbird/script.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/Thunderbird/script.py | Python | gpl-3.0 | 75 | [
"ORCA"
] | 3922391b837543ef6fc33fc9942bd76254c2f9bebd3315fcafcbe03dfb0ee81f |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Transitions between Scenes'''
__docformat__ = 'restructuredtext'
import pyglet
from cocos.actions import *
import cocos.scene as scene
from cocos.director import director
from cocos.layer import ColorLayer
from cocos.sprite import Sprite
# Public API of this module: the base class plus every concrete transition.
__all__ = [
    'TransitionScene',
    'RotoZoomTransition', 'JumpZoomTransition',
    'MoveInLTransition', 'MoveInRTransition',
    'MoveInBTransition', 'MoveInTTransition',
    'SlideInLTransition', 'SlideInRTransition',
    'SlideInBTransition', 'SlideInTTransition',
    'FlipX3DTransition', 'FlipY3DTransition', 'FlipAngular3DTransition',
    'ShuffleTransition',
    'TurnOffTilesTransition',
    'FadeTRTransition', 'FadeBLTransition',
    'FadeUpTransition', 'FadeDownTransition',
    'ShrinkGrowTransition',
    'CornerMoveTransition',
    'EnvelopeTransition',
    'SplitRowsTransition', 'SplitColsTransition',
    'FadeTransition',
    'ZoomTransition',
]
class TransitionScene(scene.Scene):
    """TransitionScene

    A Scene that takes two scenes and makes a transition between them.
    These scenes are children of the transition scene; subclasses
    schedule the actual effect and must arrange for `finish()` to be
    called when it ends.
    """

    def __init__(self, dst, duration=1.25, src=None):
        '''Initializes the transition

        :Parameters:
            `dst` : Scene
                Incoming scene
            `duration` : float
                Duration of the transition in seconds. Default: 1.25
            `src` : Scene
                Outgoing scene. Default: current scene
        '''
        super(TransitionScene, self).__init__()

        self.in_scene = dst  #: scene that will replace the old one

        # Fix: identity comparison with None (`src == None` is unidiomatic
        # and can misfire on objects overriding __eq__).
        if src is None:
            src = director.scene

        # If the director is already running a transition scene then
        # terminate it so we may move on.
        if isinstance(src, TransitionScene):
            src.finish()
            src = src.in_scene

        self.out_scene = src  #: scene that will be replaced

        self.duration = duration  #: duration in seconds of the transition
        if not self.duration:
            self.duration = 1.25

        if self.out_scene is None:
            raise Exception("You need to specify a `src` argument")
        if self.out_scene is self.in_scene:
            raise Exception("Incoming scene must be different from outgoing scene")

        self.start()

    def start(self):
        '''Adds the incoming scene with z=1 and the outgoing scene with z=0'''
        self.add(self.in_scene, z=1)
        self.add(self.out_scene, z=0)

    def finish(self):
        '''Called when the time is over.

        It removes both the incoming and the outgoing scenes from the
        transition scene, restores the outgoing scene's position, visible
        and scale attributes, and asks the director to show the incoming
        scene.
        '''
        self.remove(self.in_scene)
        self.remove(self.out_scene)
        self.restore_out()
        director.replace(self.in_scene)

    def hide_out_show_in(self):
        '''Hides the outgoing scene and shows the incoming scene'''
        self.in_scene.visible = True
        self.out_scene.visible = False

    def hide_all(self):
        '''Hides both the incoming and outgoing scenes'''
        self.in_scene.visible = False
        self.out_scene.visible = False

    def restore_out(self):
        '''Restores the position, visible and scale attributes of the
        outgoing scene to their original values'''
        self.out_scene.visible = True
        self.out_scene.position = (0, 0)
        self.out_scene.scale = 1
class RotoZoomTransition(TransitionScene):
    '''Rotate and zoom out the outgoing scene, then rotate and zoom in
    the incoming one.'''

    def __init__(self, *args, **kwargs):
        super(RotoZoomTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()

        self.in_scene.scale = 0.001
        self.out_scene.scale = 1.0
        # Spin and zoom around the center of the window.
        self.in_scene.transform_anchor = (width // 2, height // 2)
        self.out_scene.transform_anchor = (width // 2, height // 2)

        rotozoom = (ScaleBy(0.001, duration=self.duration / 2.0) |
                    Rotate(360 * 2, duration=self.duration / 2.0)) + \
                   Delay(self.duration / 2.0)

        self.out_scene.do(rotozoom)
        self.in_scene.do(Reverse(rotozoom) + CallFunc(self.finish))
class JumpZoomTransition(TransitionScene):
    '''Zoom out and jump the outgoing scene, then jump and zoom in the
    incoming one.'''

    def __init__(self, *args, **kwargs):
        super(JumpZoomTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()

        self.in_scene.scale = 0.5
        self.in_scene.position = (width, 0)
        self.in_scene.transform_anchor = (width // 2, height // 2)
        self.out_scene.transform_anchor = (width // 2, height // 2)

        jump = JumpBy((-width, 0), width // 4, 2, duration=self.duration / 4.0)
        scalein = ScaleTo(1, duration=self.duration / 4.0)
        scaleout = ScaleTo(0.5, duration=self.duration / 4.0)

        jumpzoomout = scaleout + jump
        jumpzoomin = jump + scalein

        delay = Delay(self.duration / 2.0)
        self.out_scene.do(jumpzoomout)
        self.in_scene.do(delay + jumpzoomin + CallFunc(self.finish))
class MoveInLTransition(TransitionScene):
    '''Move in the incoming scene from the left edge.'''

    def __init__(self, *args, **kwargs):
        super(MoveInLTransition, self).__init__(*args, **kwargs)

        self.init()
        action = self.get_action()
        self.in_scene.do(Accelerate(action, 0.5) + CallFunc(self.finish))

    def init(self):
        # Park the incoming scene just off the left edge.
        width, height = director.get_window_size()
        self.in_scene.position = (-width, 0)

    def get_action(self):
        return MoveTo((0, 0), duration=self.duration)
class MoveInRTransition(MoveInLTransition):
    '''Move in the incoming scene from the right edge.'''

    def init(self):
        # Park the incoming scene just off the right edge.
        width, height = director.get_window_size()
        self.in_scene.position = (width, 0)

    def get_action(self):
        return MoveTo((0, 0), duration=self.duration)
class MoveInTTransition(MoveInLTransition):
    '''Move in the incoming scene from the top edge.'''

    def init(self):
        # Park the incoming scene just above the window.
        width, height = director.get_window_size()
        self.in_scene.position = (0, height)

    def get_action(self):
        return MoveTo((0, 0), duration=self.duration)
class MoveInBTransition(MoveInLTransition):
    '''Move in the incoming scene from the bottom edge.'''

    def init(self):
        # Park the incoming scene just below the window.
        width, height = director.get_window_size()
        self.in_scene.position = (0, -height)

    def get_action(self):
        return MoveTo((0, 0), duration=self.duration)
class SlideInLTransition(TransitionScene):
    '''Slide in the incoming scene from the left border, pushing the
    outgoing scene out the other side.'''

    def __init__(self, *args, **kwargs):
        super(SlideInLTransition, self).__init__(*args, **kwargs)

        self.width, self.height = director.get_window_size()
        self.init()

        # Both scenes slide together so the screens appear glued.
        move = self.get_action()
        self.in_scene.do(Accelerate(move, 0.5))
        self.out_scene.do(Accelerate(move, 0.5) + CallFunc(self.finish))

    def init(self):
        self.in_scene.position = (-self.width, 0)

    def get_action(self):
        return MoveBy((self.width, 0), duration=self.duration)
class SlideInRTransition(SlideInLTransition):
    '''Slide in the incoming scene from the right border.'''

    def init(self):
        self.in_scene.position = (self.width, 0)

    def get_action(self):
        return MoveBy((-self.width, 0), duration=self.duration)
class SlideInTTransition(SlideInLTransition):
    '''Slide in the incoming scene from the top border.'''

    def init(self):
        self.in_scene.position = (0, self.height)

    def get_action(self):
        return MoveBy((0, -self.height), duration=self.duration)
class SlideInBTransition(SlideInLTransition):
    '''Slide in the incoming scene from the bottom border.'''

    def init(self):
        self.in_scene.position = (0, -self.height)

    def get_action(self):
        return MoveBy((0, self.height), duration=self.duration)
class FlipX3DTransition(TransitionScene):
    '''Flips the screen horizontally.
    The front face is the outgoing scene and the back face is the
    incoming scene.'''

    def __init__(self, *args, **kwargs):
        super(FlipX3DTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()

        # Zero-amplitude wave only activates the 3D grid.
        turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
        flip90 = OrbitCamera(angle_x=0, delta_z=90,
                             duration=self.duration / 2.0)
        flipback90 = OrbitCamera(angle_x=0, angle_z=90, delta_z=90,
                                 duration=self.duration / 2.0)

        self.in_scene.visible = False
        # Rotate halfway, swap the visible scene while the grid is
        # edge-on, then complete the rotation.
        flip = (turnongrid +
                flip90 +
                CallFunc(self.hide_all) +
                FlipX3D(duration=0) +
                CallFunc(self.hide_out_show_in) +
                flipback90)

        self.do(flip + CallFunc(self.finish) + StopGrid())
class FlipY3DTransition(TransitionScene):
    '''Flips the screen vertically.
    The front face is the outgoing scene and the back face is the
    incoming scene.'''

    def __init__(self, *args, **kwargs):
        super(FlipY3DTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()

        # Zero-amplitude wave only activates the 3D grid.
        turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
        flip90 = OrbitCamera(angle_x=90, delta_z=-90,
                             duration=self.duration / 2.0)
        flipback90 = OrbitCamera(angle_x=90, angle_z=90, delta_z=90,
                                 duration=self.duration / 2.0)

        self.in_scene.visible = False
        # Rotate halfway, swap the visible scene while the grid is
        # edge-on, then complete the rotation.
        flip = (turnongrid +
                flip90 +
                CallFunc(self.hide_all) +
                FlipX3D(duration=0) +
                CallFunc(self.hide_out_show_in) +
                flipback90)

        self.do(flip + CallFunc(self.finish) + StopGrid())
class FlipAngular3DTransition(TransitionScene):
    '''Flips the screen half horizontally and half vertically.
    The front face is the outgoing scene and the back face is the
    incoming scene.'''

    def __init__(self, *args, **kwargs):
        super(FlipAngular3DTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()

        # Zero-amplitude wave only activates the 3D grid.
        turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
        flip90 = OrbitCamera(angle_x=45, delta_z=90,
                             duration=self.duration / 2.0)
        flipback90 = OrbitCamera(angle_x=45, angle_z=90, delta_z=90,
                                 duration=self.duration / 2.0)

        self.in_scene.visible = False
        # Rotate halfway, swap the visible scene while the grid is
        # edge-on, then complete the rotation.
        flip = (turnongrid +
                flip90 +
                CallFunc(self.hide_all) +
                FlipX3D(duration=0) +
                CallFunc(self.hide_out_show_in) +
                flipback90)

        self.do(flip + CallFunc(self.finish) + StopGrid())
class ShuffleTransition(TransitionScene):
    '''Shuffle the tiles of the outgoing scene, then reorder them over
    the incoming scene.'''

    def __init__(self, *args, **kwargs):
        super(ShuffleTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()
        aspect = width / float(height)
        x, y = int(12 * aspect), 12

        shuffle = ShuffleTiles(grid=(x, y), duration=self.duration / 2.0, seed=15)
        self.in_scene.visible = False

        # Scramble, swap which scene is visible, then unscramble.
        self.do(shuffle +
                CallFunc(self.hide_out_show_in) +
                Reverse(shuffle) +
                CallFunc(self.finish) +
                StopGrid())
class ShrinkGrowTransition(TransitionScene):
    '''Shrink the outgoing scene while growing the incoming scene.'''

    def __init__(self, *args, **kwargs):
        super(ShrinkGrowTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()

        self.in_scene.scale = 0.001
        self.out_scene.scale = 1
        # Anchors are offset so the two scenes don't overlap dead-center.
        self.in_scene.transform_anchor = (2 * width / 3.0, height / 2.0)
        self.out_scene.transform_anchor = (width / 3.0, height / 2.0)

        scale_out = ScaleTo(0.01, duration=self.duration)
        scale_in = ScaleTo(1.0, duration=self.duration)

        self.in_scene.do(Accelerate(scale_in, 0.5))
        self.out_scene.do(Accelerate(scale_out, 0.5) + CallFunc(self.finish))
class CornerMoveTransition(TransitionScene):
    '''Moves the bottom-right corner of the outgoing scene up to the
    top-left corner, revealing the incoming scene underneath.'''

    def __init__(self, *args, **kwargs):
        super(CornerMoveTransition, self).__init__(*args, **kwargs)

        self.out_scene.do(MoveCornerUp(duration=self.duration) +
                          CallFunc(self.finish) +
                          StopGrid())

    def start(self):
        # Don't call super: this effect needs the incoming scene BELOW
        # the outgoing one.
        self.add(self.in_scene, z=0)
        self.add(self.out_scene, z=1)
class EnvelopeTransition(TransitionScene):
    '''From the outgoing scene:
        - moves the top-right corner to the center
        - moves the bottom-left corner to the center

    The incoming scene performs the reverse motion.'''

    def __init__(self, *args, **kwargs):
        super(EnvelopeTransition, self).__init__(*args, **kwargs)

        self.in_scene.visible = False

        move = QuadMoveBy(delta0=(320, 240), delta1=(-630, 0),
                          delta2=(-320, -240), delta3=(630, 0),
                          duration=self.duration / 2.0)
        # move = Accelerate(move)
        self.do(move +
                CallFunc(self.hide_out_show_in) +
                Reverse(move) +
                CallFunc(self.finish) +
                StopGrid())
class FadeTRTransition(TransitionScene):
    '''Fade out the tiles of the outgoing scene from the bottom-left
    corner to the top-right corner.'''

    def __init__(self, *args, **kwargs):
        super(FadeTRTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()
        aspect = width / float(height)
        x, y = int(12 * aspect), 12

        a = self.get_action(x, y)
        # a = Accelerate(a)
        self.out_scene.do(a +
                          CallFunc(self.finish) +
                          StopGrid())

    def start(self):
        # Don't call super: the incoming scene sits below so the fading
        # tiles reveal it.
        self.add(self.in_scene, z=0)
        self.add(self.out_scene, z=1)

    def get_action(self, x, y):
        return FadeOutTRTiles(grid=(x, y), duration=self.duration)
class FadeBLTransition(FadeTRTransition):
    '''Fade out the tiles of the outgoing scene from the top-right
    corner to the bottom-left corner.'''

    def get_action(self, x, y):
        return FadeOutBLTiles(grid=(x, y), duration=self.duration)
class FadeUpTransition(FadeTRTransition):
    '''Fade out the tiles of the outgoing scene from the bottom to the
    top.'''

    def get_action(self, x, y):
        return FadeOutUpTiles(grid=(x, y), duration=self.duration)
class FadeDownTransition(FadeTRTransition):
    '''Fade out the tiles of the outgoing scene from the top to the
    bottom.'''

    def get_action(self, x, y):
        return FadeOutDownTiles(grid=(x, y), duration=self.duration)
class TurnOffTilesTransition(TransitionScene):
    '''Turn off the tiles of the outgoing scene in random order.'''

    def __init__(self, *args, **kwargs):
        super(TurnOffTilesTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()
        aspect = width / float(height)
        x, y = int(12 * aspect), 12

        a = TurnOffTiles(grid=(x, y), duration=self.duration)
        # a = Accelerate(a)
        self.out_scene.do(a +
                          CallFunc(self.finish) +
                          StopGrid())

    def start(self):
        # Don't call super: the incoming scene sits below so the
        # disappearing tiles reveal it.
        self.add(self.in_scene, z=0)
        self.add(self.out_scene, z=1)
class FadeTransition(TransitionScene):
    '''Fade out the outgoing scene and then fade in the incoming scene.
    Optionally supply the color to fade to in-between as an RGB color
    tuple.'''

    def __init__(self, *args, **kwargs):
        # Append alpha 0 so the layer starts fully transparent.
        color = kwargs.pop('color', (0, 0, 0)) + (0,)
        super(FadeTransition, self).__init__(*args, **kwargs)

        self.fadelayer = ColorLayer(*color)
        self.in_scene.visible = False
        self.add(self.fadelayer, z=2)

    def on_enter(self):
        super(FadeTransition, self).on_enter()
        # Fade the color layer in over the old scene, swap scenes while
        # the screen is fully covered, then fade the layer back out.
        self.fadelayer.do(FadeIn(duration=self.duration / 2.0) +
                          CallFunc(self.hide_out_show_in) +
                          FadeOut(duration=self.duration / 2.0) +
                          CallFunc(self.finish))

    def on_exit(self):
        super(FadeTransition, self).on_exit()
        self.remove(self.fadelayer)
class SplitColsTransition(TransitionScene):
    '''Splits the screen in columns.
    The odd columns go upwards while the even columns go downwards.'''

    def __init__(self, *args, **kwargs):
        super(SplitColsTransition, self).__init__(*args, **kwargs)

        width, height = director.get_window_size()

        self.in_scene.visible = False
        flip_a = self.get_action()
        # Split apart, swap the visible scene, then merge back together.
        flip = (flip_a +
                CallFunc(self.hide_out_show_in) +
                Reverse(flip_a))
        self.do(AccelDeccel(flip) +
                CallFunc(self.finish) +
                StopGrid())

    def get_action(self):
        return SplitCols(cols=3, duration=self.duration / 2.0)
class SplitRowsTransition(SplitColsTransition):
    '''Splits the screen in rows.
    The odd rows go to the left while the even rows go to the right.'''

    def get_action(self):
        return SplitRows(rows=3, duration=self.duration / 2.0)
class ZoomTransition(TransitionScene):
    '''Zoom and fade out a screenshot of the outgoing scene.'''

    def __init__(self, *args, **kwargs):
        # The outgoing scene is captured from the framebuffer, so an
        # explicit `src` makes no sense here.
        if 'src' in kwargs or len(args) == 3:
            raise Exception("ZoomTransition does not accept 'src' parameter.")

        super(ZoomTransition, self).__init__(*args, **kwargs)

        # Render the outgoing scene once so the color buffer holds it.
        self.out_scene.visit()

    def start(self):
        screensprite = self._create_out_screenshot()
        zoom = ScaleBy(2, self.duration) | FadeOut(self.duration)
        screensprite.do(zoom)

        restore = CallFunc(self.finish)
        self.in_scene.do(Delay(self.duration * 2) + restore)

        self.add(screensprite, z=1)
        self.add(self.in_scene, z=0)

    def finish(self):
        '''Called when the time is over.
        Unlike the base class, only the incoming scene was added as a
        child, so only it is removed before handing control back to the
        director.
        '''
        self.remove(self.in_scene)
        self.restore_out()
        director.replace(self.in_scene)

    def _create_out_screenshot(self):
        # TODO: try to use `pyglet.image.get_buffer_manager().get_color_buffer()`
        # instead of creating a new BufferManager... note that pyglet uses
        # a BufferManager singleton that fails when the window size
        # changes.
        buffer = pyglet.image.BufferManager()
        image = buffer.get_color_buffer()

        width, height = director.window.width, director.window.height
        actual_width, actual_height = director.get_window_size()

        out = Sprite(image)
        out.position = actual_width / 2, actual_height / 2
        # Compensate for any difference between the physical window size
        # and the director's virtual resolution.
        out.scale = max(actual_width / float(width),
                        actual_height / float(height))
        return out
| adamwiggins/cocos2d | cocos/scenes/transitions.py | Python | bsd-3-clause | 21,816 | [
"VisIt"
] | 3b455ab1a70c6c0d77df79af6aa9f80873fb1acd9b708ac51da0599521d04b2e |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RS4vectors(RPackage):
    """Foundation of vector-like and list-like containers in Bioconductor

    The S4Vectors package defines the Vector and List virtual classes and a
    set of generic functions that extend the semantic of ordinary vectors
    and lists in R. Package developers can easily implement vector-like or
    list-like objects as concrete subclasses of Vector or List. In addition,
    a few low-level concrete subclasses of general interest (e.g. DataFrame,
    Rle, and Hits) are implemented in the S4Vectors package itself (many
    more are implemented in the IRanges package and in other Bioconductor
    infrastructure packages)."""

    # Bioconductor package: sources are fetched by commit from the
    # Bioconductor git server rather than from a tarball.
    homepage = "https://bioconductor.org/packages/S4Vectors"
    git = "https://git.bioconductor.org/packages/S4Vectors.git"

    # Each version is pinned to the commit of the matching Bioconductor
    # release branch.
    # NOTE(review): 0.26.1 uses an abbreviated commit hash, unlike the
    # others -- confirm the full SHA for reproducibility.
    version('0.28.1', commit='994cb7ef830e76f8b43169cc72b553869fafb2ed')
    version('0.26.1', commit='935769c')
    version('0.22.1', commit='d25e517b48ca4184a4c2ee1f8223c148a55a8b8a')
    version('0.20.1', commit='1878b2909086941e556c5ea953c6fd86aebe9b02')
    version('0.18.3', commit='d6804f94ad3663828440914920ac933b934aeff1')
    version('0.16.0', commit='00fec03fcbcb7cff37917fab0da28d91fdf9dc3d')
    version('0.14.7', commit='40af17fe0b8e93b6a72fc787540d2961773b8e23')

    # R version floors and BiocGenerics constraints, narrowed per release
    # range; presumably mirrors each release's DESCRIPTION file -- verify
    # against Bioconductor when adding versions.
    depends_on('r@3.3.0:', type=('build', 'run'))
    depends_on('r@4.0.0:', when='@0.28.1:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.21.1:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.23.3:', when='@0.16.0:0.22.1', type=('build', 'run'))
    depends_on('r-biocgenerics@0.31.1:', when='@0.26.1:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.36.0:', when='@0.28.1:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-s4vectors/package.py | Python | lgpl-2.1 | 1,947 | [
"Bioconductor"
] | 49e8cc72e7f3ae978e97a23b77b9a96bbf4e207bee2236b38d5ba7a60cb26a13 |
from __future__ import print_function
import json
import os
import re
import sys
import xml.etree.ElementTree as etree
from Cython.Compiler.Main import compile_single, CompilationOptions
from Cython.Compiler.TreeFragment import parse_from_strings
from Cython.Compiler.Visitor import TreeVisitor
from Cython.Compiler import Nodes
os.chdir(os.path.abspath(os.path.join(__file__, "..", "..")))
class Visitor(TreeVisitor):
    """Cython AST visitor that records definition and usage events.

    ``self.state`` carries the current context (module, class, struct,
    function, property, extern_from) as key/value pairs that are pushed on
    the way down the tree and popped on the way back up; ``record_event``
    snapshots that context into ``self.events``.
    """

    def __init__(self, state=None):
        super(Visitor, self).__init__()
        # Copy so the caller's dict is never mutated by the push/pop below.
        self.state = dict(state or {})
        self.events = []

    def record_event(self, node, **kw):
        """Append a snapshot of the current state, extended with ``kw``
        plus the node and its source span, to ``self.events``."""
        state = self.state.copy()
        state.update(**kw)
        state["node"] = node
        state["pos"] = node.pos
        state["end_pos"] = node.end_pos()
        self.events.append(state)

    def visit_Node(self, node):
        # Default handler: just recurse into children.
        self.visitchildren(node)

    def visit_ModuleNode(self, node):
        self.state["module"] = node.full_module_name
        self.visitchildren(node)
        self.state.pop("module")

    def visit_CDefExternNode(self, node):
        # Remember which C header the nested declarations came from.
        self.state["extern_from"] = node.include_file
        self.visitchildren(node)
        self.state.pop("extern_from")

    def visit_CStructOrUnionDefNode(self, node):
        self.record_event(node, type="struct", name=node.name)
        self.state["struct"] = node.name
        self.visitchildren(node)
        self.state.pop("struct")

    def visit_CFuncDeclaratorNode(self, node):
        # Only record plainly-named function declarators; anything else
        # (e.g. a nested declarator) is recursed into instead.
        if isinstance(node.base, Nodes.CNameDeclaratorNode):
            self.record_event(node, type="function", name=node.base.name)
        else:
            self.visitchildren(node)

    def visit_CVarDefNode(self, node):
        if isinstance(node.declarators[0], Nodes.CNameDeclaratorNode):
            # Grab the type name.
            # TODO: Do a better job.
            type_ = node.base_type
            if hasattr(type_, "name"):
                type_name = type_.name
            elif hasattr(type_, "base_type"):
                type_name = type_.base_type.name
            else:
                type_name = str(type_)
            self.record_event(
                node, type="variable", name=node.declarators[0].name, vartype=type_name
            )
        else:
            self.visitchildren(node)

    def visit_CClassDefNode(self, node):
        self.state["class"] = node.class_name
        self.visitchildren(node)
        self.state.pop("class")

    def visit_PropertyNode(self, node):
        self.state["property"] = node.name
        self.visitchildren(node)
        self.state.pop("property")

    def visit_DefNode(self, node):
        self.state["function"] = node.name
        self.visitchildren(node)
        self.state.pop("function")

    def visit_AttributeNode(self, node):
        # An attribute access on the name ``lib`` marks a use of a wrapped
        # C symbol; record it instead of recursing.
        if getattr(node.obj, "name", None) == "lib":
            self.record_event(node, type="use", name=node.attribute)
        else:
            self.visitchildren(node)
def extract(path, **kwargs):
    """Parse the Cython file at *path* and return the recorded events.

    The source is parsed (not compiled) into a Cython AST and walked with
    :class:`Visitor`; the visitor's event list is returned.

    :param str path: path to a ``.pyx`` or ``.pxd`` file.
    :param kwargs: extra keyword arguments passed to ``parse_from_strings``.
    """
    name = os.path.splitext(os.path.relpath(path))[0].replace("/", ".")

    options = CompilationOptions()
    options.include_path.append("include")
    options.language_level = 2
    options.compiler_directives = dict(c_string_type="str", c_string_encoding="ascii")
    context = options.create_context()

    # Close the file handle deterministically instead of relying on
    # the garbage collector (the old code used a bare open().read()).
    with open(path) as source_file:
        source = source_file.read()

    tree = parse_from_strings(
        name,
        source,
        context,
        # .pxd files must be parsed at declaration level.
        level="module_pxd" if path.endswith(".pxd") else None,
        **kwargs
    )
    extractor = Visitor({"file": path})
    extractor.visit(tree)
    return extractor.events
def iter_cython(path):
    """Yield all ``.pyx`` and ``.pxd`` files in the given root."""
    for root, _dirs, names in os.walk(path):
        for name in names:
            # Skip hidden files (editor swap files etc.).
            if name.startswith("."):
                continue
            ext = os.path.splitext(name)[1]
            if ext in (".pyx", ".pxd"):
                yield os.path.join(root, name)
# Doxygen symbol index: name -> {"url": ..., "sig": ...}. Built from the
# FFmpeg Doxygen tagfile and cached as JSON next to it so re-runs are fast.
doxygen = {}

doxygen_base = "https://ffmpeg.org/doxygen/trunk"
tagfile_path = "tmp/tagfile.xml"
tagfile_json = tagfile_path + ".json"
if os.path.exists(tagfile_json):
    print("Loading pre-parsed Doxygen tagfile:", tagfile_json, file=sys.stderr)
    # NOTE(review): file handle is never closed explicitly; relies on
    # CPython refcounting.
    doxygen = json.load(open(tagfile_json))
if not doxygen:
    print("Parsing Doxygen tagfile:", tagfile_path, file=sys.stderr)
    if not os.path.exists(tagfile_path):
        print(" MISSING!", file=sys.stderr)
    else:
        root = etree.parse(tagfile_path)

        def inspect_member(node, name_prefix=""):
            """Record the Doxygen URL (and, for functions, the C signature)
            of one ``<member>`` element into the global ``doxygen`` dict."""
            name = name_prefix + node.find("name").text
            anchorfile = node.find("anchorfile").text
            anchor = node.find("anchor").text
            url = "%s/%s#%s" % (doxygen_base, anchorfile, anchor)
            doxygen[name] = {"url": url}
            if node.attrib["kind"] == "function":
                ret_type = node.find("type").text
                arglist = node.find("arglist").text
                sig = "%s %s%s" % (ret_type, name, arglist)
                doxygen[name]["sig"] = sig

        # Struct members are indexed as "StructName.member".
        for struct in root.iter("compound"):
            if struct.attrib["kind"] != "struct":
                continue
            name_prefix = struct.find("name").text + "."
            for node in struct.iter("member"):
                inspect_member(node, name_prefix)
        # Free members (functions, macros, variables).
        for node in root.iter("member"):
            inspect_member(node)
        # Cache the parsed index for the next run.
        json.dump(doxygen, open(tagfile_json, "w"), sort_keys=True, indent=4)
# Map: FFmpeg symbol name -> list of events where code in the `av` package
# uses it (uses are attribute accesses on the ``lib`` module, recorded by
# Visitor.visit_AttributeNode).
print("Parsing Cython source for references...", file=sys.stderr)
lib_references = {}
for path in iter_cython("av"):
    try:
        events = extract(path)
    except Exception as e:
        # Best-effort: report the parse failure and keep going with the
        # remaining files.
        print(" %s in %s" % (e.__class__.__name__, path), file=sys.stderr)
        print(" %s" % e, file=sys.stderr)
        continue
    for event in events:
        if event["type"] == "use":
            lib_references.setdefault(event["name"], []).append(event)
# Walk the declaration headers under include/ and collect every definition,
# pairing each with its `#:` comments, its Doxygen URL/signature, a Sphinx
# headline, and the places in the `av` package that reference it.
defs_by_extern = {}
for path in iter_cython("include"):
    # This one has "include" directives, which is not supported when
    # parsing from a string.
    if path == "include/libav.pxd":
        continue
    # Extract all #: comments from the source files.
    # NOTE(review): the file handle from open() is never closed explicitly,
    # and `comment` (the rstripped text) is computed but the un-stripped
    # text is what gets stored -- confirm which was intended.
    comments_by_line = {}
    for i, line in enumerate(open(path)):
        m = re.match(r"^\s*#: ?", line)
        if m:
            comment = line[m.end() :].rstrip()
            comments_by_line[i + 1] = line[m.end() :]
    # Extract Cython definitions from the source files.
    for event in extract(path):
        # Definitions not inside a `cdef extern from "..."` block are keyed
        # by their own file path instead.
        extern = event.get("extern_from") or path.replace("include/", "")
        defs_by_extern.setdefault(extern, []).append(event)
        # Collect comments above and below
        # (each comment line is consumed at most once via pop()).
        comments = event["_comments"] = []
        line = event["pos"][1] - 1
        while line in comments_by_line:
            comments.insert(0, comments_by_line.pop(line))
            line -= 1
        line = event["end_pos"][1] + 1
        while line in comments_by_line:
            comments.append(comments_by_line.pop(line))
            line += 1
        # Figure out the Sphinx headline.
        if event["type"] == "function":
            event["_sort_key"] = 2
            # Prefer the full C signature from Doxygen when available.
            sig = doxygen.get(event["name"], {}).get("sig")
            if sig:
                sig = re.sub(r"\).+", ")", sig)  # strip trailer
                event["_headline"] = ".. c:function:: %s" % sig
            else:
                event["_headline"] = ".. c:function:: %s()" % event["name"]
        elif event["type"] == "variable":
            struct = event.get("struct")
            if struct:
                event["_headline"] = ".. c:member:: %s %s" % (
                    event["vartype"],
                    event["name"],
                )
                event["_sort_key"] = 1.1
            else:
                event["_headline"] = ".. c:var:: %s" % event["name"]
                event["_sort_key"] = 3
        elif event["type"] == "struct":
            event["_headline"] = ".. c:type:: struct %s" % event["name"]
            event["_sort_key"] = 1
            event["_doxygen_url"] = "%s/struct%s.html" % (doxygen_base, event["name"])
        else:
            print("Unknown event type %s" % event["type"], file=sys.stderr)
        # Struct members are looked up as "StructName.member".
        name = event["name"]
        if event.get("struct"):
            name = "%s.%s" % (event["struct"], name)
        # Doxygen URLs
        event.setdefault("_doxygen_url", doxygen.get(name, {}).get("url"))
        # Find use references.
        ref_events = lib_references.get(name, [])
        if ref_events:
            ref_pairs = []
            for ref in sorted(ref_events, key=lambda e: e["name"]):
                chunks = [ref.get("module"), ref.get("class")]
                # NOTE(review): Python 2 semantics -- filter() returns a
                # list here. Under Python 3 it would be a lazy, always-truthy
                # iterator, breaking both the `if chunks` test and the join;
                # this script targets Python 2.
                chunks = filter(None, chunks)
                prefix = ".".join(chunks) + "." if chunks else ""
                if ref.get("property"):
                    ref_pairs.append(
                        (ref["property"], ":attr:`%s%s`" % (prefix, ref["property"]))
                    )
                elif ref.get("function"):
                    # NOTE(review): rebinds the outer `name`; harmless today
                    # because `name` is not read again before the loop below
                    # rebinds it, but fragile.
                    name = ref["function"]
                    if name in ("__init__", "__cinit__", "__dealloc__"):
                        # Constructors/destructors link to their class.
                        ref_pairs.append(
                            (
                                name,
                                ":class:`%s%s <%s>`"
                                % (prefix, name, prefix.rstrip(".")),
                            )
                        )
                    else:
                        ref_pairs.append((name, ":func:`%s%s`" % (prefix, name)))
                else:
                    continue
            # De-duplicate by referencing symbol name, keeping sorted order.
            unique_refs = event["_references"] = []
            seen = set()
            for name, ref in sorted(ref_pairs):
                if name in seen:
                    continue
                seen.add(name)
                unique_refs.append(ref)
# Emit the reST document on stdout: one section per extern header, each
# definition with its directive, doc comments, Doxygen quick-link and
# back-references.
print(
    """
..
    This file is generated by includes.py; any modifications will be destroyed!
Wrapped C Types and Functions
=============================
"""
)
# .items() instead of the Python 2-only .iteritems(), which raises
# AttributeError under Python 3; .items() works on both.
for extern, events in sorted(defs_by_extern.items()):
    did_header = False
    for event in events:
        headline = event.get("_headline")
        comments = event.get("_comments")
        refs = event.get("_references", [])
        url = event.get("_doxygen_url")
        indent = " " if event.get("struct") else ""
        if not headline:
            continue
        # NOTE(review): this branch is a no-op ('pass'); it looks like a
        # disabled filter that once skipped undocumented, unreferenced,
        # non-struct entries. any() replaces the Python 2-only truthiness
        # of filter() without changing the (absent) effect.
        if (
            not any(x.strip() for x in comments)
            and not refs
            and event["type"] not in ("struct",)
        ):
            pass
        # Print the section header lazily, only once something is emitted.
        if not did_header:
            print("``%s``" % extern)
            print("-" * (len(extern) + 4))
            print()
            did_header = True
        if url:
            print()
            print(indent + ".. rst-class:: ffmpeg-quicklink")
            print()
            print(indent + " `FFmpeg Docs <%s>`_" % url)
        print(indent + headline)
        print()
        if comments:
            for line in comments:
                print(indent + " " + line)
            print()
        if refs:
            print(indent + " Referenced by: ", end="")
            for i, ref in enumerate(refs):
                print((", " if i else "") + ref, end="")
            print(".")
            print()
| pupil-labs/PyAV | docs/includes.py | Python | bsd-3-clause | 11,341 | [
"VisIt"
] | b3a269c13ad75da94acd0693677bff4bdf5182ec7d25583c89dd7201f75717f4 |
""" :mod: TransformationCleaningAgent
=================================
.. module: TransformationCleaningAgent
:synopsis: clean up of finalised transformations
"""
__RCSID__ = "$Id$"
# # imports
import re
import ast
import os.path
from datetime import datetime, timedelta
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
# # agent's name
AGENT_NAME = 'Transformation/TransformationCleaningAgent'
class TransformationCleaningAgent( AgentModule ):
"""
.. class:: TransformationCleaningAgent
:param ~DIRAC.DataManagementSystem.Client.DataManager.DataManager dm: DataManager instance
:param ~TransformationClient.TransformationClient transClient: TransformationClient instance
:param ~FileCatalogClient.FileCatalogClient metadataClient: FileCatalogClient instance
"""
  def __init__( self, *args, **kwargs ):
    """ c'tor

    Attributes are only declared here; the clients and configuration
    values are populated in :meth:`initialize`.
    """
    AgentModule.__init__( self, *args, **kwargs )
    # # transformation client
    self.transClient = None
    # # wms client
    self.wmsClient = None
    # # request client
    self.reqClient = None
    # # file catalog client
    self.metadataClient = None
    # # transformations types
    self.transformationTypes = None
    # # directory locations
    self.directoryLocations = None
    # # transformation metadata
    self.transfidmeta = None
    # # archive period in days
    self.archiveAfter = None
    # # active SEs
    self.activeStorages = None
    # # transformation log SEs
    self.logSE = None
    # # enable/disable execution
    self.enableFlag = None
    # # default transformation-type lists; may be overridden from the CS in initialize()
    self.dataProcTTypes = ['MCSimulation', 'Merge']
    self.dataManipTTypes = ['Replication', 'Removal']
  def initialize( self ):
    """ agent initialisation

    reading and setting config opts

    :param self: self reference
    """
    # # shifter proxy
    self.am_setOption( 'shifterProxy', 'DataManager' )
    # # transformations types
    self.dataProcTTypes = Operations().getValue( 'Transformations/DataProcessing', self.dataProcTTypes )
    self.dataManipTTypes = Operations().getValue( 'Transformations/DataManipulation', self.dataManipTTypes )
    # An explicit agent option overrides the union of the two CS lists.
    agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
    if agentTSTypes:
      self.transformationTypes = sorted( agentTSTypes )
    else:
      self.transformationTypes = sorted( self.dataProcTTypes + self.dataManipTTypes )
    self.log.info( "Will consider the following transformation types: %s" % str( self.transformationTypes ) )
    # # directory locations
    self.directoryLocations = sorted( self.am_getOption( 'DirectoryLocations', [ 'TransformationDB',
                                                                                 'MetadataCatalog' ] ) )
    self.log.info( "Will search for directories in the following locations: %s" % str( self.directoryLocations ) )
    # # transformation metadata
    self.transfidmeta = self.am_getOption( 'TransfIDMeta', "TransformationID" )
    self.log.info( "Will use %s as metadata tag name for TransformationID" % self.transfidmeta )
    # # archive period in days
    self.archiveAfter = self.am_getOption( 'ArchiveAfter', 7 ) # days
    self.log.info( "Will archive Completed transformations after %d days" % self.archiveAfter )
    # # active SEs
    self.activeStorages = sorted( self.am_getOption( 'ActiveSEs', [] ) )
    self.log.info( "Will check the following storage elements: %s" % str( self.activeStorages ) )
    # # transformation log SEs
    self.logSE = Operations().getValue( '/LogStorage/LogSE', 'LogSE' )
    self.log.info( "Will remove logs found on storage element: %s" % self.logSE )
    # # enable/disable execution, should be using CS option Status?? with default value as 'Active'??
    self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
    # # transformation client
    self.transClient = TransformationClient()
    # # wms client
    self.wmsClient = WMSClient()
    # # request client
    self.reqClient = ReqClient()
    # # file catalog client
    self.metadataClient = FileCatalogClient()
    return S_OK()
#############################################################################
  def execute( self ):
    """ execution in one agent's cycle

    Processes, in order: transformations in 'Cleaning' status (archive
    data-manipulation ones, fully clean the rest), transformations in
    'RemovingFiles' status (remove their output data), and 'Completed'
    transformations not updated for ``self.archiveAfter`` days (archive).

    :param self: self reference
    """
    self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
    if self.enableFlag != 'True':
      self.log.info( 'TransformationCleaningAgent is disabled by configuration option EnableFlag' )
      return S_OK( 'Disabled via CS flag' )
    # # Obtain the transformations in Cleaning status and remove any mention of the jobs/files
    # NOTE(review): a failed getTransformations query here is silently
    # ignored (no else branch); only the last query below logs an error.
    res = self.transClient.getTransformations( { 'Status' : 'Cleaning',
                                                 'Type' : self.transformationTypes } )
    if res['OK']:
      for transDict in res['Value']:
        # # if transformation is of type `Replication` or `Removal`, there is nothing to clean.
        # # We just archive
        if transDict[ 'Type' ] in self.dataManipTTypes:
          res = self.archiveTransformation( transDict['TransformationID'] )
          if not res['OK']:
            self.log.error( "Problems archiving transformation %s: %s" % ( transDict['TransformationID'],
                                                                           res['Message'] ) )
        else:
          res = self.cleanTransformation( transDict['TransformationID'] )
          if not res['OK']:
            self.log.error( "Problems cleaning transformation %s: %s" % ( transDict['TransformationID'],
                                                                          res['Message'] ) )
    # # Obtain the transformations in RemovingFiles status and (wait for it) removes the output files
    res = self.transClient.getTransformations( { 'Status' : 'RemovingFiles',
                                                 'Type' : self.transformationTypes} )
    if res['OK']:
      for transDict in res['Value']:
        res = self.removeTransformationOutput( transDict['TransformationID'] )
        if not res['OK']:
          self.log.error( "Problems removing transformation %s: %s" % ( transDict['TransformationID'],
                                                                        res['Message'] ) )
    # # Obtain the transformations in Completed status and archive if inactive for X days
    olderThanTime = datetime.utcnow() - timedelta( days = self.archiveAfter )
    res = self.transClient.getTransformations( { 'Status' : 'Completed',
                                                 'Type' : self.transformationTypes },
                                               older = olderThanTime,
                                               timeStamp = 'LastUpdate' )
    if res['OK']:
      for transDict in res['Value']:
        res = self.archiveTransformation( transDict['TransformationID'] )
        if not res['OK']:
          self.log.error( "Problems archiving transformation %s: %s" % ( transDict['TransformationID'],
                                                                         res['Message'] ) )
    else:
      self.log.error( "Could not get the transformations" )
    return S_OK()
#############################################################################
#
# Get the transformation directories for checking
#
  def getTransformationDirectories( self, transID ):
    """ get the directories for the supplied transformation from the transformation system.
        These directories are used by removeTransformationOutput and cleanTransformation for removing output.

    Sources are selected by ``self.directoryLocations``
    ('TransformationDB' and/or 'MetadataCatalog').

    :param self: self reference
    :param int transID: transformation ID
    :return: S_OK( sorted list of directory paths ) or an error result
    """
    self.log.verbose("Cleaning Transformation directories of transformation %d" %transID)
    directories = []
    if 'TransformationDB' in self.directoryLocations:
      res = self.transClient.getTransformationParameters( transID, ['OutputDirectories'] )
      if not res['OK']:
        self.log.error( "Failed to obtain transformation directories", res['Message'] )
        return res
      transDirectories = []
      if res['Value']:
        if not isinstance( res['Value'], list ):
          # The DB may return the list as its string representation.
          transDirectories = ast.literal_eval( res['Value'] )
        else:
          transDirectories = res['Value']
      directories = self._addDirs( transID, transDirectories, directories )
    if 'MetadataCatalog' in self.directoryLocations:
      res = self.metadataClient.findDirectoriesByMetadata( {self.transfidmeta:transID} )
      if not res['OK']:
        self.log.error( "Failed to obtain metadata catalog directories", res['Message'] )
        return res
      transDirectories = res['Value']
      directories = self._addDirs( transID, transDirectories, directories )
    if not directories:
      self.log.info( "No output directories found" )
    directories = sorted( directories )
    return S_OK( directories )
@classmethod
def _addDirs( cls, transID, newDirs, existingDirs ):
""" append unique :newDirs: list to :existingDirs: list
:param self: self reference
:param int transID: transformationID
:param list newDirs: src list of paths
:param list existingDirs: dest list of paths
"""
for folder in newDirs:
transStr = str( transID ).zfill( 8 )
if re.search( transStr, str( folder ) ):
if not folder in existingDirs:
existingDirs.append( os.path.normpath( folder ) )
return existingDirs
#############################################################################
#
# These are the methods for performing the cleaning of catalogs and storage
#
def cleanStorageContents( self, directory ):
""" delete lfn dir from all active SE
:param self: self reference
:param sre directory: folder name
"""
self.log.verbose("Cleaning Storage Contents")
for storageElement in self.activeStorages:
res = self.__removeStorageDirectory( directory, storageElement )
if not res['OK']:
return res
return S_OK()
def __removeStorageDirectory( self, directory, storageElement ):
""" wipe out all contents from :directory: at :storageElement:
:param self: self reference
:param str directory: path
:param str storageElement: SE name
"""
self.log.info( 'Removing the contents of %s at %s' % ( directory, storageElement ) )
se = StorageElement( storageElement )
res = returnSingleResult( se.exists( directory ) )
if not res['OK']:
self.log.error( "Failed to obtain existance of directory", res['Message'] )
return res
exists = res['Value']
if not exists:
self.log.info( "The directory %s does not exist at %s " % ( directory, storageElement ) )
return S_OK()
res = returnSingleResult( se.removeDirectory( directory, recursive = True ) )
if not res['OK']:
self.log.error( "Failed to remove storage directory", res['Message'] )
return res
self.log.info( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
directory,
storageElement ) )
return S_OK()
def cleanCatalogContents( self, directory ):
""" wipe out everything from catalog under folder :directory:
:param self: self reference
:params str directory: folder name
"""
self.log.verbose("Cleaning Catalog contents")
res = self.__getCatalogDirectoryContents( [directory] )
if not res['OK']:
return res
filesFound = res['Value']
if not filesFound:
self.log.info( "No files are registered in the catalog directory %s" % directory )
return S_OK()
self.log.info( "Attempting to remove %d possible remnants from the catalog and storage" % len( filesFound ) )
# Executing with shifter proxy
gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )
res = DataManager().removeFile( filesFound, force = True )
gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
if not res['OK']:
return res
realFailure = False
for lfn, reason in res['Value']['Failed'].items():
if "File does not exist" in str( reason ):
self.log.warn( "File %s not found in some catalog: " % ( lfn ) )
else:
self.log.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
realFailure = True
if realFailure:
return S_ERROR( "Failed to remove all files found in the catalog" )
return S_OK()
def __getCatalogDirectoryContents( self, directories ):
""" get catalog contents under paths :directories:
:param self: self reference
:param list directories: list of paths in catalog
"""
self.log.info( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
for directory in directories:
self.log.info( directory )
activeDirs = directories
allFiles = {}
fc = FileCatalog()
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = returnSingleResult( fc.listDirectory( currentDir ) )
activeDirs.remove( currentDir )
if not res['OK'] and res['Message'].endswith( 'The supplied path does not exist' ):
self.log.info( "The supplied directory %s does not exist" % currentDir )
elif not res['OK']:
if "No such file or directory" in res['Message']:
self.log.info( "%s: %s" % ( currentDir, res['Message'] ) )
else:
self.log.error( "Failed to get directory %s content: %s" % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
self.log.info( "Found %d files" % len( allFiles ) )
return S_OK( allFiles.keys() )
def cleanTransformationLogFiles( self, directory ):
""" clean up transformation logs from directory :directory:
:param self: self reference
:param str directory: folder name
"""
self.log.verbose( "Removing log files found in the directory %s" % directory )
res = returnSingleResult( StorageElement( self.logSE ).removeDirectory( directory ) )
if not res['OK']:
self.log.error( "Failed to remove log files", res['Message'] )
return res
self.log.info( "Successfully removed transformation log directory" )
return S_OK()
#############################################################################
#
# These are the functional methods for archiving and cleaning transformations
#
  def removeTransformationOutput( self, transID ):
    """ This just removes any mention of the output data from the catalog and storage

    On success the transformation status is advanced to 'RemovedFiles'.
    LOG directories are skipped here; they are handled by cleanTransformation.

    :param self: self reference
    :param int transID: transformation ID
    """
    self.log.info( "Removing output data for transformation %s" % transID )
    res = self.getTransformationDirectories( transID )
    if not res['OK']:
      self.log.error( 'Problem obtaining directories for transformation %s with result "%s"' % ( transID, res ) )
      # NOTE(review): the failure is logged but not propagated -- the method
      # still returns S_OK; confirm this is intended.
      return S_OK()
    directories = res['Value']
    for directory in directories:
      if not re.search( '/LOG/', directory ):
        res = self.cleanCatalogContents( directory )
        if not res['OK']:
          return res
        res = self.cleanStorageContents( directory )
        if not res['OK']:
          return res
    self.log.info( "Removed directories in the catalog and storage for transformation" )
    # Clean ALL the possible remnants found in the metadata catalog
    res = self.cleanMetadataCatalogFiles( transID )
    if not res['OK']:
      return res
    self.log.info( "Successfully removed output of transformation %d" % transID )
    # Change the status of the transformation to RemovedFiles
    res = self.transClient.setTransformationParameter( transID, 'Status', 'RemovedFiles' )
    if not res['OK']:
      self.log.error( "Failed to update status of transformation %s to RemovedFiles" % ( transID ), res['Message'] )
      return res
    self.log.info( "Updated status of transformation %s to RemovedFiles" % ( transID ) )
    return S_OK()
  def archiveTransformation( self, transID ):
    """ This just removes job from the jobDB and the transformation DB

    On success the transformation status is advanced to 'Archived'.
    Output data is NOT touched (contrast with cleanTransformation).

    :param self: self reference
    :param int transID: transformation ID
    """
    self.log.info( "Archiving transformation %s" % transID )
    # Clean the jobs in the WMS and any failover requests found
    res = self.cleanTransformationTasks( transID )
    if not res['OK']:
      return res
    # Clean the transformation DB of the files and job information
    res = self.transClient.cleanTransformation( transID )
    if not res['OK']:
      return res
    self.log.info( "Successfully archived transformation %d" % transID )
    # Change the status of the transformation to archived
    res = self.transClient.setTransformationParameter( transID, 'Status', 'Archived' )
    if not res['OK']:
      self.log.error( "Failed to update status of transformation %s to Archived" % ( transID ), res['Message'] )
      return res
    self.log.info( "Updated status of transformation %s to Archived" % ( transID ) )
    return S_OK()
  def cleanTransformation( self, transID ):
    """ This removes what was produced by the supplied transformation,
        leaving only some info and log in the transformation DB.

    Removes tasks, log files, catalog and storage contents, metadata
    catalog remnants, then the transformation DB records; on success the
    transformation status is advanced to 'Cleaned'.

    :param self: self reference
    :param int transID: transformation ID
    """
    self.log.info( "Cleaning transformation %s" % transID )
    res = self.getTransformationDirectories( transID )
    if not res['OK']:
      self.log.error( 'Problem obtaining directories for transformation %s with result "%s"' % ( transID, res ) )
      # NOTE(review): the failure is logged but not propagated -- the method
      # still returns S_OK; confirm this is intended.
      return S_OK()
    directories = res['Value']
    # Clean the jobs in the WMS and any failover requests found
    res = self.cleanTransformationTasks( transID )
    if not res['OK']:
      return res
    # Clean the log files for the jobs
    for directory in directories:
      if re.search( '/LOG/', directory ):
        res = self.cleanTransformationLogFiles( directory )
        if not res['OK']:
          return res
      res = self.cleanCatalogContents( directory )
      if not res['OK']:
        return res
      res = self.cleanStorageContents( directory )
      if not res['OK']:
        return res
    # Clean ALL the possible remnants found in the BK
    res = self.cleanMetadataCatalogFiles( transID )
    if not res['OK']:
      return res
    # Clean the transformation DB of the files and job information
    res = self.transClient.cleanTransformation( transID )
    if not res['OK']:
      return res
    self.log.info( "Successfully cleaned transformation %d" % transID )
    res = self.transClient.setTransformationParameter( transID, 'Status', 'Cleaned' )
    if not res['OK']:
      self.log.error( "Failed to update status of transformation %s to Cleaned" % ( transID ), res['Message'] )
      return res
    self.log.info( "Updated status of transformation %s to Cleaned" % ( transID ) )
    return S_OK()
def cleanMetadataCatalogFiles( self, transID ):
""" wipe out files from catalog """
res = self.metadataClient.findFilesByMetadata( { self.transfidmeta : transID } )
if not res['OK']:
return res
fileToRemove = res['Value']
if not fileToRemove:
self.log.info( 'No files found for transID %s' % transID )
return S_OK()
# Executing with shifter proxy
gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )
res = DataManager().removeFile( fileToRemove, force = True )
gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
if not res['OK']:
return res
for lfn, reason in res['Value']['Failed'].items():
self.log.error( "Failed to remove file found in metadata catalog", "%s %s" % ( lfn, reason ) )
if res['Value']['Failed']:
return S_ERROR( "Failed to remove all files found in the metadata catalog" )
self.log.info( "Successfully removed all files found in the BK" )
return S_OK()
#############################################################################
#
# These are the methods for removing the jobs from the WMS and transformation DB
#
  def cleanTransformationTasks( self, transID ):
    """ clean tasks from WMS, or from the RMS if it is a DataManipulation transformation

    :param self: self reference
    :param int transID: transformation ID
    """
    self.log.verbose("Cleaning Transformation tasks of transformation %d" %transID)
    res = self.__getTransformationExternalIDs( transID )
    if not res['OK']:
      return res
    externalIDs = res['Value']
    if externalIDs:
      res = self.transClient.getTransformationParameters( transID, ['Type'] )
      if not res['OK']:
        self.log.error( "Failed to determine transformation type" )
        return res
      transType = res['Value']
      # Data-processing tasks are WMS jobs; anything else is treated as
      # RMS requests.
      if transType in self.dataProcTTypes:
        res = self.__removeWMSTasks( externalIDs )
      else:
        res = self.__removeRequests( externalIDs )
      if not res['OK']:
        return res
    return S_OK()
def __getTransformationExternalIDs( self, transID ):
""" collect all ExternalIDs for transformation :transID:
:param self: self reference
:param int transID: transforamtion ID
"""
res = self.transClient.getTransformationTasks( condDict = { 'TransformationID' : transID } )
if not res['OK']:
self.log.error( "Failed to get externalIDs for transformation %d" % transID, res['Message'] )
return res
externalIDs = [ taskDict['ExternalID'] for taskDict in res["Value"] ]
self.log.info( "Found %d tasks for transformation" % len( externalIDs ) )
return S_OK( externalIDs )
def __removeRequests( self, requestIDs ):
""" This will remove requests from the RMS system -
"""
rIDs = [ int( long( j ) ) for j in requestIDs if long( j ) ]
for reqID in rIDs:
self.reqClient.deleteRequest( reqID )
return S_OK()
  def __removeWMSTasks( self, transJobIDs ):
    """ Wipe out jobs and their failover requests from the system.

    Jobs are first killed, then deleted, in chunks of 500; afterwards the
    failover requests associated to those jobs are deleted from the RequestDB.

    TODO: should check request status, maybe FTS files as well ???

    :param self: self reference
    :param list transJobIDs: job IDs
    """
    # Prevent 0 job IDs
    jobIDs = [ int( j ) for j in transJobIDs if int( j ) ]
    allRemove = True
    # Chunking keeps individual WMS calls bounded
    for jobList in breakListIntoChunks( jobIDs, 500 ):
      res = self.wmsClient.killJob( jobList )
      if res['OK']:
        self.log.info( "Successfully killed %d jobs from WMS" % len( jobList ) )
      # NOTE(review): the diagnostic keys are looked up on the top-level result
      # dict (not res['Value']) - presumably the WMS client puts them there on
      # failure; verify against WMSClient before refactoring.
      elif ( "InvalidJobIDs" in res ) and ( "NonauthorizedJobIDs" not in res ) and ( "FailedJobIDs" not in res ):
        self.log.info( "Found %s jobs which did not exist in the WMS" % len( res['InvalidJobIDs'] ) )
      elif "NonauthorizedJobIDs" in res:
        self.log.error( "Failed to kill %s jobs because not authorized" % len( res['NonauthorizedJobIDs'] ) )
        allRemove = False
      elif "FailedJobIDs" in res:
        self.log.error( "Failed to kill %s jobs" % len( res['FailedJobIDs'] ) )
        allRemove = False
      res = self.wmsClient.deleteJob( jobList )
      if res['OK']:
        self.log.info( "Successfully removed %d jobs from WMS" % len( jobList ) )
      elif ( "InvalidJobIDs" in res ) and ( "NonauthorizedJobIDs" not in res ) and ( "FailedJobIDs" not in res ):
        self.log.info( "Found %s jobs which did not exist in the WMS" % len( res['InvalidJobIDs'] ) )
      elif "NonauthorizedJobIDs" in res:
        self.log.error( "Failed to remove %s jobs because not authorized" % len( res['NonauthorizedJobIDs'] ) )
        allRemove = False
      elif "FailedJobIDs" in res:
        self.log.error( "Failed to remove %s jobs" % len( res['FailedJobIDs'] ) )
        allRemove = False
    if not allRemove:
      return S_ERROR( "Failed to remove all remnants from WMS" )
    self.log.info( "Successfully removed all tasks from the WMS" )
    if not jobIDs:
      self.log.info( "JobIDs not present, unable to remove asociated requests." )
      return S_OK()
    failed = 0
    failoverRequests = {}
    res = self.reqClient.getRequestIDsForJobs( jobIDs )
    if not res['OK']:
      self.log.error( "Failed to get requestID for jobs.", res['Message'] )
      return res
    failoverRequests.update( res['Value']['Successful'] )
    if not failoverRequests:
      return S_OK()
    # Remove the failover requests attached to the removed jobs
    for jobID, requestID in res['Value']['Successful'].items():
      # Put this check just in case, tasks must have associated jobs
      if jobID == 0 or jobID == '0':
        continue
      res = self.reqClient.deleteRequest( requestID )
      if not res['OK']:
        self.log.error( "Failed to remove request from RequestDB", res['Message'] )
        failed += 1
      else:
        self.log.verbose( "Removed request %s associated to job %d." % ( requestID, jobID ) )
    if failed:
      self.log.info( "Successfully removed %s requests" % ( len( failoverRequests ) - failed ) )
      self.log.info( "Failed to remove %s requests" % failed )
      return S_ERROR( "Failed to remove all the request from RequestDB" )
    self.log.info( "Successfully removed all the associated failover requests" )
    return S_OK()
| Andrew-McNab-UK/DIRAC | TransformationSystem/Agent/TransformationCleaningAgent.py | Python | gpl-3.0 | 25,929 | [
"DIRAC"
] | 4b544d2e648d83a6749096e112bde584d23cc02fcdc63eb8c906d578da6cf744 |
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Calculate properties of nuclei based on data parsed by cclib."""
import logging
import numpy
from .calculationmethod import Method
class Nuclear(Method):
    """A container for methods pertaining to atomic nuclei."""

    def __init__(self, data, progress=None, loglevel=logging.INFO, logname="Log"):
        super(Nuclear, self).__init__(data, progress, loglevel, logname)

    def __str__(self):
        """Return a string representation of the object."""
        return "Nuclear"

    def __repr__(self):
        """Return a representation of the object."""
        return "Nuclear"

    def repulsion_energy(self):
        """Return the nuclear repulsion energy.

        Sums Z_i * Z_j / r_ij over all unique atom pairs, using the first
        geometry stored in atomcoords.
        """
        energy = 0.0
        coords = self.data.atomcoords[0]
        charges = self.data.atomnos
        for i in range(self.data.natom):
            for j in range(i + 1, self.data.natom):
                separation = numpy.linalg.norm(coords[i] - coords[j])
                energy += charges[i] * charges[j] / separation
        return energy
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
| Clyde-fare/cclib | src/cclib/method/nuclear.py | Python | lgpl-2.1 | 1,630 | [
"cclib"
] | a86c86f86385d4f8db0105a7a98d9467c11d20a3ef42f04c7d697556595bba35 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
""" Module for translating ONNX operators into Mxnet operatoes"""
# pylint: disable=unused-argument,protected-access
import numpy as np
from . import _translation_utils as translation_utils
from .... import symbol
# Method definitions for the callable objects mapped in the import_helper module
def identity(attrs, inputs, proto_obj):
    """Map ONNX Identity onto MXNet's pass-through 'identity' operator."""
    op_name = 'identity'
    return op_name, attrs, inputs
def random_uniform(attrs, inputs, proto_obj):
    """Draw random samples from a uniform distribution."""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          "Instructions to install - https://github.com/onnx/onnx")
    # drop ONNX's seed and translate the numeric dtype code (default 1 = float32)
    uniform_attrs = translation_utils._remove_attributes(attrs, ['seed'])
    uniform_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(uniform_attrs.get('dtype', 1))]
    return 'random_uniform', uniform_attrs, inputs
def random_normal(attrs, inputs, proto_obj):
    """Draw random samples from a Gaussian distribution."""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          "Instructions to install - https://github.com/onnx/onnx")
    # drop ONNX's seed, rename mean->loc, translate the numeric dtype code
    normal_attrs = translation_utils._remove_attributes(attrs, ['seed'])
    normal_attrs = translation_utils._fix_attribute_names(normal_attrs, {'mean': 'loc'})
    normal_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(normal_attrs.get('dtype', 1))]
    return 'random_normal', normal_attrs, inputs
def sample_multinomial(attrs, inputs, proto_obj):
    """Draw random samples from a multinomial distribution."""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    multinomial_attrs = translation_utils._remove_attributes(attrs, ['seed'])
    multinomial_attrs = translation_utils._fix_attribute_names(multinomial_attrs,
                                                               {'sample_size': 'shape'})
    # dtype is taken from the original attrs; ONNX default is 6 (int32)
    multinomial_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(attrs.get('dtype', 6))]
    return 'sample_multinomial', multinomial_attrs, inputs
def mean(attrs, inputs, proto_obj):
    """Mean of all the input tensors: stack along a new axis 0, then average it."""
    expanded = [symbol.expand_dims(tensor, axis=0) for tensor in inputs]
    stacked = symbol.concat(*expanded, dim=0)
    return symbol.mean(stacked, axis=0), attrs, inputs
def logical_and(attrs, inputs, proto_obj):
    """Map ONNX And onto MXNet's broadcasting logical-and."""
    op_name = 'broadcast_logical_and'
    return op_name, attrs, inputs
def logical_or(attrs, inputs, proto_obj):
    """Map ONNX Or onto MXNet's broadcasting logical-or."""
    op_name = 'broadcast_logical_or'
    return op_name, attrs, inputs
def logical_xor(attrs, inputs, proto_obj):
    """Map ONNX Xor onto MXNet's broadcasting logical-xor."""
    op_name = 'broadcast_logical_xor'
    return op_name, attrs, inputs
def logical_not(attrs, inputs, proto_obj):
    """Map ONNX Not onto MXNet's elementwise logical-not."""
    op_name = 'logical_not'
    return op_name, attrs, inputs
def absolute(attrs, inputs, proto_obj):
    """Map ONNX Abs onto MXNet's elementwise 'abs'."""
    op_name = 'abs'
    return op_name, attrs, inputs
def negative(attrs, inputs, proto_obj):
    """Map ONNX Neg onto MXNet's elementwise 'negative'."""
    op_name = 'negative'
    return op_name, attrs, inputs
def add_n(attrs, inputs, proto_obj):
    """Map ONNX Sum onto MXNet's n-ary elementwise 'add_n'."""
    op_name = 'add_n'
    return op_name, attrs, inputs
# Sorting and Searching
def argmax(attrs, inputs, proto_obj):
    """Returns indices of the maximum values along an axis."""
    amax_sym = symbol.argmax(inputs[0],
                             axis=attrs.get('axis', 0),
                             keepdims=attrs.get('keepdims', 1))
    # ONNX mandates int64 output, so append a cast
    return 'cast', {'dtype': 'int64'}, amax_sym
def argmin(attrs, inputs, proto_obj):
    """Returns indices of the minimum values along an axis."""
    amin_sym = symbol.argmin(inputs[0],
                             axis=attrs.get('axis', 0),
                             keepdims=attrs.get('keepdims', 1))
    # ONNX mandates int64 output, so append a cast
    return 'cast', {'dtype': 'int64'}, amin_sym
def maximum(attrs, inputs, proto_obj):
    """Elementwise maximum of arrays.

    MXNet's maximum is binary while ONNX Max accepts any number of inputs,
    so the inputs are folded pairwise left to right.
    """
    if len(inputs) > 1:
        result = inputs[0]
        for other in inputs[1:]:
            result = symbol.maximum(result, other)
    else:
        result = symbol.maximum(inputs[0], inputs[0])
    return result, attrs, inputs
def minimum(attrs, inputs, proto_obj):
    """Elementwise minimum of arrays.

    MXNet's minimum is binary while ONNX Min accepts any number of inputs,
    so the inputs are folded pairwise left to right.
    """
    if len(inputs) > 1:
        result = inputs[0]
        for other in inputs[1:]:
            result = symbol.minimum(result, other)
    else:
        result = symbol.minimum(inputs[0], inputs[0])
    return result, attrs, inputs
def lesser(attrs, inputs, proto_obj):
    """Logical less-than comparison with broadcasting."""
    op_name = 'broadcast_lesser'
    return op_name, attrs, inputs
def greater(attrs, inputs, proto_obj):
    """Logical greater-than comparison with broadcasting."""
    op_name = 'broadcast_greater'
    return op_name, attrs, inputs
def equal(attrs, inputs, proto_obj):
    """Logical equality comparison with broadcasting."""
    op_name = 'broadcast_equal'
    return op_name, attrs, inputs
#Hyperbolic functions
def tanh(attrs, inputs, proto_obj):
    """Hyperbolic tangent, elementwise."""
    op_name = 'tanh'
    return op_name, attrs, inputs
# Rounding
def ceil(attrs, inputs, proto_obj):
    """Elementwise ceiling of the input."""
    op_name = 'ceil'
    return op_name, attrs, inputs
def floor(attrs, inputs, proto_obj):
    """Elementwise floor of the input."""
    op_name = 'floor'
    return op_name, attrs, inputs
# Joining and spliting
def concat(attrs, inputs, proto_obj):
    """Joins input arrays along a given axis (ONNX 'axis' -> MXNet 'dim')."""
    concat_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'})
    return 'concat', concat_attrs, inputs
# Basic neural network functions
def softsign(attrs, inputs, proto_obj):
    """Computes softsign of x element-wise."""
    op_name = 'softsign'
    return op_name, attrs, inputs
def sigmoid(attrs, inputs, proto_obj):
    """Elementwise sigmoid of the input array."""
    op_name = 'sigmoid'
    return op_name, attrs, inputs
def hardsigmoid(attrs, inputs, proto_obj):
    """Elementwise hard sigmoid of the input array."""
    op_name = 'hard_sigmoid'
    return op_name, attrs, inputs
def relu(attrs, inputs, proto_obj):
    """Rectified linear activation."""
    op_name = 'relu'
    return op_name, attrs, inputs
def pad(attrs, inputs, proto_obj):
    """ Add padding to input tensor.

    From opset 11 on, the pads (and the optional constant fill value) arrive
    as initializer inputs; earlier opsets pass them as attributes.
    """
    opset_version = proto_obj.opset_version
    # ONNX default padding mode
    if 'mode' not in attrs.keys():
        attrs['mode'] = 'constant'
    if opset_version >= 11:
        # pads come as the second input (an initializer tensor)
        pads = list(proto_obj._params[inputs[1].name].asnumpy())
        pads = tuple([int(i) for i in pads])
        new_attrs = translation_utils._add_extra_attributes(attrs, {'pad_width': pads})
        if len(inputs) == 3:
            # optional third input holds the constant fill value
            const = proto_obj._params[inputs[2].name].asnumpy()[0]
            new_attrs = translation_utils._add_extra_attributes(new_attrs, {'constant_value': const})
        # reorder ONNX (begin..., end...) pads into MXNet's per-axis (begin, end) pairs
        new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
        return 'pad', new_attrs, inputs[0]
    else:
        new_attrs = translation_utils._fix_attribute_names(attrs, {'pads'  : 'pad_width',
                                                                   'value' : 'constant_value'
                                                                  })
        new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
        return 'pad', new_attrs, inputs
def matrix_multiplication(attrs, inputs, proto_obj):
    """General matrix multiplication via MXNet's linalg_gemm2."""
    op_name = 'linalg_gemm2'
    return op_name, attrs, inputs
def batch_norm(attrs, inputs, proto_obj):
    """Batch normalization.

    Maps ONNX BatchNormalization attributes onto MXNet BatchNorm; note that
    the 'is_test' -> 'fix_gamma' rename below is immediately overwritten by
    the explicit fix_gamma assignment at the end.
    """
    new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps',
                                                               'is_test': 'fix_gamma'})
    new_attrs = translation_utils._remove_attributes(new_attrs,
                                                     ['spatial', 'consumed_inputs'])
    # Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5)
    cudnn_min_eps = 1e-5
    cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off})
    # in test mode "fix_gamma" should be unset.
    new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
    return 'BatchNorm', new_attrs, inputs
def instance_norm(attrs, inputs, proto_obj):
    """Instance Normalization (ONNX 'epsilon' -> MXNet 'eps', default 1e-5)."""
    in_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
    in_attrs['eps'] = attrs.get('epsilon', 1e-5)
    return 'InstanceNorm', in_attrs, inputs
def leaky_relu(attrs, inputs, proto_obj):
    """Leaky ReLU activation (ONNX 'alpha' -> MXNet 'slope', default 0.01)."""
    if 'alpha' in attrs:
        relu_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
    else:
        relu_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01})
    return 'LeakyReLU', relu_attrs, inputs
def _elu(attrs, inputs, proto_obj):
    """ELU activation via LeakyReLU(act_type='elu'); ONNX 'alpha' -> 'slope', default 1.0."""
    if 'alpha' in attrs:
        elu_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
    else:
        elu_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0})
    elu_attrs = translation_utils._add_extra_attributes(elu_attrs, {'act_type': 'elu'})
    return 'LeakyReLU', elu_attrs, inputs
def _prelu(attrs, inputs, proto_obj):
    """PReLU activation via LeakyReLU(act_type='prelu')."""
    prelu_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
    return 'LeakyReLU', prelu_attrs, inputs
def _selu(attrs, inputs, proto_obj):
    """SELU activation via LeakyReLU(act_type='selu')."""
    selu_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'})
    return 'LeakyReLU', selu_attrs, inputs
def softmax(attrs, inputs, proto_obj):
    """Softmax function (ONNX default axis is 1)."""
    if 'axis' in attrs:
        return 'softmax', attrs, inputs
    return 'softmax', translation_utils._add_extra_attributes(attrs, {'axis': 1}), inputs
def log_softmax(attrs, inputs, proto_obj):
    """Log of softmax of the input, computed in one fused MXNet operator."""
    op_name = 'log_softmax'
    return op_name, attrs, inputs
def softplus(attrs, inputs, proto_obj):
    """Softplus activation, mapped onto MXNet Activation(act_type='softrelu')."""
    act_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
    return 'Activation', act_attrs, inputs
def conv(attrs, inputs, proto_obj):
    """Compute N-D convolution on (N+2)-D input.

    Maps ONNX Conv attributes onto MXNet Convolution; asymmetric ONNX pads are
    realized with an explicit 'pad' operator in front of the convolution.
    """
    new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
                                                               'strides' : 'stride',
                                                               'pads': 'pad',
                                                               'dilations': 'dilate',
                                                               'group': 'num_group'})
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
    new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs))
    new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, proto_obj)
    kernel = new_attrs['kernel']
    stride = new_attrs['stride'] if 'stride' in new_attrs else []
    padding = new_attrs['pad'] if 'pad' in new_attrs else []
    dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
    num_filter = new_attrs['num_filter']
    num_group = new_attrs['num_group']
    no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else 0
    # NOTE(review): this only skips the bias input when no_bias is literally True;
    # a truthy 1 still reads inputs[2] - verify _fix_bias always yields a bool.
    bias = None if no_bias is True else inputs[2]
    # split the ONNX (begin..., end...) pad sequence into per-axis begin/end halves
    mxnet_pad = translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
    left_pads = mxnet_pad[0::2]
    right_pads = mxnet_pad[1::2]
    is_pad_sym = left_pads == right_pads
    if not is_pad_sym:
        # Unlike ONNX, MXNet's convolution operator does not support asymmetric padding, so we first
        # use 'Pad' operator, which supports asymmetric padding. Then use the convolution operator.
        pad_width = (0, 0, 0, 0) + mxnet_pad
        pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
        conv_op = symbol.Convolution(pad_op, inputs[1], bias,
                                     kernel=kernel, stride=stride, dilate=dilations,
                                     num_filter=num_filter, num_group=num_group, no_bias=no_bias)
    else:
        pad_width = left_pads
        conv_op = symbol.Convolution(inputs[0], inputs[1], bias,
                                     kernel=kernel, stride=stride, dilate=dilations, pad=pad_width,
                                     num_filter=num_filter, num_group=num_group, no_bias=no_bias)
    return conv_op, new_attrs, inputs
def deconv(attrs, inputs, proto_obj):
    """Computes transposed convolution of the input tensor.

    Maps ONNX ConvTranspose attributes onto MXNet Deconvolution; padding is
    always realized with an explicit 'pad' operator in front.
    """
    new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
                                                               'strides' : 'stride',
                                                               'pads': 'pad',
                                                               'dilations': 'dilate',
                                                               'group': 'num_group'})
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
    new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))
    new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj)
    kernel = new_attrs['kernel'] if 'kernel' in new_attrs else []
    stride = new_attrs['stride'] if 'stride' in new_attrs else []
    padding = new_attrs['pad'] if 'pad' in new_attrs else []
    dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
    num_filter = new_attrs['num_filter']
    num_group = new_attrs['num_group']
    no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False
    # NOTE(review): same caveat as conv - only a literal True skips inputs[2]
    bias = None if no_bias is True else inputs[2]
    # Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding, so we first
    # use 'Pad' operator, which supports asymmetric padding. Then use the deconvolution operator.
    pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
    pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
    deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias,
                                     kernel=kernel, stride=stride, dilate=dilations,
                                     num_filter=num_filter, num_group=num_group, no_bias=no_bias)
    return deconv_op, new_attrs, inputs
def fully_connected(attrs, inputs, proto_obj):
    """Applies a linear transformation: Y=XWT+b."""
    fc_attrs = translation_utils._remove_attributes(attrs, ['axis'])
    fc_attrs = translation_utils._fix_bias('FullyConnected', fc_attrs, len(inputs))
    fc_attrs = translation_utils._fix_channels('FullyConnected', fc_attrs, inputs, proto_obj)
    return 'FullyConnected', fc_attrs, inputs
def global_maxpooling(attrs, inputs, proto_obj):
    """Performs global max pooling on the input."""
    pool_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                 'kernel': (1, 1),
                                                                 'pool_type': 'max'})
    return 'Pooling', pool_attrs, inputs
def global_avgpooling(attrs, inputs, proto_obj):
    """Performs global average pooling on the input."""
    pool_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                 'kernel': (1, 1),
                                                                 'pool_type': 'avg'})
    return 'Pooling', pool_attrs, inputs
def global_lppooling(attrs, inputs, proto_obj):
    """Performs global lp pooling on the input (ONNX 'p' -> MXNet 'p_value', default 2)."""
    p_value = attrs.get('p', 2)
    pool_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                 'kernel': (1, 1),
                                                                 'pool_type': 'lp',
                                                                 'p_value': p_value})
    pool_attrs = translation_utils._remove_attributes(pool_attrs, ['p'])
    return 'Pooling', pool_attrs, inputs
def linalg_gemm(attrs, inputs, proto_obj):
    """Performs general matrix multiplication and accumulation.

    Computes alpha * op(A) * op(B) + beta * C, with A flattened to 2-D first
    (ONNX Gemm semantics).
    """
    trans_a = 0
    trans_b = 0
    alpha = 1
    beta = 1
    if 'transA' in attrs:
        trans_a = attrs['transA']
    if 'transB' in attrs:
        trans_b = attrs['transB']
    if 'alpha' in attrs:
        alpha = attrs['alpha']
    if 'beta' in attrs:
        beta = attrs['beta']
    flatten_a = symbol.flatten(inputs[0])
    # alpha is folded into the matrix product; beta scales the C term below
    matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
                                    transpose_a=trans_a, transpose_b=trans_b,
                                    alpha=alpha)
    gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2])
    new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
                                                               'transB': 'transpose_b'})
    new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
    return gemm_op, new_attrs, inputs
def local_response_norm(attrs, inputs, proto_obj):
    """Local Response Normalization (ONNX 'bias'/'size' -> MXNet 'knorm'/'nsize')."""
    lrn_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'bias': 'knorm',
                                                        'size' : 'nsize'})
    return 'LRN', lrn_attrs, inputs
def dropout(attrs, inputs, proto_obj):
    """Dropout Regularization.

    From opset 12 on the ratio may arrive as a second (initializer) input;
    earlier opsets pass it as the 'ratio' attribute.  'is_test' == 0 forces
    dropout to stay active at inference time (mode 'always').
    """
    mode = 'training'
    opset_version = proto_obj.opset_version
    if 'is_test' in attrs and attrs['is_test'] == 0:
        mode = 'always'
    new_attrs = translation_utils._remove_attributes(attrs, ['is_test'])
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
    if opset_version >= 12:
        new_attrs = translation_utils._remove_attributes(new_attrs, ['seed'])
        if len(inputs) == 2:
            # ratio supplied as an initializer input; replace any 'p' attribute with it
            ratio_float = proto_obj._params[inputs[1].name].asnumpy()[0]
            new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
            new_attrs = translation_utils._add_extra_attributes(new_attrs, {'p': ratio_float})
        elif len(inputs) == 1:
            new_attrs = translation_utils._fix_attribute_names(new_attrs, {'ratio': 'p'})
            return 'Dropout', new_attrs, inputs[0]
    else:
        new_attrs = translation_utils._fix_attribute_names(new_attrs, {'ratio': 'p'})
    return 'Dropout', new_attrs, inputs
# Changing shape and type.
def reshape(attrs, inputs, proto_obj):
    """Reshape the given array by the shape attribute."""
    if len(inputs) == 1:
        return 'reshape', attrs, inputs[0]
    # opset >= 5: the target shape arrives as a second (initializer) input
    target_shape = [int(dim) for dim in proto_obj._params[inputs[1].name].asnumpy()]
    return 'reshape', {'shape': target_shape}, inputs[:1]
def cast(attrs, inputs, proto_obj):
    """Cast input to a given dtype (ONNX numeric 'to' code -> numpy dtype)."""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    cast_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
    cast_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(cast_attrs['dtype'])]
    return 'cast', cast_attrs, inputs
def split(attrs, inputs, proto_obj):
    """Splits an array along a particular axis into multiple sub-arrays.

    MXNet's split only supports equal-sized pieces, so a 'split' attribute
    with differing sizes raises NotImplementedError.
    """
    split_list = attrs.get('split') if 'split' in attrs else []
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'split' : 'num_outputs'})
    # ONNX default split axis is 0
    if 'axis' not in attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0})

    if not split_list:
        # no explicit sizes: infer the piece count from the model's outputs
        num_outputs = len(proto_obj.model_metadata.get('output_tensor_data'))
    else:
        if len(set(split_list)) == 1:
            num_outputs = len(split_list)
        else:
            raise NotImplementedError("Operator {} in MXNet does not support variable splits."
                                      "Tracking the issue to support variable split here: "
                                      "https://github.com/apache/incubator-mxnet/issues/11594"
                                      .format('split'))

    new_attrs['num_outputs'] = num_outputs
    return 'split', new_attrs, inputs
def _slice(attrs, inputs, proto_obj):
    """Returns a slice of the input tensor along multiple axes.

    ONNX Slice supports several axes at once while MXNet's slice_axis handles
    one axis per call, so one slice_axis is chained per requested axis.
    """
    input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0]
    input_shape = input_tensor_data[1]

    if proto_obj.opset_version >= 10:
        begin = proto_obj._params[inputs[1].name].asnumpy()
        end = proto_obj._params[inputs[2].name].asnumpy()
        if len(inputs) >= 4:
            axes = list(proto_obj._params[inputs[3].name].asnumpy())
            axes = tuple([int(i) for i in axes])
        else:
            axes = tuple(range(len(begin)))
        new_attrs = translation_utils._add_extra_attributes(attrs, {'axes' : axes,
                                                                    'begin' : begin,
                                                                    'end' : end
                                                                   })
    else:
        new_attrs = translation_utils._fix_attribute_names(attrs,
                                                           {'axes' : 'axis',
                                                            'ends' : 'end',
                                                            'starts' : 'begin'})
    # onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
    # for multiple axes from mxnet
    begin = new_attrs.get('begin')
    end = list(new_attrs.get('end'))
    # Bug fix: the opset>=10 branch stores the axes under 'axes' while only
    # 'axis' was read here, so explicitly provided axes were silently ignored.
    axes = new_attrs.get('axis', new_attrs.get('axes', tuple(range(len(begin)))))
    for i, axis in enumerate(axes):
        end[i] = None if end[i] >= input_shape[axis] else end[i]
    slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
    # Bug fix: the first axis was previously sliced twice (the loop restarted
    # at index 0); only the remaining axes must be chained here.
    for i, axis in enumerate(axes[1:], 1):
        slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
    return slice_op, new_attrs, inputs
def transpose(attrs, inputs, proto_obj):
    """Transpose the input array (ONNX 'perm' -> MXNet 'axes')."""
    transpose_attrs = translation_utils._fix_attribute_names(attrs,
                                                             {'perm' : 'axes'})
    return 'transpose', transpose_attrs, inputs
def squeeze(attrs, inputs, proto_obj):
    """Remove single-dimensional entries from the shape (ONNX 'axes' -> MXNet 'axis')."""
    squeeze_attrs = translation_utils._fix_attribute_names(attrs,
                                                           {'axes' : 'axis'})
    return 'squeeze', squeeze_attrs, inputs
def unsqueeze(attrs, inputs, cls):
    """Inserts a new axis of size 1 into the array shape.

    MXNet's expand_dims inserts a single axis per call, so one call is
    chained per requested axis.
    """
    expanded = inputs[0]
    for ax in attrs["axes"]:
        expanded = symbol.expand_dims(expanded, axis=ax)
    return expanded, attrs, inputs
def flatten(attrs, inputs, proto_obj):
    """Flattens the input array into a 2-D array by collapsing the higher dimensions."""
    # MXNet's Flatten has no axis support; only the ONNX default axis=1 maps onto it
    if attrs.get('axis', 1) != 1:
        raise RuntimeError("Flatten operator only supports axis=1")
    return 'Flatten', translation_utils._remove_attributes(attrs, ['axis']), inputs
def clip(attrs, inputs, proto_obj):
    """Clips (limits) the values in an array.

    From opset 11 on, the optional min/max bounds arrive as extra initializer
    inputs; for older opsets they are the 'min'/'max' attributes.  A missing
    bound defaults to -inf/+inf, i.e. the value is unclipped on that side.
    """
    opset_version = proto_obj.opset_version
    if opset_version >= 11:
        if len(inputs) == 1:
            # Bug fix: 'new_attrs' was read before assignment here, raising
            # UnboundLocalError; the extras must be built from 'attrs'.
            new_attrs = translation_utils._add_extra_attributes(attrs, {'a_max' : np.inf,
                                                                        'a_min' : -np.inf})
        elif len(inputs) == 2:
            min_float = proto_obj._params[inputs[1].name].asnumpy()
            new_attrs = translation_utils._add_extra_attributes(attrs, {'a_min': min_float[0],
                                                                        'a_max': np.inf})
        elif len(inputs) == 3:
            min_float = proto_obj._params[inputs[1].name].asnumpy()
            max_float = proto_obj._params[inputs[2].name].asnumpy()
            new_attrs = translation_utils._add_extra_attributes(attrs, {'a_min': min_float[0],
                                                                        'a_max': max_float[0]})
    else:
        new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
                                                                   'max' : 'a_max'})
        if 'a_max' not in new_attrs:
            new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
        if 'a_min' not in new_attrs:
            new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
    return 'clip', new_attrs, inputs[0]
def gather(attrs, inputs, proto_obj):
    """Gather elements along an axis via MXNet's 'take'."""
    op_name = 'take'
    return op_name, attrs, inputs
#Powers
def reciprocal(attrs, inputs, proto_obj):
    """Elementwise reciprocal of the input."""
    op_name = 'reciprocal'
    return op_name, attrs, inputs
def squareroot(attrs, inputs, proto_obj):
    """Elementwise square root of the input."""
    op_name = 'sqrt'
    return op_name, attrs, inputs
def power(attrs, inputs, proto_obj):
    """Elementwise power: base input raised to the exponent input."""
    pow_attrs = translation_utils._fix_attribute_names(attrs, {'exponent':'exp'})
    if 'broadcast' not in attrs:
        # no legacy broadcast flag: always broadcast
        return symbol.broadcast_power(inputs[0], inputs[1]), pow_attrs, inputs
    pow_attrs = translation_utils._remove_attributes(pow_attrs, ['broadcast'])
    if attrs['broadcast'] == 1:
        return 'broadcast_power', pow_attrs, inputs
    return symbol.pow(inputs[0], inputs[1]), pow_attrs, inputs
def exponent(attrs, inputs, proto_obj):
    """Elementwise exponential of the input."""
    op_name = 'exp'
    return op_name, attrs, inputs
def _cos(attrs, inputs, proto_obj):
"""Elementwise cosine of input array."""
return 'cos', attrs, inputs
def _sin(attrs, inputs, proto_obj):
"""Elementwise sine of input array."""
return 'sin', attrs, inputs
def _tan(attrs, inputs, proto_obj):
"""Elementwise tan of input array."""
return 'tan', attrs, inputs
def arccos(attrs, inputs, proto_obj):
    """Elementwise inverse cosine of the input."""
    op_name = 'arccos'
    return op_name, attrs, inputs
def arcsin(attrs, inputs, proto_obj):
    """Elementwise inverse sine of the input."""
    op_name = 'arcsin'
    return op_name, attrs, inputs
def arctan(attrs, inputs, proto_obj):
    """Elementwise inverse tangent of the input."""
    op_name = 'arctan'
    return op_name, attrs, inputs
def _log(attrs, inputs, proto_obj):
"""Elementwise log of input array."""
return 'log', attrs, inputs
# Reduce Functions
def reduce_max(attrs, inputs, proto_obj):
    """Reduce by maximum along the given axes (ONNX 'axes' -> MXNet 'axis')."""
    reduce_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'max', reduce_attrs, inputs
def reduce_mean(attrs, inputs, proto_obj):
    """Reduce by mean along the given axes (ONNX 'axes' -> MXNet 'axis')."""
    reduce_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'mean', reduce_attrs, inputs
def reduce_min(attrs, inputs, proto_obj):
    """Reduce by minimum along the given axes (ONNX 'axes' -> MXNet 'axis')."""
    reduce_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'min', reduce_attrs, inputs
def reduce_sum(attrs, inputs, proto_obj):
    """Reduce by sum along the given axes (ONNX 'axes' -> MXNet 'axis')."""
    reduce_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'sum', reduce_attrs, inputs
def reduce_prod(attrs, inputs, proto_obj):
    """Reduce by product along the given axes (ONNX 'axes' -> MXNet 'axis')."""
    reduce_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'prod', reduce_attrs, inputs
def reduce_log_sum(attrs, inputs, proto_obj):
    """Reduce by log of the sum along the given axes."""
    # ONNX default keepdims is 1 (truthy)
    keep_dims = attrs.get('keepdims') if 'keepdims' in attrs else True
    summed = symbol.sum(inputs[0], axis=attrs.get('axes'),
                        keepdims=keep_dims)
    return symbol.log(summed), attrs, inputs
def reduce_log_sum_exp(attrs, inputs, proto_obj):
    """Reduce by log of the sum of exponentials along the given axes."""
    # ONNX default keepdims is 1 (truthy)
    keep_dims = attrs.get('keepdims') if 'keepdims' in attrs else True
    exponentiated = symbol.exp(inputs[0])
    summed = symbol.sum(exponentiated, axis=attrs.get('axes'),
                        keepdims=keep_dims)
    return symbol.log(summed), attrs, inputs
def reduce_sum_square(attrs, inputs, proto_obj):
    """Reduce by the sum of squares along the given axes."""
    squared = symbol.square(inputs[0])
    summed = symbol.sum(squared, axis=attrs.get('axes'),
                        keepdims=attrs.get('keepdims'))
    return summed, attrs, inputs
def reduce_l1(attrs, inputs, proto_obj):
    """Reduce input tensor by its L1 norm along the given axes."""
    norm_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    norm_attrs = translation_utils._add_extra_attributes(norm_attrs,
                                                         {'ord' : 1})
    return 'norm', norm_attrs, inputs
def shape(attrs, inputs, proto_obj):
    """Returns the shape of the input as an array."""
    op_name = 'shape_array'
    return op_name, attrs, inputs
def size(attrs, inputs, proto_obj):
    """Returns the total element count of the input as an array."""
    op_name = "size_array"
    return op_name, attrs, inputs
def reduce_l2(attrs, inputs, proto_obj):
    """Reduce input tensor by its L2 norm along the given axes."""
    norm_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'norm', norm_attrs, inputs
def avg_pooling(attrs, inputs, proto_obj):
    """Average pooling."""
    pool_attrs = translation_utils._fix_attribute_names(attrs,
                                                        {'kernel_shape': 'kernel',
                                                         'strides': 'stride',
                                                         'pads': 'pad',
                                                        })
    pool_attrs = translation_utils._add_extra_attributes(pool_attrs,
                                                         {'pooling_convention': 'valid'
                                                         })
    pool_op = translation_utils._fix_pooling('avg', inputs, pool_attrs)
    return pool_op, pool_attrs, inputs
def lp_pooling(attrs, inputs, proto_obj):
    """LP pooling (ONNX 'p' -> MXNet 'p_value', default 2)."""
    p_value = attrs.get('p', 2)
    pool_attrs = translation_utils._fix_attribute_names(attrs,
                                                        {'kernel_shape': 'kernel',
                                                         'strides': 'stride',
                                                         'pads': 'pad'
                                                        })
    pool_attrs = translation_utils._remove_attributes(pool_attrs, ['p'])
    pool_attrs = translation_utils._add_extra_attributes(pool_attrs,
                                                         {'pooling_convention': 'valid',
                                                          'p_value': p_value
                                                         })
    pool_op = translation_utils._fix_pooling('lp', inputs, pool_attrs)
    return pool_op, pool_attrs, inputs
def max_pooling(attrs, inputs, proto_obj):
    """Max pooling.

    Translates ONNX MaxPool attribute names onto MXNet's Pooling operator
    and returns (op, attrs, inputs) like the other translators here.
    """
    # NOTE: docstring previously said "Average pooling" (copy-paste error);
    # the logic itself has always used pool type 'max'.
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'kernel_shape': 'kernel',
                                                        'strides': 'stride',
                                                        'pads': 'pad',
                                                       })
    new_attrs = translation_utils._add_extra_attributes(new_attrs,
                                                        {'pooling_convention': 'valid'
                                                        })
    new_op = translation_utils._fix_pooling('max', inputs, new_attrs)
    return new_op, new_attrs, inputs
def max_roi_pooling(attrs, inputs, proto_obj):
    """Max ROI Pooling."""
    # NOTE(review): the identity mapping for 'spatial_scale' is preserved from
    # the original; presumably it keeps the attribute recognized — confirm
    # against translation_utils._fix_attribute_names.
    renamed = translation_utils._fix_attribute_names(
        attrs,
        {'pooled_shape': 'pooled_size',
         'spatial_scale': 'spatial_scale'})
    return 'ROIPooling', renamed, inputs
def depthtospace(attrs, inputs, proto_obj):
    """Rearranges data from depth into blocks of spatial data."""
    # ONNX names the attribute 'blocksize'; MXNet expects 'block_size'.
    renamed = translation_utils._fix_attribute_names(attrs, {'blocksize': 'block_size'})
    return "depth_to_space", renamed, inputs
def spacetodepth(attrs, inputs, proto_obj):
    """Rearranges blocks of spatial data into depth."""
    # ONNX names the attribute 'blocksize'; MXNet expects 'block_size'.
    renamed = translation_utils._fix_attribute_names(attrs, {'blocksize': 'block_size'})
    return "space_to_depth", renamed, inputs
def hardmax(attrs, inputs, proto_obj):
    """Returns batched one-hot vectors."""
    # Static input shape comes from the ONNX model metadata (first input).
    # NOTE(review): assumes the model declares a static shape here — confirm.
    input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0]
    input_shape = input_tensor_data[1]
    axis = int(attrs.get('axis', 1))
    # Normalize a negative axis to its positive equivalent.
    axis = axis if axis >= 0 else len(input_shape) + axis
    if axis == len(input_shape) - 1:
        # Fast path: argmax/one_hot directly along the last axis.
        amax = symbol.argmax(inputs[0], axis=-1)
        one_hot = symbol.one_hot(amax, depth=input_shape[-1])
        return one_hot, attrs, inputs
    # since reshape doesn't take a tensor for shape,
    # computing with np.prod. This needs to be changed to
    # to use mx.sym.prod() when mx.sym.reshape() is fixed.
    # (https://github.com/apache/incubator-mxnet/issues/10789)
    # Collapse to 2-D so argmax can run along the last axis, then restore.
    new_shape = (int(np.prod(input_shape[:axis])),
                 int(np.prod(input_shape[axis:])))
    reshape_op = symbol.reshape(inputs[0], new_shape)
    amax = symbol.argmax(reshape_op, axis=-1)
    one_hot = symbol.one_hot(amax, depth=new_shape[-1])
    hardmax_op = symbol.reshape(one_hot, input_shape)
    return hardmax_op, attrs, inputs
def lpnormalization(attrs, inputs, proto_obj):
    """ONNX does not have eps attribute, so cannot map it to L2normalization in MXNet
    without that, it works as norm operator discussion in PR:
    https://github.com/onnx/onnx/pull/1330"""
    # Rename 'p' -> 'ord' and pin the axis (ONNX defaults to the last axis).
    new_attrs = translation_utils._fix_attribute_names(attrs, {'p': 'ord'})
    new_attrs.update(axis=int(attrs.get("axis", -1)))
    return 'norm', new_attrs, inputs
def topk(attrs, inputs, proto_obj):
    """Returns the top k elements in an input array along the given axis."""
    new_attrs = translation_utils._add_extra_attributes(
        attrs, {'ret_typ': 'both',
                'dtype': 'int64'})
    if proto_obj.opset_version >= 10:
        # From opset 10 on, ONNX passes k as a second input tensor; fold its
        # value into the attrs and forward only the data input.
        k_vals = proto_obj._params[inputs[1].name].asnumpy()
        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'k': k_vals})
        return 'topk', new_attrs, inputs[0]
    return 'topk', new_attrs, inputs
| leezu/mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | Python | apache-2.0 | 37,203 | [
"Gaussian"
] | 653df359680aef6e22e9505410c6ebde50f260ac1ce64da20e96a5ae684261fb |
# -*- coding: utf-8 -*-
#
# kmos documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 19 14:58:17 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
# document all members of a autodocced class
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kmos'
copyright = u'2009-2013, Max J. Hoffmann'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from kmos import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'img/kmos_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {'**':['globaltoc.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'kmosdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'kmos.tex', u'kmos Documentation',
u'Max J. Hoffmann', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
todo_include_todos = True
# Mock some modules
import sys
class Mock(object):
    """Stand-in used to fake modules that may be missing at doc-build time."""
    def __init__(self, *args, **kwargs):
        pass
    def __call__(self, *args, **kwargs):
        # Calling a mock (e.g. a mocked function) yields another mock.
        return Mock()
    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes __file__/__path__ when a Mock stands in for a module.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # CamelCase attributes look like classes: hand back a fresh type so
        # autodoc can introspect/subclass them.
        if name[0] == name[0].upper():
            fake_class = type(name, (), {})
            fake_class.__module__ = __name__
            return fake_class
        return Mock()
# Modules that may be unavailable on the documentation build host; each is
# replaced by a Mock so importing kmos for autodoc still succeeds.
MOCK_MODULES = ['lxml',
                'ase',
                'ase.atoms',
                'ase.data',
                'ase.gui',
                'ase.gui.images',
                'goocanvas',
                'kiwi',
                'kiwi.ui',
                'kiwi.ui.delegates',
                'kiwi.ui.dialogs',
                'kiwi.ui.objectlist',
                'kiwi.ui.views',
                'kiwi.datatypes',
                ]
# Pre-registering a Mock under each name makes subsequent imports succeed.
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
| mieand/kmos | doc/source/conf.py | Python | gpl-3.0 | 7,963 | [
"ASE"
] | a59c97463152f23066388189dce8d066781c0c81088fc8094c9bf00d01b12f31 |
#!/usr/bin/python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Alchemical factory for free energy calculations that operates directly on OpenMM swig System objects.
DESCRIPTION
This module contains enumerative factories for generating alchemically-modified System objects
usable for the calculation of free energy differences of hydration or ligand binding.
The code in this module operates directly on OpenMM Swig-wrapped System objects for efficiency.
EXAMPLES
COPYRIGHT
@author John D. Chodera <jchodera@gmail.com>
All code in this repository is released under the GNU General Public License.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
TODO
* Can we store serialized form of Force objects so that we can save time in reconstituting
Force objects when we make copies? We can even manipulate the XML representation directly.
* Allow protocols to automatically be resized to arbitrary number of states, to
allow number of states to be enlarged to be an integral multiple of number of GPUs.
* Add GBVI support to AlchemicalFactory.
* Add analytical dispersion correction to softcore Lennard-Jones, or find some other
way to deal with it (such as simply omitting it from lambda < 1 states).
* Deep copy Force objects that don't need to be modified instead of using explicit
handling routines to copy data. Eventually replace with removeForce once implemented?
* Can alchemically-modified System objects share unmodified Force objects to avoid overhead
of duplicating Forces that are not modified?
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
import numpy
import copy
import time
import simtk.openmm as openmm
from sets import Set
from alchemy import AlchemicalState, AbsoluteAlchemicalFactory
#=============================================================================================
# MAIN AND UNIT TESTS
#=============================================================================================
def testAlchemicalFactory(reference_system, coordinates, receptor_atoms, ligand_atoms, platform_name='Reference', annihilateElectrostatics=True, annihilateLennardJones=False):
    """
    Compare energies of reference system and fully-interacting alchemically modified system.
    ARGUMENTS
    reference_system (simtk.openmm.System) - the reference System object to compare with
    coordinates - the coordinates to assess energetics for
    receptor_atoms (list of int) - the list of receptor atoms
    ligand_atoms (list of int) - the list of ligand atoms to alchemically modify
    platform_name (str, optional) - OpenMM platform used for both evaluations (default: 'Reference')
    annihilateElectrostatics (bool, optional) - forwarded to the AlchemicalState (default: True)
    annihilateLennardJones (bool, optional) - forwarded to the AlchemicalState (default: False)
    RETURNS
    delta - alchemical potential energy minus reference potential energy
    """
    import simtk.unit as units
    import simtk.openmm as openmm
    import time
    # Create a factory to produce alchemical intermediates.
    print "Creating alchemical factory..."
    initial_time = time.time()
    factory = AbsoluteAlchemicalFactory(reference_system, ligand_atoms=ligand_atoms)
    final_time = time.time()
    elapsed_time = final_time - initial_time
    print "AbsoluteAlchemicalFactory initialization took %.3f s" % elapsed_time
    # Create an alchemically-perturbed state corresponding to nearly fully-interacting.
    # NOTE: We use a lambda slightly smaller than 1.0 because the AlchemicalFactory does not use Custom*Force softcore versions if lambda = 1.0 identically.
    lambda_value = 1.0 - 1.0e-6
    alchemical_state = AlchemicalState(0.00, lambda_value, lambda_value, lambda_value)
    alchemical_state.annihilateElectrostatics = annihilateElectrostatics
    alchemical_state.annihilateLennardJones = annihilateLennardJones
    #platform_name = 'Reference' # DEBUG
    platform = openmm.Platform.getPlatformByName(platform_name)
    # Create the perturbed system.
    print "Creating alchemically-modified state..."
    initial_time = time.time()
    alchemical_system = factory.createPerturbedSystem(alchemical_state)
    final_time = time.time()
    elapsed_time = final_time - initial_time
    # Compare energies.
    timestep = 1.0 * units.femtosecond
    print "Computing reference energies..."
    reference_integrator = openmm.VerletIntegrator(timestep)
    reference_context = openmm.Context(reference_system, reference_integrator, platform)
    reference_context.setPositions(coordinates)
    reference_state = reference_context.getState(getEnergy=True)
    reference_potential = reference_state.getPotentialEnergy()
    print "Computing alchemical energies..."
    alchemical_integrator = openmm.VerletIntegrator(timestep)
    alchemical_context = openmm.Context(alchemical_system, alchemical_integrator, platform)
    alchemical_context.setPositions(coordinates)
    # NOTE(review): 'alchemical_state' is rebound here from an AlchemicalState
    # to an OpenMM State object — confusing but harmless since the original
    # binding is no longer needed.
    alchemical_state = alchemical_context.getState(getEnergy=True)
    alchemical_potential = alchemical_state.getPotentialEnergy()
    delta = alchemical_potential - reference_potential
    print "reference system : %24.8f kcal/mol" % (reference_potential / units.kilocalories_per_mole)
    print "alchemically modified : %24.8f kcal/mol" % (alchemical_potential / units.kilocalories_per_mole)
    print "ERROR : %24.8f kcal/mol" % ((alchemical_potential - reference_potential) / units.kilocalories_per_mole)
    print "elapsed alchemical time %.3f s" % elapsed_time
    return delta
def test_overlap():
    """
    Scan a ligand through a receptor atom to probe softcore overlap energies.
    BUGS TO REPORT:
    * Even if epsilon = 0, energy of two overlapping atoms is 'nan'.
    * Periodicity in 'nan' if dr = 0.1 even in nonperiodic system
    """
    # Create a reference system.
    import testsystems
    print "Creating Lennard-Jones cluster system..."
    #[reference_system, coordinates] = testsystems.LennardJonesFluid()
    #receptor_atoms = [0]
    #ligand_atoms = [1]
    [reference_system, coordinates] = testsystems.LysozymeImplicit()
    receptor_atoms = range(0,2603) # T4 lysozyme L99A
    ligand_atoms = range(2603,2621) # p-xylene
    import simtk.unit as units
    unit = coordinates.unit
    # Strip and re-wrap units so coordinates become a mutable numpy array.
    coordinates = units.Quantity(numpy.array(coordinates / unit), unit)
    factory = AbsoluteAlchemicalFactory(reference_system, ligand_atoms=ligand_atoms)
    # Fully decoupled electrostatics/sterics; last lambda set to 1.
    alchemical_state = AlchemicalState(0.00, 0.00, 0.00, 1.0)
    # Create the perturbed system.
    print "Creating alchemically-modified state..."
    alchemical_system = factory.createPerturbedSystem(alchemical_state)
    # Compare energies.
    import simtk.unit as units
    import simtk.openmm as openmm
    timestep = 1.0 * units.femtosecond
    print "Computing reference energies..."
    integrator = openmm.VerletIntegrator(timestep)
    context = openmm.Context(reference_system, integrator)
    context.setPositions(coordinates)
    state = context.getState(getEnergy=True)
    reference_potential = state.getPotentialEnergy()
    del state, context, integrator
    print reference_potential
    print "Computing alchemical energies..."
    integrator = openmm.VerletIntegrator(timestep)
    context = openmm.Context(alchemical_system, integrator)
    dr = 0.1 * units.angstroms # TODO: Why does 0.1 cause periodic 'nan's?
    a = receptor_atoms[-1]
    b = ligand_atoms[-1]
    delta = coordinates[a,:] - coordinates[b,:]
    # Translate the ligand onto the last receptor atom, then scan it outward
    # along x in steps of dr, printing the energy at each separation.
    for k in range(3):
        coordinates[ligand_atoms,k] += delta[k]
    for i in range(30):
        r = dr * i
        coordinates[ligand_atoms,0] += dr
        context.setPositions(coordinates)
        state = context.getState(getEnergy=True)
        alchemical_potential = state.getPotentialEnergy()
        print "%8.3f A : %f " % (r / units.angstroms, alchemical_potential / units.kilocalories_per_mole)
    del state, context, integrator
    return
def test_intermediates():
    """Check alchemical-intermediate energy accuracy on a suite of test systems."""
    # Run tests on individual systems.
    import testsystems
    print "Creating Lennard-Jones fluid system without dispersion correction..."
    [reference_system, coordinates] = testsystems.LennardJonesFluid(dispersion_correction=False)
    ligand_atoms = range(0,1) # first atom
    receptor_atoms = range(2,3) # second atom
    testAlchemicalFactory(reference_system, coordinates, receptor_atoms, ligand_atoms)
    print ""
    print "Creating Lennard-Jones fluid system with dispersion correction..."
    [reference_system, coordinates] = testsystems.LennardJonesFluid(dispersion_correction=True)
    ligand_atoms = range(0,1) # first atom
    receptor_atoms = range(2,3) # second atom
    testAlchemicalFactory(reference_system, coordinates, receptor_atoms, ligand_atoms)
    print ""
    print "Creating T4 lysozyme system..."
    [reference_system, coordinates] = testsystems.LysozymeImplicit()
    receptor_atoms = range(0,2603) # T4 lysozyme L99A
    ligand_atoms = range(2603,2621) # p-xylene
    testAlchemicalFactory(reference_system, coordinates, receptor_atoms, ligand_atoms)
    print ""
    print "Creating Lennard-Jones cluster..."
    [reference_system, coordinates] = testsystems.LennardJonesCluster()
    ligand_atoms = range(0,1) # first atom
    receptor_atoms = range(1,2) # second atom
    testAlchemicalFactory(reference_system, coordinates, receptor_atoms, ligand_atoms)
    print ""
    print "Creating alanine dipeptide implicit system..."
    [reference_system, coordinates] = testsystems.AlanineDipeptideImplicit()
    ligand_atoms = range(0,4) # methyl group
    receptor_atoms = range(4,22) # rest of system
    testAlchemicalFactory(reference_system, coordinates, receptor_atoms, ligand_atoms)
    print ""
    print "Creating alanine dipeptide explicit system..."
    [reference_system, coordinates] = testsystems.AlanineDipeptideExplicit()
    ligand_atoms = range(0,22) # alanine residue
    receptor_atoms = range(22,25) # one water
    testAlchemicalFactory(reference_system, coordinates, receptor_atoms, ligand_atoms)
    print ""
    print "Creating alanine dipeptide explicit system without dispersion correction..."
    # NOTE(review): this case reuses the reference_system from the previous
    # AlanineDipeptideExplicit() call and mutates its NonbondedForce in place
    # instead of constructing a fresh system — looks intentional, but confirm.
    #forces = { reference_system.getForce(index).__class__.__name__ : reference_system.getForce(index)) for index in range(reference_system.getNumForces()) } # requires Python 2.7 features
    forces = dict( (reference_system.getForce(index).__class__.__name__, reference_system.getForce(index)) for index in range(reference_system.getNumForces()) ) # python 2.6 compatible
    forces['NonbondedForce'].setUseDispersionCorrection(False) # turn off dispersion correction
    ligand_atoms = range(0,22) # alanine residue
    receptor_atoms = range(22,25) # one water
    testAlchemicalFactory(reference_system, coordinates, receptor_atoms, ligand_atoms)
    print ""
#=============================================================================================
# MAIN
#=============================================================================================
if __name__ == "__main__":
    # Script entry point: exercises the alchemical factory on the test systems.
    # Run overlap tests.
    #test_overlap()
    # Test energy accuracy of intermediates near lambda = 1.
    test_intermediates()
| choderalab/brokenyank | src/yank/alchemy-tests.py | Python | lgpl-3.0 | 11,648 | [
"OpenMM"
] | 678855a2441c15ff2b4ee6bbc786d193ca11f9de8206330d1513850e701cff86 |
# proxy module
from __future__ import absolute_import
from mayavi.sources.poly_data_reader import *
| enthought/etsproxy | enthought/mayavi/sources/poly_data_reader.py | Python | bsd-3-clause | 100 | [
"Mayavi"
] | 8e1192105594fce0574f6204b1b3ff5ab95f988c38bc159983111192eeaf02da |
# TODO: to be removed in a future release, ATLAS pipeline was purely developmental
# import os.path as op
# import pytest
# import ssbio.io
# import shutil
# import pandas as pd
# from ssbio.pipeline.atlas2 import ATLAS2
#
#
# @pytest.fixture(scope='class')
# def reference_gempro(test_files_gempro):
# gp = ssbio.io.load_json(op.join(test_files_gempro, 'test_id_dir_json', 'model', 'mini_gp.json'))
# gp.root_dir = test_files_gempro
# return gp
#
#
# @pytest.fixture(scope='class')
# def test_strains():
# return ['585395.4', '1169323.3', '1068619.3', '1005537.3', '868163.3']
#
#
# @pytest.fixture(scope='class')
# def strains_to_fasta(test_strains, test_files_atlas):
# d = {}
# for s in test_strains:
# d[s] = op.join(test_files_atlas, '{}.PATRIC.faa'.format(s))
# return d
#
#
# @pytest.fixture(scope='class')
# def premade_orthology(test_files_atlas):
# """Custom altered orthology matrix"""
# return op.join(test_files_atlas, 'mini_orth_matrix.csv')
#
#
# @pytest.fixture(scope='class')
# def my_atlas(reference_gempro, test_files_tempdir):
# return ATLAS2(atlas_name='test_atlas', root_dir=test_files_tempdir,
# reference_gempro=reference_gempro, reference_genome_path=reference_gempro.genome_path)
#
#
# class TestATLAS():
# @pytest.mark.run(order=1)
# def test_init(self, my_atlas):
# assert op.exists(my_atlas.base_dir)
# assert op.exists(my_atlas.model_dir)
# assert op.exists(my_atlas.data_dir)
# assert op.exists(my_atlas.sequences_dir)
# assert op.exists(my_atlas.sequences_by_gene_dir)
# assert op.exists(my_atlas.sequences_by_organism_dir)
#
# @pytest.mark.run(order=2)
# def test_load_strains(self, my_atlas, strains_to_fasta):
#
# my_atlas.load_strains(strains_to_fasta)
#
# assert len(my_atlas.strains) == len(strains_to_fasta)
# for s in my_atlas.strains:
# assert s.id in strains_to_fasta
# # Genome paths should be linked
# assert s.genome_path == strains_to_fasta[s.id]
# # No genes stored yet
# assert len(s.genes) == 0
#
# @pytest.mark.run(order=3)
# def test_get_orthology_matrix(self, my_atlas, premade_orthology):
# # Have to copy the premade DF to the atlas data_dir, just check if it was copied
# shutil.copy(premade_orthology, my_atlas.data_dir)
# assert op.exists(op.join(my_atlas.data_dir, op.basename(premade_orthology)))
#
# my_atlas.get_orthology_matrix(outfile=op.basename(premade_orthology), force_rerun=False)
# assert isinstance(my_atlas.df_orthology_matrix, pd.DataFrame)
#
# @pytest.mark.run(order=4)
# def test_filter_genes_and_strains(self, my_atlas, test_strains):
#
# for g in my_atlas.reference_gempro.genes:
# assert g.functional
#
# functional_genes = ['b0755', 'b0875', 'b1101', 'b1380', 'b1621', 'b1676', 'b1723', 'b1773', 'b1779', 'b1817',
# 'b1818', 'b1819', 'b1854', 'b2097', 'b2133', 'b2415', 'b2416', 'b2975', 'b2987', 'b3493',
# 'b3603', 'b2092', 'b0002']
#
# my_atlas.filter_genes_and_strains(custom_keep_strains=test_strains[:4])
#
# assert len(my_atlas.strains) == 4
#
# for g in my_atlas.reference_gempro.genes:
# if g.id in functional_genes:
# assert g.functional
# else:
# assert not g.functional
#
# @pytest.mark.run(order=5)
# def test_build_strain_specific_models(self, my_atlas):
#
# my_atlas.build_strain_specific_models(force_rerun=True)
#
# for s in my_atlas.strains:
# assert op.exists(op.join(my_atlas.model_dir, '{}_gp.pckl'.format(s.id)))
# assert len(s.genes) == len(my_atlas.reference_gempro.genes)
# assert len(s.functional_genes) < len(s.genes)
#
# for g in my_atlas.reference_gempro.functional_genes:
# if s.id == '585395.4' and g.id == 'b1773':
# assert not s.genes.get_by_id(g.id).functional
# elif s.id == '1169323.3' and g.id == 'b2133':
# assert not s.genes.get_by_id(g.id).functional
# elif s.id == '1068619.3' and g.id == 'b1779':
# assert not s.genes.get_by_id(g.id).functional
# elif s.id == '1005537.3' and (g.id == 'b2975' or g.id == 'b2987'):
# assert not s.genes.get_by_id(g.id).functional
# else:
# assert s.genes.get_by_id(g.id).functional
#
# @pytest.mark.run(order=6)
# def test_load_sequences_to_strains(self, my_atlas):
#
# my_atlas.load_sequences_to_strains(force_rerun=True)
#
# for s in my_atlas.strains:
# assert op.exists(op.join(my_atlas.model_dir, '{}_gp_withseqs.pckl'.format(s.id)))
# for g in s.functional_genes:
# assert g.protein.representative_sequence
#
# @pytest.mark.run(order=7)
# def test_load_sequences_to_reference(self, my_atlas):
#
# my_atlas.load_sequences_to_reference(force_rerun=True)
#
# for g in my_atlas.reference_gempro.functional_genes:
# assert op.exists(op.join(my_atlas.sequences_by_gene_dir, '{}_protein_withseqs.pckl'.format(g.id)))
# for s in my_atlas.strains:
# check_id = '{}_{}'.format(s.genes.get_by_id(g.id).id, s.id)
# if s.genes.get_by_id(g.id).functional:
# assert g.protein.sequences.has_id(check_id)
# else:
# assert not g.protein.sequences.has_id(check_id)
#
# @pytest.mark.run(order=8)
# def test_align_orthologous_genes_pairwise(self, my_atlas):
#
# my_atlas.align_orthologous_genes_pairwise(engine='biopython', force_rerun=True)
#
# for g in my_atlas.reference_gempro.functional_genes:
# if g.protein.representative_sequence:
# assert op.exists(op.join(my_atlas.sequences_by_gene_dir, '{}_protein_withseqs_aln.pckl'.format(g.id)))
# for s in my_atlas.strains:
# check_id = '{}_{}_{}'.format(g.id, s.genes.get_by_id(g.id).id, s.id)
# if s.genes.get_by_id(g.id).functional:
# assert g.protein.sequence_alignments.has_id(check_id)
# else:
# assert not g.protein.sequence_alignments.has_id(check_id)
# else:
# assert len(g.protein.sequence_alignments) == 0 | SBRG/ssbio | ssbio/test/test_pipeline_atlas.py | Python | mit | 6,592 | [
"Biopython"
] | 45eb43ec2dc131d99f556f791c3c1224ab415c7410b7844430028897190f3f1a |
../../../../../../../../share/pyshared/orca/scripts/apps/yelp/yelp_v3/script.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/yelp/yelp_v3/script.py | Python | gpl-3.0 | 79 | [
"ORCA"
] | 9a4ff0724360e8da9705c4930fe6f238003d2dc951ce31406a69c6f9702ea497 |
# encoding: utf-8
"""A simple configuration system."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import argparse
import copy
import logging
import os
import re
import sys
import json
from ast import literal_eval
from ipython_genutils.path import filefind
from ipython_genutils import py3compat
from ipython_genutils.encoding import DEFAULT_ENCODING
from ipython_genutils.py3compat import unicode_type, iteritems
from traitlets.traitlets import HasTraits, List, Any
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
    """Base class for errors raised by the config system."""
    pass
class ConfigLoaderError(ConfigError):
    """Error raised while a config loader is processing configuration."""
    pass
class ConfigFileNotFound(ConfigError):
    """Raised when a requested configuration file cannot be located."""
    pass
class ArgumentError(ConfigLoaderError):
    """Raised for invalid command-line arguments during config loading."""
    pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
    """Simple argparse subclass that prints help to stdout by default."""
    def print_help(self, file=None):
        # argparse writes help to stderr by default; route a missing target
        # to stdout so help screens are easy to pipe/capture.
        target = sys.stdout if file is None else file
        return super(ArgumentParser, self).print_help(target)
    print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class LazyConfigValue(HasTraits):
    """Proxy object for exposing methods on configurable containers
    Exposes:
    - append, extend, insert on lists
    - update on dicts
    - update, add on sets
    """
    # Cached result of get_value(); stays None until first computed.
    _value = None
    # list methods
    _extend = List()
    _prepend = List()
    def append(self, obj):
        """Record a deferred list.append(obj)."""
        self._extend.append(obj)
    def extend(self, other):
        """Record a deferred list.extend(other)."""
        self._extend.extend(other)
    def prepend(self, other):
        """like list.extend, but for the front"""
        self._prepend[:0] = other
    _inserts = List()
    def insert(self, index, other):
        """Record a deferred list.insert(index, other)."""
        if not isinstance(index, int):
            raise TypeError("An integer is required")
        self._inserts.append((index, other))
    # dict methods
    # update is used for both dict and set
    _update = Any()
    def update(self, other):
        """Record a deferred dict/set update with `other`."""
        if self._update is None:
            # Choose the container type lazily from the first operand seen.
            if isinstance(other, dict):
                self._update = {}
            else:
                self._update = set()
        self._update.update(other)
    # set methods
    def add(self, obj):
        """Record a deferred set.add(obj)."""
        self.update({obj})
    def get_value(self, initial):
        """construct the value from the initial one
        after applying any insert / extend / update changes
        """
        if self._value is not None:
            return self._value
        value = copy.deepcopy(initial)
        if isinstance(value, list):
            # Apply inserts first, then prepends, then extends.
            for idx, obj in self._inserts:
                value.insert(idx, obj)
            value[:0] = self._prepend
            value.extend(self._extend)
        elif isinstance(value, dict):
            if self._update:
                value.update(self._update)
        elif isinstance(value, set):
            if self._update:
                value.update(self._update)
        self._value = value
        return value
    def to_dict(self):
        """return JSONable dict form of my data
        Currently update as dict or set, extend, prepend as lists, and inserts as list of tuples.
        """
        d = {}
        if self._update:
            d['update'] = self._update
        if self._extend:
            d['extend'] = self._extend
        # NOTE(review): the elif means 'inserts' are dropped whenever
        # '_prepend' is non-empty — looks suspicious; confirm upstream intent.
        if self._prepend:
            d['prepend'] = self._prepend
        elif self._inserts:
            d['inserts'] = self._inserts
        return d
def _is_section_key(key):
"""Is a Config key a section name (does it start with a capital)?"""
if key and key[0].upper()==key[0] and not key.startswith('_'):
return True
else:
return False
class Config(dict):
"""An attribute based dict that can do smart merges."""
    def __init__(self, *args, **kwds):
        """Create a Config; section-like sub-dicts are coerced to Config."""
        dict.__init__(self, *args, **kwds)
        self._ensure_subconfig()
def _ensure_subconfig(self):
"""ensure that sub-dicts that should be Config objects are
casts dicts that are under section keys to Config objects,
which is necessary for constructing Config objects from dict literals.
"""
for key in self:
obj = self[key]
if _is_section_key(key) \
and isinstance(obj, dict) \
and not isinstance(obj, Config):
setattr(self, key, Config(obj))
def _merge(self, other):
"""deprecated alias, use Config.merge()"""
self.merge(other)
def merge(self, other):
"""merge another config object into this one"""
to_update = {}
for k, v in iteritems(other):
if k not in self:
to_update[k] = v
else: # I have this key
if isinstance(v, Config) and isinstance(self[k], Config):
# Recursively merge common sub Configs
self[k].merge(v)
else:
# Plain updates for non-Configs
to_update[k] = v
self.update(to_update)
def collisions(self, other):
"""Check for collisions between two config objects.
Returns a dict of the form {"Class": {"trait": "collision message"}}`,
indicating which values have been ignored.
An empty dict indicates no collisions.
"""
collisions = {}
for section in self:
if section not in other:
continue
mine = self[section]
theirs = other[section]
for key in mine:
if key in theirs and mine[key] != theirs[key]:
collisions.setdefault(section, {})
collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
return collisions
def __contains__(self, key):
# allow nested contains of the form `"Section.key" in config`
if '.' in key:
first, remainder = key.split('.', 1)
if first not in self:
return False
return remainder in self[first]
return super(Config, self).__contains__(key)
# .has_key is deprecated for dictionaries.
has_key = __contains__
def _has_section(self, key):
return _is_section_key(key) and key in self
def copy(self):
return type(self)(dict.copy(self))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
new_config = type(self)()
for key, value in self.items():
if isinstance(value, (Config, LazyConfigValue)):
# deep copy config objects
value = copy.deepcopy(value, memo)
elif type(value) in {dict, list, set, tuple}:
# shallow copy plain container traits
value = copy.copy(value)
new_config[key] = value
return new_config
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if _is_section_key(key):
c = Config()
dict.__setitem__(self, key, c)
return c
elif not key.startswith('_'):
# undefined, create lazy value, used for container methods
v = LazyConfigValue()
dict.__setitem__(self, key, v)
return v
else:
raise KeyError
def __setitem__(self, key, value):
if _is_section_key(key):
if not isinstance(value, Config):
raise ValueError('values whose keys begin with an uppercase '
'char must be Config instances: %r, %r' % (key, value))
dict.__setitem__(self, key, value)
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(self, key)
try:
return self.__getitem__(key)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
if key.startswith('__'):
return dict.__setattr__(self, key, value)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __delattr__(self, key):
if key.startswith('__'):
return dict.__delattr__(self, key)
try:
dict.__delitem__(self, key)
except KeyError as e:
raise AttributeError(e)
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
    """An object for loading configurations from just about anywhere.

    The resulting configuration is packaged as a :class:`Config`.

    Notes
    -----
    A :class:`ConfigLoader` does one thing: load a config from a source
    (file, command line arguments) and return the data as a :class:`Config`
    object. It deliberately does not search for config files, supply
    default values, or merge multiple configs -- that logic lives elsewhere.
    """

    def _log_default(self):
        # Fall back on the traitlets application logger.
        from traitlets.log import get_logger
        return get_logger()

    def __init__(self, log=None):
        """A base class for config loaders.

        Parameters
        ----------
        log : instance of :class:`logging.Logger` to use.
            Defaults to the logger of
            :meth:`traitlets.config.application.Application.instance()`.

        Examples
        --------
        >>> cl = ConfigLoader()
        >>> config = cl.load_config()
        >>> config
        {}
        """
        self.clear()
        if log is not None:
            self.log = log
        else:
            self.log = self._log_default()
            self.log.debug('Using default logger')

    def clear(self):
        # Reset to an empty configuration.
        self.config = Config()

    def load_config(self):
        """Load a config from somewhere, return a :class:`Config` instance.

        Usually, this will cause self.config to be set and then returned.
        However, in most cases, :meth:`ConfigLoader.clear` should be called
        to erase any previous state.
        """
        self.clear()
        return self.config
class FileConfigLoader(ConfigLoader):
    """A base class for file based configurations.

    As we add more file based config loaders, the common logic should go
    here.
    """

    def __init__(self, filename, path=None, **kw):
        """Build a config loader for a filename and path.

        Parameters
        ----------
        filename : str
            The file name of the config file.
        path : str, list, tuple
            The path to search for the config file on, or a sequence of
            paths to try in order.
        """
        super(FileConfigLoader, self).__init__(**kw)
        # full_filename is filled in by _find_file() once the search succeeds.
        self.full_filename = ''
        self.filename = filename
        self.path = path

    def _find_file(self):
        """Try to find the file by searching the paths."""
        self.full_filename = filefind(self.filename, self.path)
class JSONFileConfigLoader(FileConfigLoader):
    """A JSON file loader for config.

    Can also act as a context manager that rewrites the configuration file
    to disk on exit.

    Example::

        with JSONFileConfigLoader('myapp.json','/home/jupyter/configurations/') as c:
            c.MyNewConfigurable.new_value = 'Updated'
    """

    def load_config(self):
        """Load the config from a file and return it as a Config object."""
        self.clear()
        try:
            self._find_file()
        except IOError as e:
            raise ConfigFileNotFound(str(e))
        self.config = self._convert_to_config(self._read_file_as_dict())
        return self.config

    def _read_file_as_dict(self):
        # Parse the located file as plain JSON.
        with open(self.full_filename) as handle:
            return json.load(handle)

    def _convert_to_config(self, dictionary):
        # A top-level 'version' key selects the schema; files without one
        # are assumed to be version 1 (with a warning).
        if 'version' not in dictionary:
            version = 1
            self.log.warning("Unrecognized JSON config file version, assuming version {}".format(version))
        else:
            version = dictionary.pop('version')
        if version == 1:
            return Config(dictionary)
        raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))

    def __enter__(self):
        self.load_config()
        return self.config

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Exit the context manager but do not handle any errors.

        In case of any error, we do not want to write the potentially broken
        configuration to disk.
        """
        self.config.version = 1
        serialized = json.dumps(self.config, indent=2)
        with open(self.full_filename, 'w') as f:
            f.write(serialized)
class PyFileConfigLoader(FileConfigLoader):
    """A config loader for pure python files.

    This is responsible for locating a Python config file by filename and
    path, then executing it to construct a Config object.
    """

    def load_config(self):
        """Load the config from a file and return it as a Config object."""
        self.clear()
        try:
            self._find_file()
        except IOError as e:
            raise ConfigFileNotFound(str(e))
        self._read_file_as_dict()
        return self.config

    def load_subconfig(self, fname, path=None):
        """Injected into config file namespace as load_subconfig"""
        loader = self.__class__(fname, self.path if path is None else path)
        try:
            sub_config = loader.load_config()
        except ConfigFileNotFound:
            # Pass silently if the sub config is not there,
            # treat it as an empty config file.
            pass
        else:
            self.config.merge(sub_config)

    def _read_file_as_dict(self):
        """Load the config file into self.config, with recursive loading."""
        def get_config():
            """Unnecessary now, but a deprecation warning is more trouble than it's worth."""
            return self.config
        # Namespace the config file is executed in; `c` is the conventional
        # handle for the Config object inside config files.
        exec_ns = dict(
            c=self.config,
            load_subconfig=self.load_subconfig,
            get_config=get_config,
            __file__=self.full_filename,
        )
        fs_encoding = sys.getfilesystemencoding() or 'ascii'
        conf_filename = self.full_filename.encode(fs_encoding)
        py3compat.execfile(conf_filename, exec_ns)
class CommandLineConfigLoader(ConfigLoader):
    """A config loader for command line arguments.

    As we add more command line based loaders, the common logic should go
    here.
    """

    def _exec_config_str(self, lhs, rhs):
        """execute self.config.<lhs> = <rhs>

        * expands ~ with expanduser
        * tries to assign with literal_eval, otherwise assigns with just the string,
          allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
          equivalent are `--C.a=4` and `--C.a='4'`.
        """
        rhs = os.path.expanduser(rhs)
        try:
            # Regular Python literal syntax is evaluated; bare strings fail
            # here because the shell already stripped their quote marks.
            value = literal_eval(rhs)
        except (NameError, SyntaxError, ValueError):
            # This case happens if the rhs is a string.
            value = rhs
        exec(u'self.config.%s = value' % lhs)

    def _load_flag(self, cfg):
        """update self.config from a flag, which can be a dict or Config"""
        if not isinstance(cfg, (dict, Config)):
            raise TypeError("Invalid flag: %r" % cfg)
        # don't clobber whole config sections, update
        # each section from config:
        for sec, c in iteritems(cfg):
            self.config[sec].update(c)
# Patterns used by KeyValueConfigLoader to classify raw argv tokens.
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
#          --Class.trait=value
#          --alias-name=value
# rejects: -foo=value
#          --foo
#          --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
#          -foo-bar-again
# rejects: --anything=anything
#          --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
class KeyValueConfigLoader(CommandLineConfigLoader):
    """A config loader that loads key value pairs from the command line.

    This allows command line options to be given in the following form::

        ipython --profile="foo" --InteractiveShell.autocall=False
    """
    def __init__(self, argv=None, aliases=None, flags=None, **kw):
        """Create a key value pair config loader.

        Parameters
        ----------
        argv : list
            A list that has the form of sys.argv[1:] which has unicode
            elements of the form u"key=value". If this is None (default),
            then sys.argv[1:] will be used.
        aliases : dict
            A dict of aliases for configurable traits.
            Keys are the short aliases, Values are the resolved trait.
            Of the form: `{'alias' : 'Configurable.trait'}`
        flags : dict
            A dict of flags, keyed by str name. Values can be Config objects,
            dicts, or "key=value" strings. If Config or dict, when the flag
            is triggered, the flag is loaded as `self.config.update(m)`.

        Returns
        -------
        config : Config
            The resulting Config object.

        Examples
        --------
        >>> from traitlets.config.loader import KeyValueConfigLoader
        >>> cl = KeyValueConfigLoader()
        >>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
        >>> sorted(d.items())
        [('A', {'name': 'brian'}), ('B', {'number': 0})]
        """
        super(KeyValueConfigLoader, self).__init__(**kw)
        if argv is None:
            argv = sys.argv[1:]
        self.argv = argv
        self.aliases = aliases or {}
        self.flags = flags or {}
    def clear(self):
        super(KeyValueConfigLoader, self).clear()
        # Unparsed leftovers (input files, subcommands, args after '--').
        self.extra_args = []
    def _decode_argv(self, argv, enc=None):
        """decode argv if bytes, using stdin.encoding, falling back on default enc"""
        uargv = []
        if enc is None:
            enc = DEFAULT_ENCODING
        for arg in argv:
            if not isinstance(arg, unicode_type):
                # only decode if not already decoded
                arg = arg.decode(enc)
            uargv.append(arg)
        return uargv
    def load_config(self, argv=None, aliases=None, flags=None):
        """Parse the configuration and generate the Config object.

        After loading, any arguments that are not key-value or
        flags will be stored in self.extra_args - a list of
        unparsed command-line arguments. This is used for
        arguments such as input files or subcommands.

        Parameters
        ----------
        argv : list, optional
            A list that has the form of sys.argv[1:] which has unicode
            elements of the form u"key=value". If this is None (default),
            then self.argv will be used.
        aliases : dict
            A dict of aliases for configurable traits.
            Keys are the short aliases, Values are the resolved trait.
            Of the form: `{'alias' : 'Configurable.trait'}`
        flags : dict
            A dict of flags, keyed by str name. Values can be Config objects
            or dicts. When the flag is triggered, the config is loaded as
            `self.config.update(cfg)`.
        """
        self.clear()
        if argv is None:
            argv = self.argv
        if aliases is None:
            aliases = self.aliases
        if flags is None:
            flags = self.flags
        # ensure argv is a list of unicode strings:
        uargv = self._decode_argv(argv)
        for idx,raw in enumerate(uargv):
            # strip leading '-'
            item = raw.lstrip('-')
            if raw == '--':
                # don't parse arguments after '--'
                # this is useful for relaying arguments to scripts, e.g.
                # ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
                self.extra_args.extend(uargv[idx+1:])
                break
            # Branch order matters: key=value first, then bare flags,
            # then malformed dash arguments, then plain extra args.
            if kv_pattern.match(raw):
                lhs,rhs = item.split('=',1)
                # Substitute longnames for aliases.
                if lhs in aliases:
                    lhs = aliases[lhs]
                if '.' not in lhs:
                    # probably a mistyped alias, but not technically illegal
                    self.log.warning("Unrecognized alias: '%s', it will probably have no effect.", raw)
                try:
                    self._exec_config_str(lhs, rhs)
                except Exception:
                    raise ArgumentError("Invalid argument: '%s'" % raw)
            elif flag_pattern.match(raw):
                if item in flags:
                    cfg,help = flags[item]
                    self._load_flag(cfg)
                else:
                    raise ArgumentError("Unrecognized flag: '%s'"%raw)
            elif raw.startswith('-'):
                # single-dash key=value: suggest the double-dash spelling
                kv = '--'+item
                if kv_pattern.match(kv):
                    raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
                else:
                    raise ArgumentError("Invalid argument: '%s'"%raw)
            else:
                # keep all args that aren't valid in a list,
                # in case our parent knows what to do with them.
                self.extra_args.append(item)
        return self.config
class ArgParseConfigLoader(CommandLineConfigLoader):
    """A loader that uses the argparse module to load from the command line."""

    def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
        """Create a config loader for use with argparse.

        Parameters
        ----------
        argv : optional, list
            If given, used to read command-line arguments from, otherwise
            sys.argv[1:] is used.
        aliases : dict
            A dict of aliases for configurable traits.
        flags : dict
            A dict of flags, keyed by str name.
        log : optional, logging.Logger
            Logger to use; passed through to the base class.
        parser_args : tuple
            A tuple of positional arguments that will be passed to the
            constructor of :class:`argparse.ArgumentParser`.
        parser_kw : dict
            A dict of keyword arguments that will be passed to the
            constructor of :class:`argparse.ArgumentParser`.

        Returns
        -------
        config : Config
            The resulting Config object.
        """
        # BUG FIX: this was super(CommandLineConfigLoader, self), which named
        # the wrong class and would skip CommandLineConfigLoader in the MRO
        # (harmless only while that class defines no __init__).
        super(ArgParseConfigLoader, self).__init__(log=log)
        self.clear()
        if argv is None:
            argv = sys.argv[1:]
        self.argv = argv
        self.aliases = aliases or {}
        self.flags = flags or {}
        self.parser_args = parser_args
        self.version = parser_kw.pop("version", None)
        # unsupplied options should be absent, not None, in the Namespace
        kwargs = dict(argument_default=argparse.SUPPRESS)
        kwargs.update(parser_kw)
        self.parser_kw = kwargs

    def load_config(self, argv=None, aliases=None, flags=None):
        """Parse command line arguments and return as a Config object.

        Parameters
        ----------
        argv : optional, list
            If given, a list with the structure of sys.argv[1:] to parse
            arguments from. If not given, the instance's self.argv attribute
            (given at construction time) is used."""
        self.clear()
        if argv is None:
            argv = self.argv
        if aliases is None:
            aliases = self.aliases
        if flags is None:
            flags = self.flags
        self._create_parser(aliases, flags)
        self._parse_args(argv)
        self._convert_to_config()
        return self.config

    def get_extra_args(self):
        # extra_args only exists after _parse_args() has run
        if hasattr(self, 'extra_args'):
            return self.extra_args
        else:
            return []

    def _create_parser(self, aliases=None, flags=None):
        self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
        self._add_arguments(aliases, flags)

    def _add_arguments(self, aliases=None, flags=None):
        raise NotImplementedError("subclasses must implement _add_arguments")

    def _parse_args(self, args):
        """self.parser->self.parsed_data"""
        # decode sys.argv to support unicode command-line options
        enc = DEFAULT_ENCODING
        uargs = [py3compat.cast_unicode(a, enc) for a in args]
        self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)

    def _convert_to_config(self):
        """self.parsed_data->self.config"""
        for k, v in iteritems(vars(self.parsed_data)):
            # NOTE: locals() is deliberately passed as the globals mapping so
            # that `self` and `v` resolve inside the exec'd assignment.
            exec("self.config.%s = v"%k, locals(), globals())
class KVArgParseConfigLoader(ArgParseConfigLoader):
    """A config loader that loads aliases and flags with argparse,
    but will use KVLoader for the rest.  This allows better parsing
    of common args, such as `ipython -c 'print 5'`, but still gets
    arbitrary config with `ipython --InteractiveShell.use_readline=False`"""

    def _add_arguments(self, aliases=None, flags=None):
        self.alias_flags = {}
        # print aliases, flags
        if aliases is None:
            aliases = self.aliases
        if flags is None:
            flags = self.flags
        paa = self.parser.add_argument
        for key, value in iteritems(aliases):
            if key in flags:
                # flags
                nargs = '?'
            else:
                nargs = None
            # BUG FIX (both loops): was `len(key) is 1` -- identity comparison
            # with an int literal, which only works via CPython small-int
            # caching; use equality.
            if len(key) == 1:
                # single-char aliases also get a short '-x' option
                paa('-'+key, '--'+key, type=unicode_type, dest=value, nargs=nargs)
            else:
                paa('--'+key, type=unicode_type, dest=value, nargs=nargs)
        for key, (value, help) in iteritems(flags):
            if key in self.aliases:
                # flags that share a name with an alias are resolved later,
                # in _convert_to_config
                self.alias_flags[self.aliases[key]] = value
                continue
            if len(key) == 1:
                paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
            else:
                paa('--'+key, action='append_const', dest='_flags', const=value)

    def _convert_to_config(self):
        """self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
        # remove subconfigs list from namespace before transforming the Namespace
        if '_flags' in self.parsed_data:
            subcs = self.parsed_data._flags
            del self.parsed_data._flags
        else:
            subcs = []
        for k, v in iteritems(vars(self.parsed_data)):
            if v is None:
                # it was a flag that shares the name of an alias
                subcs.append(self.alias_flags[k])
            else:
                # eval the KV assignment
                self._exec_config_str(k, v)
        for subc in subcs:
            self._load_flag(subc)
        if self.extra_args:
            # anything argparse didn't recognize goes through the KV loader
            sub_parser = KeyValueConfigLoader(log=self.log)
            sub_parser.load_config(self.extra_args)
            self.config.merge(sub_parser.config)
            self.extra_args = sub_parser.extra_args
def load_pyconfig_files(config_files, path):
    """Load multiple Python config files, merging each of them in turn.

    Parameters
    ----------
    config_files : list of str
        List of config files names to load and merge into the config.
    path : unicode
        The full path to the location of the config files.

    Returns
    -------
    Config
        The merged configuration; missing files are skipped silently.
    """
    config = Config()
    for cf in config_files:
        loader = PyFileConfigLoader(cf, path=path)
        try:
            next_config = loader.load_config()
        except ConfigFileNotFound:
            # A missing config file is not an error; treat it as empty.
            # (The former bare `except: raise` clause was a no-op and has
            # been removed -- any other exception still propagates.)
            pass
        else:
            config.merge(next_config)
    return config
| boompieman/iim_project | project_python2/lib/python2.7/site-packages/traitlets/config/loader.py | Python | gpl-3.0 | 28,943 | [
"Brian"
] | e87bbc8e0760616e574ea336b4d21920f0b4f8efba4c866aa4341a7573919c7f |
import dmenu_extended
import sys
import datetime
import os
import subprocess
import json
import pexpect
import keyring
path_config = dmenu_extended.path_prefs + '/jrnl.txt'
class extension(dmenu_extended.dmenu):
    """dmenu_extended plugin that drives the `jrnl` journalling tool."""

    # Set the name to appear in the menu
    title = 'Jrnl'
    # Determines whether to attach the submenu indicator
    is_submenu = True

    config = None              # merged jrnl + plugin configuration
    current_journal = None     # name of the journal currently in use
    jrnl_installed = False     # True once the jrnl binary is found on PATH
    jrnl_configured = False    # True when ~/.jrnl_config exists
    passwords = {}             # in-memory per-journal password cache
    bodies_current_journal_flag = False  # True if any entry has a body

    def __init__(self):
        self.startup_checks()

    def startup_checks(self):
        """Detect the jrnl binary and load jrnl + plugin configuration."""
        for path in self.system_path():
            if not os.path.exists(path):
                continue
            if 'jrnl' in os.listdir(path):
                self.jrnl_installed = True
                break
        if os.path.isfile(os.path.expanduser('~') + '/.jrnl_config'):
            self.jrnl_configured = True
        else:
            self.jrnl_configured = False
        self.config = {}
        if self.jrnl_configured:
            self.config = self.load_json(os.path.expanduser('~') + '/.jrnl_config')
        # Override any jrnl config settings with local settings
        config = self.load_config()
        self.current_journal = "default"
        for pref in config:
            self.config[pref] = config[pref]

    def update_config(self):
        """Re-read ~/.jrnl_config and merge it over the current config."""
        tmp = self.load_json(os.path.expanduser('~') + '/.jrnl_config')
        for item in tmp:
            self.config[item] = tmp[item]

    def load_config(self):
        """Load the plugin's own settings file, creating/upgrading defaults.

        Returns the effective settings dict (defaults overlaid with
        user_settings).
        """
        defaults = {
            "default_settings": {
                "gui_editor": False,
                "indicator_has_body": '*',
                "indicator_no_body": '-',
                "display_entry_titles_only": True,
                "keep_open_after_edit": False
            },
            "user_settings": {
            }
        }
        if os.path.isfile(path_config):
            tmp = self.load_json(path_config)
            # check for new options (upade as necessary)
            match = True
            if 'default_settings' not in tmp:
                match = False
                tmp['default_settings'] = {}
            if 'user_settings' not in tmp:
                match = False
                tmp['user_settings'] = {}
            for key in defaults['default_settings']:
                if key not in tmp['default_settings'] or tmp['default_settings'][key] != defaults['default_settings'][key]:
                    match = False
            if match == False:
                # settings file is stale; rewrite it with current defaults
                for key in defaults['default_settings']:
                    tmp['default_settings'][key] = defaults['default_settings'][key]
                self.save_json(path_config, tmp)
            for key in tmp['user_settings']:
                defaults['default_settings'][key] = tmp['user_settings'][key]
            return defaults['default_settings']
        else:
            self.save_json(path_config, defaults)
            return defaults['default_settings']

    def get_journal_password(self, journal=None):
        """Return the password for a journal, prompting once and caching it."""
        print("get journal password")
        if journal is None:
            journal = self.current_journal
        if journal in self.passwords:
            return self.passwords[journal]
        else:
            pword = self.get_password(helper_text=journal)
            self.passwords[journal] = pword
            return pword

    def setup_jrnl(self):
        """Interactively run jrnl's first-time setup via pexpect."""
        command = "jrnl"
        path = self.menu("~/journal.txt", prompt="Path to your journal file:")
        encrypt = self.menu("No\nYes", prompt="Encrypt your journal?:")
        if encrypt == "Yes":
            pword = self.get_password(helper_text="encryption password")
            store_in_keychain = self.menu("No\nYes", prompt="Store password in system keychain?:\n")
        proc = pexpect.spawn(command)
        proc.expect(["Path to your journal file.*"])
        proc.sendline(path)
        proc.expect(["Enter password for journal.*"])
        if encrypt == "Yes":
            proc.sendline(pword)
            proc.expect(["Do you want to store the password in your keychain?.*"])
            if store_in_keychain.lower()[0] == 'y':
                proc.sendline('Y')
            else:
                proc.sendline('n')
        else:
            # empty password means no encryption
            proc.sendline("")
        proc.expect([".*Compose Entry.*"])
        proc.close()
        self.startup_checks()

    def run_journal_command(self, command, journal=None, output=True, timeout=1):
        """Run a jrnl command (list of args), feeding the password if needed.

        Returns jrnl's raw output when ``output`` is True, else None.
        """
        print("run journal command")
        if journal is None:
            journal = self.current_journal
        jrnl_json = None
        if self.journal_is_encrypted():
            try:
                command = " ".join(command)
                proc = pexpect.spawn(command, timeout=timeout)
                if self.journal_password_managed(journal=journal) == False:
                    # keychain doesn't hold the password; supply it ourselves
                    proc.expect(["Password: "])
                    proc.sendline(self.get_journal_password(journal))
                if output:
                    print("Expecting output")
                    jrnl_json = proc.read()
                proc.expect(pexpect.EOF)
                proc.close()
            except pexpect.exceptions.TIMEOUT:
                print("except")
                if output:
                    out = self.menu("Incorrect password", prompt="")
                sys.exit()
        else:
            jrnl_json = subprocess.check_output(command)
        if output and jrnl_json is None:
            out = self.menu("There was an error dealing with the journal", prompt="Warning: ")
        return jrnl_json

    def get_journal(self, journal=None):
        """Return the journal's entries as formatted menu lines (newest first)."""
        print("get journal")
        if journal is None:
            # BUG FIX: was `journal is self.current_journal` (a no-op
            # comparison), so the default was never applied.
            journal = self.current_journal
        command = ["jrnl"]
        if journal is not None and journal in self.config["journals"]:
            command.append(journal)
        command = command + ["--export", "json"]
        jrnl_json = self.run_journal_command(command, journal)
        tmp = json.loads(jrnl_json)
        out = []
        for entry in tmp['entries']:
            if entry['body'] != '' and entry['body'] != '\n':
                self.bodies_current_journal_flag = True
            line = entry['date'] + ' '
            if entry['body'] != '\n' and entry['body'] != '':
                line += self.config["indicator_has_body"] + ' '
            else:
                line += self.config["indicator_no_body"] + ' '
            line += entry['title'].rstrip(' ')
            if self.config["display_entry_titles_only"] == False and entry['body'] != '\n':
                if line[-1:] != '.':
                    line += '.'
                # BUG FIX: rstrip's return value was previously discarded
                line = line.rstrip(' ')
                line += ' '
                line += entry['body']
                line = line.replace('\n', ' ')
            out.append(line)
        out.sort(reverse=True)
        return out

    def encrypt_journal(self, journal=None):
        """Encrypt a journal (or change its password) via jrnl --encrypt."""
        print("encrypt journal")
        if journal is None:
            # BUG FIX: was `journal is self.current_journal` (no-op comparison)
            journal = self.current_journal
        pword = None
        command = ["jrnl", journal, "--encrypt"]
        print('step')
        print(" ".join(command))
        proc = pexpect.spawn(" ".join(command), timeout=10)
        print('step')
        if self.journal_is_encrypted(journal=journal) and \
           self.journal_password_managed(journal=journal) == False:
            # already encrypted and not keychain-managed: old password first
            proc.expect(["Password: "])
            proc.sendline(self.get_journal_password(journal))
        proc.expect(["Enter new password: "])
        print('step')
        pword = self.get_password(helper_text="create new password")
        kchain = self.menu(["No", "Yes"], prompt="Add password to your keychain?")
        print('step')
        print(pword)
        print(kchain)
        proc.sendline(pword)
        print('step')
        proc.expect(["Do you want to store the password in your keychain.*"])
        print('step')
        if kchain == "Yes":
            proc.sendline("Y")
        else:
            proc.sendline("n")
        proc.expect(pexpect.EOF)
        proc.close()
        # remember the new password for this session
        self.passwords[journal] = pword

    def decrypt_journal(self, journal=None):
        """Remove encryption from a journal via jrnl --decrypt."""
        print("decrypt journal")
        if journal is None:
            # BUG FIX: was `journal is self.current_journal` (no-op comparison)
            journal = self.current_journal
        self.run_journal_command(["jrnl", journal, "--decrypt"], output=False, timeout=5)

    def journal_is_encrypted(self, journal=None):
        """Return True if the given journal (default: current) is encrypted.

        Also refreshes the journal's "managed" flag from the system keychain.
        """
        print(self.config["journals"])
        print("journal encrypted?"),
        self.update_config()
        if journal is None:
            journal = self.current_journal
        # BUG FIX: this method previously inspected self.current_journal even
        # when an explicit `journal` argument was passed.
        if "encrypt" in self.config["journals"][journal] and \
           self.config["journals"][journal]["encrypt"] == True:
            if keyring.get_password("jrnl", journal) is not None:
                self.config["journals"][journal]["managed"] = True
            else:
                self.config["journals"][journal]["managed"] = False
            print("yes")
            return True
        else:
            print("no")
            return False

    def journal_password_managed(self, journal=None):
        """Return True if the journal's password is held in the keychain."""
        print("journal managed?"),
        if journal is None:
            journal = self.current_journal
        if self.journal_is_encrypted(journal):
            # BUG FIX: previously read self.current_journal instead of journal
            if self.config["journals"][journal]["managed"]:
                print("yes")
                return True
            else:
                print("no")
                return False
        else:
            print("no")
            return False

    def settings(self):
        """Show the settings submenu (edit prefs, encrypt/decrypt journal)."""
        print("settings")
        options = [
            self.prefs['indicator_edit'] + " Edit jrnl preferences",
            self.prefs['indicator_edit'] + " Edit plugin preferences"
        ]
        if self.journal_is_encrypted():
            options.append(self.prefs['indicator_submenu'] + " Decrypt journal (remove password for '"+self.current_journal+"')")
            options.append(self.prefs['indicator_submenu'] + " Change password for '"+self.current_journal+"'")
        else:
            options.append(self.prefs['indicator_submenu'] + " Encrypt journal (add password to '"+self.current_journal+"')")
        options.append(self.prefs['indicator_submenu'] + " Return")
        out = self.select(options)
        if out == options[0]:
            self.open_file(os.path.expanduser('~') + '/.jrnl_config')
        elif out == options[1]:
            self.open_file(path_config)
        elif out == options[2]:
            # options[2] toggles encryption state
            if self.journal_is_encrypted():
                self.decrypt_journal(self.current_journal)
                self.main()
            else:
                self.encrypt_journal(self.current_journal)
                self.main()
        elif out == self.prefs['indicator_submenu'] + " Change password for '"+self.current_journal+"'":
            self.encrypt_journal(self.current_journal)

    def edit(self, entry, date):
        """Open all entries of `date` for editing (GUI or terminal editor)."""
        print("editing entry")
        date_str = date.strftime(self.config['timeformat'])
        if self.config["gui_editor"]:
            # Gui based
            command = ["jrnl", self.current_journal, "-from", date_str, "-to", date_str, "--edit"]
            self.run_journal_command(command, output=False, timeout=None)
            if self.config["keep_open_after_edit"]:
                return
        else:
            # Terminal based
            self.open_terminal("jrnl -from {date} -to {date} --edit".format(date=date_str))

    def iso_date_str(self, date_in):
        """Format a date/datetime as YYYY-MM-DD."""
        return date_in.strftime("%Y-%m-%d")

    def add_entry(self):
        """Prompt for a date and entry text, then store it with jrnl."""
        print("adding entry")
        today = datetime.datetime.today()
        daydiff = datetime.timedelta(days=1)
        opts = [
            self.prefs['indicator_submenu'] + " " + self.iso_date_str(today) + " - Today (" + today.strftime("%A") + ")",
            self.prefs['indicator_submenu'] + " " + self.iso_date_str(today) + " - Yesterday (" + (today - daydiff).strftime("%A") + ")",
        ]
        for i in range(2,256):
            out = [self.prefs['indicator_submenu']]
            dtmp = today - i * daydiff
            out.append(self.iso_date_str(dtmp))
            out.append("-")
            if i < 7:
                out.append(dtmp.strftime("%A"))
            elif i <= 13:
                out.append("Last")
                out.append(dtmp.strftime("%A"))
            elif i <= 22:
                out.append(dtmp.strftime("%A"))
                out.append("before last")
            else:
                out.append(dtmp.strftime("%A %d %B"))
            opts.append(" ".join(out))
        # slice the ISO date (10 chars) back out of the selected menu line
        date = self.select(opts, prompt="Entry date: ")[len(self.prefs['indicator_submenu']) + 1:len(self.prefs['indicator_submenu']) + 11]
        content = self.menu(" ", prompt=date + ": ")
        command = ["jrnl", self.current_journal, date + ":", content]
        self.run_journal_command(command, self.current_journal)

    def view_short_entries(self, state):
        """Toggle titles-only display (True = titles only)."""
        self.config["display_entry_titles_only"] = state

    def switch_journal(self, journal_name):
        """Make `journal_name` the current journal."""
        print("Switching to journal: " + journal_name)
        self.current_journal = journal_name

    def main(self):
        """Top-level menu loop: list entries, add/edit, switch journals."""
        if self.jrnl_configured == False:
            print("jrnl not configured")
            self.setup_jrnl()
        else:
            print("jrnl configured")
        while True:
            print("main")
            # Prompt the user to install jrnl if not already installed
            if self.jrnl_installed == False:
                options = [
                    "Could not find jrnl - do you have it installed?"
                ]
                options.append(self.prefs["indicator_submenu"] + " Visit jrnl website")
                out = self.select(options, prompt="Error!")
                if out == options[1]:
                    self.open_url("http://jrnl.sh")
                sys.exit()
            options = [
                self.prefs['indicator_submenu'] + " New entry",
            ]
            journal_data = self.get_journal(self.current_journal)
            if len(journal_data) > 0 and self.bodies_current_journal_flag == True:
                if self.config["display_entry_titles_only"] == True:
                    options.append(self.prefs['indicator_submenu'] + " Show full entries")
                else:
                    options.append(self.prefs['indicator_submenu'] + " Show only titles")
            journal_switch_prefix = self.prefs['indicator_submenu'] + " Switch to '"
            for journal_name in [a for a in self.config["journals"] if a != self.current_journal]:
                options.append(journal_switch_prefix + journal_name + '\'')
            options = options + [self.prefs['indicator_submenu'] + " Settings"]
            options = options + journal_data
            out = self.select(options)
            if out == options[0]:
                self.add_entry()
            elif out == self.prefs['indicator_submenu'] + " Settings":
                self.settings()
                sys.exit()
            elif out == self.prefs['indicator_submenu'] + " Show full entries" or \
                 out == self.prefs['indicator_submenu'] + " Show only titles":
                if self.config["display_entry_titles_only"]:
                    self.view_short_entries(False)
                else:
                    self.view_short_entries(True)
            elif out[:len(journal_switch_prefix)] == journal_switch_prefix:
                print("Switching journals")
                self.switch_journal(out[len(journal_switch_prefix):-1])
            else:
                # try to interpret the selected line as an entry (date prefix)
                entry_date = None
                try:
                    date_length = len(datetime.datetime.today().strftime(self.config['timeformat']))
                    entry_date = datetime.datetime.strptime(out[:date_length], self.config['timeformat'])
                except:
                    pass
                if entry_date is not None:
                    self.edit(out, entry_date)
| markjones112358/dmenu-extended-plugins | plugin_jrnl.py | Python | mit | 15,055 | [
"VisIt"
] | e5b5024647e2fd36e0af2a20afe56865b9f2e7b288de3db4248681255c5ef999 |
# -*- coding: utf-8 -*-
def url(url_str=''):
    """
    Decorator for specifying the URL the test should visit, relative to the test
    class's `base_url`.
    """
    def wrapper(method):
        # Stash the relative URL on the test method for the runner to read.
        method.__url = url_str
        return method
    return wrapper
def waitfor(css_selector, text=None, classes=None):
    """
    Decorator registering an element (chosen by CSS selector) that must be
    present before the screenshot is taken. When `text` is given the element
    must also contain that text; when `classes` is given the element must
    carry all of those classes. Registrations accumulate on the method's
    `__waitfors` list attribute.
    """
    def wrapper(method):
        registered = getattr(method, '__waitfors', None)
        if not isinstance(registered, list):
            registered = []
            setattr(method, '__waitfors', registered)
        registered.append({
            'css_selector': css_selector,
            'text': text,
            'classes': classes,
        })
        return method
    return wrapper
def waitforjs(js_string):
    """
    Decorator that registers a javascript expression to be executed on the
    page; the screenshot is delayed until it evaluates truthy. The snippet
    must return something, e.g. 'return window.height > 20'. Snippets
    accumulate on the method's `__wait_for_js_strings` list attribute.
    """
    def wrapper(method):
        snippets = getattr(method, '__wait_for_js_strings', None)
        if not isinstance(snippets, list):
            snippets = []
            setattr(method, '__wait_for_js_strings', snippets)
        snippets.append(js_string)
        return method
    return wrapper
def dontwaitfor(css_selector):
    """
    Decorator that excludes a selector from the waiting configured by the
    class-level `wait_for`/`wait_fors` attributes, overriding the class
    setting for this one method. Selectors accumulate on the method's
    `__dontwait` list attribute.
    """
    def wrapper(method):
        excluded = getattr(method, '__dontwait', None)
        if not isinstance(excluded, list):
            excluded = []
            setattr(method, '__dontwait', excluded)
        excluded.append(css_selector)
        return method
    return wrapper
def hide(css_selector):
    """
    Decorator that marks elements matching `css_selector` to be hidden
    (via `el.hidden = true` in javascript) before the screenshot — useful
    for content that varies between runs. Selectors accumulate on the
    method's `__hide` list attribute.
    """
    def wrapper(method):
        hidden = getattr(method, '__hide', None)
        if not isinstance(hidden, list):
            hidden = []
            setattr(method, '__hide', hidden)
        hidden.append(css_selector)
        return method
    return wrapper
def with_metaclass(mcls):
    """
    Class decorator that rebuilds the decorated class under metaclass
    `mcls`, portable across Python 2 and 3.
    cf. http://stackoverflow.com/questions/22409430/portable-meta-class-between-python2-and-python3
    """
    def wrapper(cls):
        namespace = dict(vars(cls))
        # Drop descriptors owned by the old class object.
        namespace.pop('__dict__', None)
        namespace.pop('__weakref__', None)
        return mcls(cls.__name__, cls.__bases__, namespace)
    return wrapper
| hammerlab/seltest | seltest/helpers.py | Python | apache-2.0 | 2,817 | [
"VisIt"
] | 4ea9f2348d82baeade49b47a29af1e2ba55b8155069bdadbcba590b26eaac9b6 |
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests ImportVisitor and related classes."""
from __future__ import unicode_literals
import copy
import os
import shutil
import tempfile
import textwrap
import unittest
from grumpy_tools.compiler import imputil
from grumpy_tools.compiler import util
from grumpy_tools.compiler.parser import patch_pythonparser
import pythonparser
patch_pythonparser()
class ImportVisitorTest(unittest.TestCase):
    """Exercises imputil.Importer's translation of import statements.

    setUp materializes the fake source tree described by _PATH_SPEC under a
    temporary directory and builds Importers rooted at a top-level script
    ('foo'), a package ('bar') and a sub-package ('bar.fred'), plus the
    expected Import objects the individual tests deep-copy and decorate
    with bindings.
    """

    # Fake tree layout: keys ending in '/' become directories, other keys
    # become empty files.
    _PATH_SPEC = {
        'foo.py': None,
        'qux.py': None,
        'bar/': {
            'fred/': {
                '__init__.py': None,
                'quux.py': None,
            },
            '__init__.py': None,
            'baz.py': None,
            'foo.py': None,
        },
        'baz.py': None,
    }

    def setUp(self):
        self.rootdir = tempfile.mkdtemp()
        self.pydir = os.path.join(self.rootdir, 'src', '__python__')
        self._materialize_tree(
            self.rootdir, {'src/': {'__python__/': self._PATH_SPEC}})
        foo_script = os.path.join(self.rootdir, 'foo.py')
        self.importer = imputil.Importer(self.rootdir, 'foo', foo_script, False)
        bar_script = os.path.join(self.pydir, 'bar', '__init__.py')
        self.bar_importer = imputil.Importer(
            self.rootdir, 'bar', bar_script, False)
        fred_script = os.path.join(self.pydir, 'bar', 'fred', '__init__.py')
        self.fred_importer = imputil.Importer(
            self.rootdir, 'bar.fred', fred_script, False)
        # Expected Import objects; tests deepcopy these before adding
        # bindings so the shared fixtures stay pristine.
        self.foo_import = imputil.Import(
            'foo', os.path.join(self.pydir, 'foo.py'))
        self.qux_import = imputil.Import(
            'qux', os.path.join(self.pydir, 'qux.py'))
        self.bar_import = imputil.Import(
            'bar', os.path.join(self.pydir, 'bar/__init__.py'))
        self.fred_import = imputil.Import(
            'bar.fred', os.path.join(self.pydir, 'bar/fred/__init__.py'))
        self.quux_import = imputil.Import(
            'bar.fred.quux', os.path.join(self.pydir, 'bar/fred/quux.py'))
        self.baz2_import = imputil.Import(
            'bar.baz', os.path.join(self.pydir, 'bar/baz.py'))
        self.foo2_import = imputil.Import(
            'bar.foo', os.path.join(self.pydir, 'bar/foo.py'))
        self.baz_import = imputil.Import(
            'baz', os.path.join(self.pydir, 'baz.py'))

    def tearDown(self):
        shutil.rmtree(self.rootdir)

    def testImportTopLevelModule(self):
        imp = copy.deepcopy(self.qux_import)
        imp.add_binding(imputil.Import.MODULE, 'qux', 0)
        self._check_imports('import qux', [imp])

    def testImportTopLevelPackage(self):
        imp = copy.deepcopy(self.bar_import)
        imp.add_binding(imputil.Import.MODULE, 'bar', 0)
        self._check_imports('import bar', [imp])

    def testImportPackageModuleAbsolute(self):
        imp = copy.deepcopy(self.baz2_import)
        imp.add_binding(imputil.Import.MODULE, 'bar', 0)
        self._check_imports('import bar.baz', [imp])

    def testImportFromSubModule(self):
        imp = copy.deepcopy(self.baz2_import)
        imp.add_binding(imputil.Import.MODULE, 'baz', 1)
        self._check_imports('from bar import baz', [imp])

    def testImportPackageModuleRelative(self):
        imp = copy.deepcopy(self.baz2_import)
        imp.add_binding(imputil.Import.MODULE, 'baz', 1)
        got = self.bar_importer.visit(pythonparser.parse('import baz').body[0])
        self._assert_imports_equal([imp], got)

    def testImportPackageModuleRelativeFromSubModule(self):
        imp = copy.deepcopy(self.baz2_import)
        imp.add_binding(imputil.Import.MODULE, 'baz', 1)
        foo_script = os.path.join(self.pydir, 'bar', 'foo.py')
        importer = imputil.Importer(self.rootdir, 'bar.foo', foo_script, False)
        got = importer.visit(pythonparser.parse('import baz').body[0])
        self._assert_imports_equal([imp], got)

    def testImportPackageModuleAbsoluteImport(self):
        # Fourth Importer argument flips on absolute-import semantics.
        imp = copy.deepcopy(self.baz_import)
        imp.add_binding(imputil.Import.MODULE, 'baz', 0)
        bar_script = os.path.join(self.pydir, 'bar', '__init__.py')
        importer = imputil.Importer(self.rootdir, 'bar', bar_script, True)
        got = importer.visit(pythonparser.parse('import baz').body[0])
        self._assert_imports_equal([imp], got)

    def testImportMultiple(self):
        imp1 = copy.deepcopy(self.foo_import)
        imp1.add_binding(imputil.Import.MODULE, 'foo', 0)
        imp2 = copy.deepcopy(self.baz2_import)
        imp2.add_binding(imputil.Import.MODULE, 'bar', 0)
        self._check_imports('import foo, bar.baz', [imp1, imp2])

    def testImportAs(self):
        imp = copy.deepcopy(self.foo_import)
        imp.add_binding(imputil.Import.MODULE, 'bar', 0)
        self._check_imports('import foo as bar', [imp])

    def testImportFrom(self):
        imp = copy.deepcopy(self.baz2_import)
        imp.add_binding(imputil.Import.MODULE, 'baz', 1)
        self._check_imports('from bar import baz', [imp])

    def testImportFromMember(self):
        imp = copy.deepcopy(self.foo_import)
        imp.add_binding(imputil.Import.MEMBER, 'bar', 'bar')
        self._check_imports('from foo import bar', [imp])

    def testImportFromMultiple(self):
        imp1 = copy.deepcopy(self.baz2_import)
        imp1.add_binding(imputil.Import.MODULE, 'baz', 1)
        imp2 = copy.deepcopy(self.foo2_import)
        imp2.add_binding(imputil.Import.MODULE, 'foo', 1)
        self._check_imports('from bar import baz, foo', [imp1, imp2])

    def testImportFromMixedMembers(self):
        imp1 = copy.deepcopy(self.bar_import)
        imp1.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
        imp2 = copy.deepcopy(self.baz2_import)
        imp2.add_binding(imputil.Import.MODULE, 'baz', 1)
        self._check_imports('from bar import qux, baz', [imp1, imp2])

    def testImportFromAs(self):
        imp = copy.deepcopy(self.baz2_import)
        imp.add_binding(imputil.Import.MODULE, 'qux', 1)
        self._check_imports('from bar import baz as qux', [imp])

    def testImportFromAsMembers(self):
        imp = copy.deepcopy(self.foo_import)
        imp.add_binding(imputil.Import.MEMBER, 'baz', 'bar')
        self._check_imports('from foo import bar as baz', [imp])

    # def testImportFromWildcardRaises(self):
    #   self._check_imports('from foo import *', [])

    def testImportFromFuture(self):
        # __future__ imports produce no Import objects.
        self._check_imports('from __future__ import print_function', [])

    def testImportFromNative(self):
        imp = imputil.Import('__go__/fmt', is_native=True)
        imp.add_binding(imputil.Import.MEMBER, 'Printf', 'Printf')
        self._check_imports('from "__go__/fmt" import Printf', [imp])

    def testImportFromNativeMultiple(self):
        imp = imputil.Import('__go__/fmt', is_native=True)
        imp.add_binding(imputil.Import.MEMBER, 'Printf', 'Printf')
        imp.add_binding(imputil.Import.MEMBER, 'Println', 'Println')
        self._check_imports('from "__go__/fmt" import Printf, Println', [imp])

    def testImportFromNativeAs(self):
        imp = imputil.Import('__go__/fmt', is_native=True)
        imp.add_binding(imputil.Import.MEMBER, 'foo', 'Printf')
        self._check_imports('from "__go__/fmt" import Printf as foo', [imp])

    # def testRelativeImportNonPackage(self):
    #   self.assertRaises(util.ImportError, self.importer.visit,
    #                     pythonparser.parse('from . import bar').body[0])

    def testRelativeImportBeyondTopLevel(self):
        self.assertRaises(util.ImportError, self.bar_importer.visit,
                          pythonparser.parse('from .. import qux').body[0])

    def testRelativeModuleNoExist(self):
        # 'qux' is not a module inside bar/, so it is bound as a member.
        imp = copy.deepcopy(self.bar_import)
        imp.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
        node = pythonparser.parse('from . import qux').body[0]
        self._assert_imports_equal([imp], self.bar_importer.visit(node))

    def testRelativeModule(self):
        imp = copy.deepcopy(self.foo2_import)
        imp.add_binding(imputil.Import.MODULE, 'foo', 1)
        node = pythonparser.parse('from . import foo').body[0]
        self._assert_imports_equal([imp], self.bar_importer.visit(node))

    def testRelativeModuleFromSubModule(self):
        imp = copy.deepcopy(self.foo2_import)
        imp.add_binding(imputil.Import.MODULE, 'foo', 1)
        baz_script = os.path.join(self.pydir, 'bar', 'baz.py')
        importer = imputil.Importer(self.rootdir, 'bar.baz', baz_script, False)
        node = pythonparser.parse('from . import foo').body[0]
        self._assert_imports_equal([imp], importer.visit(node))

    def testRelativeModuleMember(self):
        imp = copy.deepcopy(self.foo2_import)
        imp.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
        node = pythonparser.parse('from .foo import qux').body[0]
        self._assert_imports_equal([imp], self.bar_importer.visit(node))

    def testRelativeModuleMemberMixed(self):
        imp1 = copy.deepcopy(self.fred_import)
        imp1.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
        imp2 = copy.deepcopy(self.quux_import)
        imp2.add_binding(imputil.Import.MODULE, 'quux', 2)
        node = pythonparser.parse('from .fred import qux, quux').body[0]
        self._assert_imports_equal([imp1, imp2], self.bar_importer.visit(node))

    def testRelativeUpLevel(self):
        imp = copy.deepcopy(self.foo2_import)
        imp.add_binding(imputil.Import.MODULE, 'foo', 1)
        node = pythonparser.parse('from .. import foo').body[0]
        self._assert_imports_equal([imp], self.fred_importer.visit(node))

    def testRelativeUpLevelMember(self):
        imp = copy.deepcopy(self.foo2_import)
        imp.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
        node = pythonparser.parse('from ..foo import qux').body[0]
        self._assert_imports_equal([imp], self.fred_importer.visit(node))

    def _check_imports(self, stmt, want):
        """Parse `stmt`, feed it to the top-level importer, compare results."""
        got = self.importer.visit(pythonparser.parse(stmt).body[0])
        self._assert_imports_equal(want, got)

    def _assert_imports_equal(self, want, got):
        """Compare Import objects structurally via their __dict__s."""
        self.assertEqual([imp.__dict__ for imp in want],
                         [imp.__dict__ for imp in got])

    def _materialize_tree(self, dirname, spec):
        """Create the dirs/empty files described by `spec` under `dirname`.

        NOTE: relies on dict.iteritems, so this file is Python 2 only.
        """
        for name, sub_spec in spec.iteritems():
            if name.endswith('/'):
                subdir = os.path.join(dirname, name[:-1])
                os.mkdir(subdir)
                self._materialize_tree(subdir, sub_spec)
            else:
                with open(os.path.join(dirname, name), 'w'):
                    pass
class MakeFutureFeaturesTest(unittest.TestCase):
    """Tests imputil._make_future_features on single `from __future__` nodes."""

    def testImportFromFuture(self):
        # (source, expected FutureFeatures) pairs; unsupported-but-harmless
        # features like 'generators' yield the default feature set.
        testcases = [
            ('from __future__ import print_function',
             imputil.FutureFeatures(print_function=True)),
            ('from __future__ import generators', imputil.FutureFeatures()),
            ('from __future__ import generators, print_function',
             imputil.FutureFeatures(print_function=True)),
        ]
        for tc in testcases:
            source, want = tc
            mod = pythonparser.parse(textwrap.dedent(source))
            node = mod.body[0]
            got = imputil._make_future_features(node)  # pylint: disable=protected-access
            self.assertEqual(want.__dict__, got.__dict__)

    def testImportFromFutureParseError(self):
        # (source, regexp expected to match the ParseError message) pairs.
        testcases = [
            # NOTE: move this group to testImportFromFuture as they are implemented
            # by grumpy
            ('from __future__ import division',
             r'future feature \w+ not yet implemented'),
            ('from __future__ import braces', 'not a chance'),
            ('from __future__ import nonexistant_feature',
             r'future feature \w+ is not defined'),
        ]
        for tc in testcases:
            source, want_regexp = tc
            mod = pythonparser.parse(source)
            node = mod.body[0]
            self.assertRaisesRegexp(util.ParseError, want_regexp,
                                    imputil._make_future_features, node)  # pylint: disable=protected-access
class ParseFutureFeaturesTest(unittest.TestCase):
    """Tests imputil.parse_future_features over whole modules, including the
    rule that __future__ imports only count before any other statement."""

    def testFutureFeatures(self):
        testcases = [
            ('from __future__ import print_function',
             imputil.FutureFeatures(print_function=True)),
            ("""\
"module docstring"
from __future__ import print_function
""", imputil.FutureFeatures(print_function=True)),
            ("""\
"module docstring"
from __future__ import print_function, with_statement
from __future__ import nested_scopes
""", imputil.FutureFeatures(print_function=True)),
            ('from __future__ import absolute_import',
             imputil.FutureFeatures(absolute_import=True)),
            ('from __future__ import absolute_import, print_function',
             imputil.FutureFeatures(absolute_import=True, print_function=True)),
            # __future__ imports after other statements are ignored.
            ('foo = 123\nfrom __future__ import print_function',
             imputil.FutureFeatures()),
            ('import os\nfrom __future__ import print_function',
             imputil.FutureFeatures()),
        ]
        for tc in testcases:
            source, want = tc
            mod = pythonparser.parse(textwrap.dedent(source))
            _, got = imputil.parse_future_features(mod)
            self.assertEqual(want.__dict__, got.__dict__)

    def testUnimplementedFutureRaises(self):
        mod = pythonparser.parse('from __future__ import division')
        msg = 'future feature division not yet implemented'
        self.assertRaisesRegexp(util.ParseError, msg,
                                imputil.parse_future_features, mod)

    def testUndefinedFutureRaises(self):
        mod = pythonparser.parse('from __future__ import foo')
        self.assertRaisesRegexp(
            util.ParseError, 'future feature foo is not defined',
            imputil.parse_future_features, mod)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| pombredanne/grumpy | grumpy-tools-src/grumpy_tools/compiler/imputil_test.py | Python | apache-2.0 | 13,627 | [
"VisIt"
] | 438cd0e3a9a5e15de220a661852e58e81fde0da5030e318ac6d62a20c4fc2a73 |
import argparse
import graph as gg
from itertools import chain
from parse_pv import parse_file
import subprocess
import os
def main(file_name, trimmed='trimmed.out'):
    """
    Grep the unit cell parameters, pressure and volume from a cp2k output
    and plot them as PNG files in the working directory.

    :param file_name: path to the cp2k output file
    :param trimmed: path of the temporary file holding the grepped lines
    """
    # Extract the 16 lines following each "STEP NUMBER" marker into a
    # temporary file that parse_file understands. check=True makes a grep
    # failure raise instead of silently producing an empty plot.
    cmd = 'grep -A16 "STEP NUMBER" {} > {}'.format(file_name, trimmed)
    subprocess.run(cmd, shell=True, check=True)
    rs = parse_file(trimmed)
    # remove temporary file
    os.remove(trimmed)

    xlabel = 'Step'

    ylabel = 'Pressure'
    title = 'Pressure [Avg.]'
    output = 'pressure.png'  # fixed typo: was 'pressur.png'
    gg.simplePlot(rs[:, 0], rs[:, 2], xlabel, ylabel, title, output)

    ylabel = 'Volume (Bohr^3)'
    title = 'Volume [Avg.]'
    output = 'volume.png'
    gg.simplePlot(rs[:, 0], rs[:, 4], xlabel, ylabel, title, output)

    formats = ['g-', 'b-', 'r-']
    args1 = createArgs(rs[:, 0], rs[:, 5:8].transpose(), formats)
    ylabel = 'Cell lengths (Bohr)'  # fixed typo: was 'Cell lenghts'
    title = 'Cell lengths [Avg.]'
    output = 'lengths.png'
    gg.plotLengths(xlabel, ylabel, title, output, args1)

    args2 = createArgs(rs[:, 0], rs[:, 8:11].transpose(), formats)
    ylabel = 'Cell angles (deg)'
    title = 'Cell angles [Avg.]'
    output = 'angles.png'
    gg.plotLengths(xlabel, ylabel, title, output, args2)
def createArgs(x, ys, formats):
    """
    Build a flat matplotlib-style positional argument list:
    [x, y1, fmt1, x, y2, fmt2, ...]
    """
    flat = []
    for y, fmt in zip(ys, formats):
        flat.extend((x, y, fmt))
    return flat
if __name__ == "__main__":
msg = " get_structural_param -p <path/to/output>"
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('-p', required=True, help='path to the cp2k output')
args = parser.parse_args()
main(args.p)
| ccaratelli/insertion_deletion | get_structural_param.py | Python | gpl-3.0 | 1,872 | [
"CP2K"
] | 22a42ff3cb04a385e84846c2f8d802239955db9c6fbdf7b68e77869dcc6302a5 |
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
# Absolute path where the generated image dataset will be written.
dataset_destination_path = '/Users/seb/Desktop/vtk_volume_v2'
# -----------------------------------------------------------------------------
from vtk import *
from vtk.web.dataset_builder import *
# -----------------------------------------------------------------------------
# VTK Helper methods
# -----------------------------------------------------------------------------
def updatePieceWise(pwf, dataRange, center, halfSpread):
    """Rebuild `pwf` as a triangular opacity peak around `center`.

    The peak rises from 0 at `center - halfSpread` to 1 at `center` and
    falls back to 0 at `center + halfSpread`, with both edges clamped to
    `dataRange`.

    Bug fix: the original body mutated the global `scalarOpacity` and
    ignored the `pwf` argument; it now operates on the function it is
    given. The call site passes `scalarOpacity`, so behavior there is
    unchanged.
    """
    pwf.RemoveAllPoints()
    # Rising edge, clamped to the lower bound of the data range.
    if (center - halfSpread) <= dataRange[0]:
        pwf.AddPoint(dataRange[0], 0.0)
        pwf.AddPoint(center, 1.0)
    else:
        pwf.AddPoint(dataRange[0], 0.0)
        pwf.AddPoint(center - halfSpread, 0.0)
        pwf.AddPoint(center, 1.0)
    # Falling edge, clamped to the upper bound of the data range.
    if (center + halfSpread) >= dataRange[1]:
        pwf.AddPoint(dataRange[1], 0.0)
    else:
        pwf.AddPoint(center + halfSpread, 0.0)
        pwf.AddPoint(dataRange[1], 0.0)
# -----------------------------------------------------------------------------
# Shared PNG writer instance reused by writeColorMap for every frame.
imageWriter = vtkPNGWriter()
def writeDepthMap(imageData, path):
    """Write the depth buffer of `imageData` to `path` as raw uint8 bytes,
    inverting each value (255 - depth)."""
    width, height = imageData.GetDimensions()[0], imageData.GetDimensions()[1]
    depthArray = imageData.GetPointData().GetArray(0)
    inverted = bytearray(width * height)
    for i in range(depthArray.GetNumberOfTuples()):
        inverted[i] = 255 - int(depthArray.GetValue(i))
    with open(path, 'wb') as out:
        out.write(inverted)
def writeColorMap(imageData, path):
    # Write the rendered color buffer to `path` as a PNG, reusing the
    # module-level vtkPNGWriter instance.
    imageWriter.SetInputData(imageData)
    imageWriter.SetFileName(path)
    imageWriter.Write()
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
# Synthetic volume source rendered through the GPU ray-cast mapper; the
# mapper's render-to-image mode lets us grab color and depth buffers below.
source = vtkRTAnalyticSource()
mapper = vtkGPUVolumeRayCastMapper()
mapper.SetInputConnection(source.GetOutputPort())
mapper.RenderToImageOn()

colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(37.35310363769531, 0.231373, 0.298039, 0.752941)
colorFunction.AddRGBPoint(157.0909652709961, 0.865003, 0.865003, 0.865003)
colorFunction.AddRGBPoint(276.8288269042969, 0.705882, 0.0156863, 0.14902)

# Split the scalar range into nbSteps equal bins; `centers` holds each
# bin's midpoint, one opacity-peak position per generated dataset slice.
dataRange = [37.3, 276.8]
nbSteps = 10
halfSpread = (dataRange[1] - dataRange[0]) / float(2*nbSteps)
centers = [dataRange[0] + halfSpread*float(2*i+1) for i in range(nbSteps)]

scalarOpacity = vtkPiecewiseFunction()
volumeProperty = vtkVolumeProperty()
# volumeProperty.ShadeOn()
volumeProperty.SetInterpolationType(VTK_LINEAR_INTERPOLATION)
volumeProperty.SetColor(colorFunction)
volumeProperty.SetScalarOpacity(scalarOpacity)

volume = vtkVolume()
volume.SetMapper(mapper)
volume.SetProperty(volumeProperty)

window = vtkRenderWindow()
window.SetSize(500, 500)

renderer = vtkRenderer()
renderer.SetBackground(0.5, 0.5, 0.6)
window.AddRenderer(renderer)
renderer.AddVolume(volume)
renderer.ResetCamera()
window.Render()

# Scratch images reused for every frame's color/depth capture.
colorMap = vtkImageData()
depthMap = vtkImageData()

# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------

# Create Image Builder
dsb = ImageDataSetBuilder(dataset_destination_path, 'image/png', {'type': 'spherical', 'phi': range(0, 360, 30), 'theta': range(-60, 61, 30)})

# Add PieceWise navigation
dsb.getDataHandler().registerArgument(priority=1, name='pwf', label='Transfer function', values=centers, ui='slider')

# Add Depth data
dsb.getDataHandler().registerData(name='depth', type='array', fileName='_depth.uint8', metadata={ 'dimensions': window.GetSize() })

# Loop over data and generate images: for every opacity peak position and
# every spherical camera position, capture one color PNG and one raw depth
# buffer.
dsb.start(window, renderer)
for center in dsb.getDataHandler().pwf:
    updatePieceWise(scalarOpacity, dataRange, center, halfSpread)
    for camera in dsb.getCamera():
        dsb.updateCamera(camera)
        mapper.GetColorImage(colorMap)
        writeColorMap(colorMap, dsb.getDataHandler().getDataAbsoluteFilePath('image'))
        mapper.GetDepthImage(depthMap)
        writeDepthMap(depthMap, dsb.getDataHandler().getDataAbsoluteFilePath('depth'))
dsb.stop()
| Kitware/arctic-viewer | scripts/examples/vtk/samples/syntax-evolution-volume_v2.py | Python | bsd-3-clause | 4,349 | [
"VTK"
] | 936e4fd5ea3857947c1d8d9c798835ea280aab6eb8f7d4eb426486f55f9423ea |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
#!/usr/bin/env python
from __future__ import division, unicode_literals
"""
#TODO: Write module doc.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings

# This module is a deprecated backwards-compatibility shim: the real
# implementation lives in pymatgen.io.vasp.sets.
# Fixed warning text: the original read "moved pymatgen.io.vasp.sets"
# (missing "to").
warnings.warn("pymatgen.io.vaspio_set has been moved to pymatgen.io.vasp.sets. "
              "This stub will be removed in pymatgen 4.0.", DeprecationWarning)

# Re-export everything from the new location so old imports keep working.
from .vasp.sets import *
| Bismarrck/pymatgen | pymatgen/io/vaspio_set.py | Python | mit | 611 | [
"VASP",
"pymatgen"
] | cf9d74b5b8d2be064b70c0d4a180151ba3fe24f1cbb473871f4377c1d6a956f1 |
from threading import Thread
import json, requests, os, urllib, time
# Open config.json to get token (used for downloading replays of banned players) - Only for Community Managers
with open("config.json", "r") as f:
config = json.load(f)
# Gets JSON from a given beatnap
def getJSON(url):
    """GET `url` and return the decoded JSON body.

    Retries once on a timeout. On redirect loops or other request errors a
    message is printed and None is returned implicitly (callers guard with
    try/except or truthiness checks).
    """
    try:
        return requests.get(url=url).json()
    except requests.exceptions.Timeout:
        # Single retry on timeout. Bug fix: the original retried but never
        # returned the retried result, so callers always got None.
        return requests.get(url=url).json()
    except requests.exceptions.TooManyRedirects:
        print("Invalid link given")
    except requests.exceptions.RequestException as e:
        print(e)
# Downloads replays ONLY from a given user
def UserReplays(username, mode):
    """Download the replay of each of `username`'s best scores for `mode`.

    Replays are saved as './<username>/<username> - <song>.osr'. Requires a
    valid API token in config.json. Network failures for individual replays
    are reported and skipped.
    """
    print('Starting to download all replays from %s' % username)
    # Get data from Ripple API
    url = 'https://ripple.moe/api/v1/users/scores/best?name={}&mode={}&token={}'.format(username, mode, config["token"])
    data = getJSON(url)
    # Create path with the player's username if it doesn't already exist.
    newpath = os.getcwd() + "/" + username
    if not os.path.exists(newpath):
        os.makedirs(newpath)
    try:
        for score in data['scores']:
            # Get required data from API
            songName = score['beatmap']['song_name']
            scoreId = score['id']
            # Replace any nasty characters in the song's name - Error prevention
            nastyCharacters = ["\/", "\\", "<", ">", "?", ":", "*", "|", "\"", "/"]
            for char in nastyCharacters:
                songName = songName.replace(char, " ")
            # Specify the full file path
            directory = os.path.join(os.getcwd() + "/" + username)
            fullfilename = directory + "/" + username + " - " + songName + '.osr'
            try:
                # Create opener w/ headers (browser User-Agent avoids rejection)
                opener = urllib.request.build_opener()
                opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
                urllib.request.install_opener(opener)
                url = 'https://ripple.moe/web/replays/' + str(scoreId)
                local = str(fullfilename)
                # Download Replay
                urllib.request.urlretrieve(url, local)
                print("Downloading " + songName + ".osr...")
            except Exception as e:
                print("ERROR: Could not download file: " + songName + ".osr", e)
        print("Downloading replays is complete!")
        return
    except Exception as e:
        # Reached when `data` is None or has no 'scores' key.
        print("Can't download replays because the user doesn't have any scores for this mode.", e)
# Gets the top 50 replays of a given beatmap
def LeaderBoardReplays(beatmapid, mode):
    """Download the leaderboard replays for beatmap `beatmapid` into
    './<beatmapid>/<player>.osr'.

    NOTE(review): `mode` is accepted but never used in the API request —
    the leaderboard is fetched for the default mode only.
    """
    # Get data from Ripple API
    scores = getJSON('https://ripple.moe/api/v1/scores?b=%s' % beatmapid)
    # Create path if it doesn't exist
    newpath = os.getcwd() + "/" + beatmapid
    if not os.path.exists(newpath):
        os.makedirs(newpath)
    try:
        for score in scores['scores']:
            # Get required data
            scoreSetter = score['user']['username']
            scoreId = score['id']
            # Specify file path
            directory = os.path.join(os.getcwd() + "/" + beatmapid)
            fullfilename = directory + "/" + scoreSetter + '.osr'
            try:
                # Opener w/ headers (browser User-Agent avoids rejection)
                opener = urllib.request.build_opener()
                opener.addheaders = [('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
                urllib.request.install_opener(opener)
                # Download Replay
                urllib.request.urlretrieve('https://ripple.moe/web/replays/' + str(scoreId), str(fullfilename))
                print("Downloading " + scoreSetter + ".osr...")
            except Exception as e:
                print("ERROR: Could not download file: " + scoreSetter + ".osr", e)
        print("Download Complete.")
        return
    except Exception as e:
        # Reached when the API returned no usable score list.
        print(e)
# Downloads a given user's top 50 replays + the .osu files as well
def UserReplaysWithDifficulty(username, mode):
    """Download the .osu beatmap file for each of `username`'s best scores
    for `mode`, into './<username>/beatmaps/'.

    Despite its name this function only fetches beatmaps; the replays
    themselves are downloaded by UserReplays (the caller starts both).
    """
    # Get Data from API
    print('Starting to download all replays by %s with the .osu files' % username)
    data = getJSON('https://ripple.moe/api/v1/users/scores/best?name={}&mode={}&token={}'.format(username, mode, config["token"]))
    # Create path if doesn't exist
    newpath = os.getcwd() + "/" + username + "/beatmaps/"
    if not os.path.exists(newpath):
        os.makedirs(newpath)
    # specify all nasty characters (stripped from filenames)
    nastyCharacters = ["\/", "\\", "<", ">", "?", ":", "*", "|", "\"", "/"]
    for score in data['scores']:
        # Get the song's name and remove the nasty characters from it
        songName = score['beatmap']['song_name']
        for char in nastyCharacters:
            songName = songName.replace(char, " ")
        # Get beatmap_md5 from the given beatmap, then resolve its beatmap_id
        beatmap_md5 = score['beatmap_md5']
        url2 = getJSON('https://ripple.moe/api/get_beatmaps?h=' + beatmap_md5)
        # Specify file path
        directory = os.path.join(os.getcwd() + "/" + username + "/beatmaps/")
        fullfilename = directory + "/" + songName + '.osu'
        # Opener w/ headers (browser User-Agent avoids rejection)
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
        urllib.request.install_opener(opener)
        # Get .osu file from the official osu! servers
        urllib.request.urlretrieve('http://osu.ppy.sh/osu/' + url2[0]["beatmap_id"], str(fullfilename))
        print("Downloading " + songName + ".osu...")
    print("Downloading beatmaps complete!")
    return
# Main Execution: show the menu, read the user's choice, and run the chosen
# download in a background thread.
print("""
Options:
1. Download the top 50 replays from a beatmap
2. Download the top 50 replays from a given user
3. Download the top 50 replays from a given user + .osu files

Game Mode:
1. osu!
2. Taiko
3. CTB
4. Mania
""")

# Get option from user (menu numbers are 1-based; the API modes are 0-based,
# hence the mode-1 below).
option = int(input('Select an option to begin: '))
if option == 1:
    beatmapid = input('Enter a beatmapId (/b/) to begin: ')
    mode = int(input('Game Mode: '))
    Thread(target=LeaderBoardReplays, args=(beatmapid, mode-1, )).start()
elif option == 2:
    username = input('Enter a username to begin: ')
    mode = int(input('Game Mode: '))
    Thread(target=UserReplays, args=(username, mode-1,)).start()
elif option == 3:
    username = input('Enter a username to begin: ')
    mode = int(input('Game Mode: '))  # fixed prompt typo: was 'Gane Mode: '
    Thread(target=UserReplays, args=(username, mode-1, )).start()
    Thread(target=UserReplaysWithDifficulty, args=(username, mode-1, )).start()
print('Invalid option. Try again.') | Swan/Riplay | riplay.py | Python | mit | 6,821 | [
"MOE"
] | 152940d5746903bd011aaeed9570ff1c09bdc60a9e0e9dd2cb6d4872ec59d1e4 |
import numpy as np
from numpy import atleast_2d as vec
from scipy.stats import multivariate_normal
def gaussian_mix(query):
    """Evaluate a fixed, equal-weight three-component Gaussian mixture.

    :param query: point(s) accepted by scipy's multivariate_normal.pdf,
        e.g. a length-2 array for a single 2-D point
    :return: the averaged density wrapped by numpy.atleast_2d (shape (1, 1)
        for a single query point)
    """
    # The three fixed mixture components.
    components = [
        multivariate_normal(mean=[0.9, 0.1], cov=[[.05, 0], [0, .05]]),
        multivariate_normal(mean=[0.9, 0.9], cov=[[0.07, 0.01], [0.01, .07]]),
        multivariate_normal(mean=[0.15, 0.7], cov=[[.03, 0], [0, .03]]),
    ]
    # Equal-weight mixture: average the component densities. Iterating the
    # list directly also removes the Python-2-only `xrange(len(...))` loop.
    density = sum(g.pdf(query) for g in components) / len(components)
    return vec(density)
if __name__ == "__main__":
X = gaussian_mix(np.array([0.5, 0.5]))
print X
print X.shape
| RuiShu/Neural-Net-Bayesian-Optimization | learning_objective/gaussian_mix.py | Python | mit | 907 | [
"Gaussian"
] | d271d019c87341757988d261d04a18daeace82e82535daf9eaadb4dc4220f028 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
from monty.json import MontyDecoder, MontyEncoder
from pymatgen.apps.battery.insertion_battery import (
InsertionElectrode,
InsertionVoltagePair,
)
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.util.testing import PymatgenTest
class InsertionElectrodeTest(unittest.TestCase):
def setUp(self):
self.entry_Li = ComputedEntry("Li", -1.90753119)
self.entry_Ca = ComputedEntry("Ca", -1.99689568)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiTiO2_batt.json")) as f:
self.entries_LTO = json.load(f, cls=MontyDecoder)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "MgVO_batt.json")) as file:
self.entries_MVO = json.load(file, cls=MontyDecoder)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "Mg_batt.json")) as file:
self.entry_Mg = json.load(file, cls=MontyDecoder)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "CaMoO2_batt.json")) as f:
self.entries_CMO = json.load(f, cls=MontyDecoder)
self.ie_LTO = InsertionElectrode.from_entries(self.entries_LTO, self.entry_Li)
self.ie_MVO = InsertionElectrode.from_entries(self.entries_MVO, self.entry_Mg)
self.ie_CMO = InsertionElectrode.from_entries(self.entries_CMO, self.entry_Ca)
def test_voltage(self):
# test basic voltage
self.assertAlmostEqual(self.ie_LTO.max_voltage, 2.78583901, 3)
self.assertAlmostEqual(self.ie_LTO.min_voltage, 0.89702381, 3)
self.assertAlmostEqual(self.ie_LTO.get_average_voltage(), 1.84143141, 3)
# test voltage range selectors
self.assertAlmostEqual(self.ie_LTO.get_average_voltage(0, 1), 0.89702381, 3)
self.assertAlmostEqual(self.ie_LTO.get_average_voltage(2, 3), 2.78583901, 3)
# test non-existing voltage range
self.assertAlmostEqual(self.ie_LTO.get_average_voltage(0, 0.1), 0, 3)
self.assertAlmostEqual(self.ie_LTO.get_average_voltage(4, 5), 0, 3)
self.assertAlmostEqual(self.ie_MVO.get_average_voltage(), 2.513767, 3)
def test_capacities(self):
# test basic capacity
self.assertAlmostEqual(self.ie_LTO.get_capacity_grav(), 308.74865045, 3)
self.assertAlmostEqual(self.ie_LTO.get_capacity_vol(), 1205.99391136, 3)
# test capacity selector
self.assertAlmostEqual(self.ie_LTO.get_capacity_grav(1, 3), 154.374325225, 3)
# test alternate normalization option
self.assertAlmostEqual(self.ie_LTO.get_capacity_grav(1, 3, False), 160.803169506, 3)
self.assertIsNotNone(self.ie_LTO.get_summary_dict(True))
self.assertAlmostEqual(self.ie_MVO.get_capacity_grav(), 281.845548242, 3)
self.assertAlmostEqual(self.ie_MVO.get_capacity_vol(), 1145.80087994, 3)
def test_get_instability(self):
self.assertIsNone(self.ie_LTO.get_max_instability())
self.assertAlmostEqual(self.ie_MVO.get_max_instability(), 0.7233711650000014)
self.assertAlmostEqual(self.ie_MVO.get_min_instability(), 0.4913575099999994)
def test_get_muO2(self):
self.assertIsNone(self.ie_LTO.get_max_muO2())
self.assertAlmostEqual(self.ie_MVO.get_max_muO2(), -4.93552791875)
self.assertAlmostEqual(self.ie_MVO.get_min_muO2(), -11.06599657)
def test_entries(self):
# test that the proper number of sub-electrodes are returned
self.assertEqual(len(self.ie_LTO.get_sub_electrodes(False, True)), 3)
self.assertEqual(len(self.ie_LTO.get_sub_electrodes(True, True)), 2)
def test_get_all_entries(self):
self.ie_LTO.get_all_entries()
def test_to_from_dict(self):
    """Round-trip the electrode through as_dict/from_dict and JSON."""
    d = self.ie_LTO.as_dict()
    ie = InsertionElectrode.from_dict(d)
    # reconstructed object must reproduce the pinned voltages
    self.assertAlmostEqual(ie.max_voltage, 2.78583901, 3)
    self.assertAlmostEqual(ie.min_voltage, 0.89702381, 3)
    self.assertAlmostEqual(ie.get_average_voltage(), 1.84143141, 3)
    # Just to make sure json string works.
    json_str = json.dumps(self.ie_LTO, cls=MontyEncoder)
    ie = json.loads(json_str, cls=MontyDecoder)
    self.assertAlmostEqual(ie.max_voltage, 2.78583901, 3)
    self.assertAlmostEqual(ie.min_voltage, 0.89702381, 3)
    self.assertAlmostEqual(ie.get_average_voltage(), 1.84143141, 3)
def test_voltage_pair(self):
    """Check the attributes of the first voltage pair and its dict round-trip."""
    vpair = self.ie_LTO[0]
    self.assertAlmostEqual(vpair.voltage, 2.78583901)
    self.assertAlmostEqual(vpair.mAh, 13400.7411749, 2)
    self.assertAlmostEqual(vpair.mass_charge, 79.8658)
    self.assertAlmostEqual(vpair.mass_discharge, 83.3363)
    self.assertAlmostEqual(vpair.vol_charge, 37.553684467)
    self.assertAlmostEqual(vpair.vol_discharge, 37.917719932)
    self.assertAlmostEqual(vpair.frac_charge, 0.0)
    self.assertAlmostEqual(vpair.frac_discharge, 0.14285714285714285)
    self.assertAlmostEqual(vpair.x_charge, 0.0)
    self.assertAlmostEqual(vpair.x_discharge, 0.5)
    # reconstruct the voltage pair from its dict representation
    dd = vpair.as_dict()
    vv = InsertionVoltagePair.from_dict(dd)
    self.assertAlmostEqual(vv.entry_charge.energy, -105.53608265)
    self.assertAlmostEqual(vv.voltage, 2.78583901)
def test_get_summary_dict(self):
    """Check numeric and string fields of the electrode summary dict.

    Numeric values are compared with assertAlmostEqual; string fields
    (formulas) are compared with assertEqual — assertAlmostEqual on
    non-numeric operands raises TypeError on mismatch instead of
    producing a clean assertion failure.
    """
    d = self.ie_CMO.get_summary_dict()
    self.assertAlmostEqual(d["stability_charge"], 0.2346574583333325)
    self.assertAlmostEqual(d["stability_discharge"], 0.33379544031249786)
    self.assertAlmostEqual(d["muO2_data"]["mp-714969"][0]["chempot"], -4.93552791875)
    self.assertAlmostEqual(d["adj_pairs"][0]["muO2_data"]["mp-714969"][0]["chempot"], -4.93552791875)
    # string comparisons: use exact equality, not almost-equality
    self.assertEqual(d["framework_formula"], "MoO2")
    self.assertEqual(d["adj_pairs"][1]["framework_formula"], "MoO2")
def test_init_no_structure(self):
    """Electrodes built from structure-less entries must match the originals.

    Strips structures from the CMO entries (keeping the volume in
    ``entry.data``) and checks the summary dict is unchanged; then checks
    that ``strip_structures=True`` preserves the per-entry volumes.
    """

    def remove_structure(entries):
        # Rebuild each entry as a plain ComputedEntry, stashing the
        # structure volume in entry.data before the structure is dropped.
        ents = []
        for ient in entries:
            dd = ient.as_dict()
            ent = ComputedEntry.from_dict(dd)
            ent.data["volume"] = ient.structure.volume
            ents.append(ent)
        return ents

    ie_CMO_no_struct = InsertionElectrode.from_entries(remove_structure(self.entries_CMO), self.entry_Ca)
    d = ie_CMO_no_struct.get_summary_dict()
    self.assertAlmostEqual(d["stability_charge"], 0.2346574583333325)
    self.assertAlmostEqual(d["stability_discharge"], 0.33379544031249786)
    self.assertAlmostEqual(d["muO2_data"]["mp-714969"][0]["chempot"], -4.93552791875)

    ie_LTO_no_struct = InsertionElectrode.from_entries(self.entries_LTO, self.entry_Li, strip_structures=True)
    vols_no_struct = [ient.data["volume"] for ient in ie_LTO_no_struct.get_all_entries()]
    vols_struct = [ient.structure.volume for ient in self.ie_LTO.get_all_entries()]
    # Compare element-wise: assertAlmostEqual on two lists raises
    # TypeError (lists cannot be subtracted) instead of failing cleanly.
    self.assertEqual(len(vols_no_struct), len(vols_struct))
    for v_ns, v_s in zip(vols_no_struct, vols_struct):
        self.assertAlmostEqual(v_ns, v_s)
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| materialsproject/pymatgen | pymatgen/apps/battery/tests/test_insertion_battery.py | Python | mit | 7,049 | [
"pymatgen"
] | a173e37bab2ecad8b4a56af1514a18eaa673ad9a1245404d4cbc489f70003f28 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
    """Coerce X (and Y) to arrays and select the shared float dtype.

    float32 is preserved only when both inputs are already float32;
    in every other case the platform float (float64) is chosen.
    """
    if not (issparse(X) or isinstance(X, np.ndarray)):
        X = np.asarray(X)

    if Y is None:
        y_dtype = X.dtype
    else:
        if not (issparse(Y) or isinstance(Y, np.ndarray)):
            Y = np.asarray(Y)
        y_dtype = Y.dtype

    dtype = np.float32 if X.dtype == y_dtype == np.float32 else np.float
    return X, Y, dtype
def check_pairwise_arrays(X, Y):
    """Validate X and Y for a pairwise-distance computation.

    If Y is None it is set to X itself (no copy). Both inputs are
    converted to 2-D float arrays (CSR accepted) and must share the
    same number of features.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        Validated X as a numpy/scipy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        Validated Y, or a pointer to safe_X when Y was None.
    """
    X, Y, dtype = _return_float_dtype(X, Y)

    def _validate(arr):
        # single place for the conversion options used on both inputs
        return check_array(arr, accept_sparse='csr', dtype=dtype)

    if Y is None or Y is X:
        X = Y = _validate(X)
    else:
        X = _validate(X)
        Y = _validate(Y)

    if X.shape[1] != Y.shape[1]:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))

    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y for a *paired* distance computation.

    Runs the pairwise validation first, then additionally requires the
    two arrays to have identical shapes, since paired metrics match
    row i of X with row i of Y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        Validated X.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        Validated Y, or a pointer to safe_X when Y was None.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape != Y.shape:
        raise ValueError("X and Y should be of same shape. They were "
                         "respectively %r and %r long." % (X.shape, Y.shape))
    return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.

    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if x varies but y remains unchanged, then the right-most dot
    product `dot(y, y)` can be pre-computed.

    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)

    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)

    squared : boolean, optional
        Return squared Euclidean distances.

    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])

    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    # should not need X_norm_squared because if you could precompute that as
    # well as Y, then you should just pre-compute the output and not even
    # call this function.
    X, Y = check_pairwise_arrays(X, Y)

    if Y_norm_squared is not None:
        YY = check_array(Y_norm_squared)
        # must be a row vector aligned with the columns of the result
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    else:
        YY = row_norms(Y, squared=True)[np.newaxis, :]

    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        XX = YY.T
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]

    # ||x||^2 - 2 x.y + ||y||^2, built in place on the dot-product matrix
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # clamp tiny negative values caused by floating point cancellation
    np.maximum(distances, 0, out=distances)

    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0

    # np.sqrt with out= takes the square root in place
    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance). The minimal distances are
    also returned.

    This is mostly equivalent to calling:

        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))

    but uses much less memory, and is faster for large arrays.

    Parameters
    ----------
    X, Y : {array-like, sparse matrix}
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)

    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.

    metric : string or callable, default 'euclidean'
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.

        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        Distance matrices are not supported.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']

        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']

        See the documentation for scipy.spatial.distance for details on these
        metrics.

    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.

    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")

    X, Y = check_pairwise_arrays(X, Y)

    if metric_kwargs is None:
        metric_kwargs = {}

    if axis == 0:
        # argmin over rows of the distance matrix == argmin over columns of
        # the transposed problem, so just swap the operands
        X, Y = Y, X

    # Allocate output arrays
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    values.fill(np.infty)  # so any real distance beats the initial value

    # double loop over batch_size x batch_size sub-blocks of the distance
    # matrix, keeping a running per-row minimum in `values`/`indices`
    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]
            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # squared distances via ||x||^2 - 2 x.y + ||y||^2;
                    # the sqrt is applied once at the very end
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)

            # Update indices and minimum values using chunk
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]

            # rows of this X-chunk whose best-so-far is beaten by this Y-chunk;
            # indices[chunk_x] is a slice view, so the fancy assignment below
            # writes through to the output arrays
            flags = values[chunk_x] > min_values
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]

    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        # convert the accumulated squared distances in place
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """Compute, for each row of X, the index of the closest row of Y.

    Mostly equivalent to
    ``pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)``
    but processed in memory-bounded batches, which is faster for large
    arrays. Works with dense 2D arrays only.

    Parameters
    ----------
    X : array-like, shape (n_samples1, n_features)
        Array containing points.

    Y : array-like, shape (n_samples2, n_features)
        Array containing points.

    batch_size : integer
        Number of rows of X and of Y processed per block; larger values
        trade memory for speed.

    metric : string or callable
        Metric to use for distance computation; any scikit-learn or
        scipy.spatial.distance metric name, or a callable taking two
        arrays and returning a single distance. Distance matrices are
        not supported. See pairwise_distances_argmin_min for the full
        list of accepted metric names.

    metric_kwargs : dict
        Keyword arguments forwarded to the metric function.

    axis : int, optional, default 1
        Axis along which the argmin is computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    if metric_kwargs is None:
        metric_kwargs = {}

    # Delegate to the argmin+min variant and discard the distances.
    indices, _ = pairwise_distances_argmin_min(X, Y, axis, metric,
                                               batch_size, metric_kwargs)
    return indices
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """ Compute the L1 distances between the vectors in X and Y.

    With sum_over_features equal to False it returns the componentwise
    distances.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).

    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).

    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.

    size_threshold : int, default=5e8
        Unused parameter.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0.,  2.],
           [ 4.,  4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1.,  1.],
           [ 1.,  1.]]...)
    """
    X, Y = check_pairwise_arrays(X, Y)

    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)

        # normalize both operands to CSR for the Cython kernel
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        D = np.zeros((X.shape[0], Y.shape[0]))
        # Cython helper fills D with the pairwise L1 distances
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], D)
        return D

    if sum_over_features:
        # dense summed case: delegate to the C implementation in scipy
        return distance.cdist(X, Y, 'cityblock')

    # componentwise case: broadcast to (n_X, n_Y, n_features), take |diff|
    # in place, then flatten the first two axes
    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    D = np.abs(D, D)
    return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute the cosine distance between samples in X and Y.

    The cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).

    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # compute 1 - similarity in place, avoiding an extra copy
    D = cosine_similarity(X, Y)
    np.negative(D, D)
    D += 1
    return D
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the row-wise (paired) euclidean distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    # the L2 norm of each row difference is exactly the paired distance
    diff = X - Y
    return row_norms(diff)
def paired_manhattan_distances(X, Y):
    """Compute the row-wise (paired) L1 distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if not issparse(delta):
        # dense: |diff| summed over the feature axis
        return np.abs(delta).sum(axis=-1)
    # sparse: take |.| on the stored values only, then row-sum and flatten
    delta.data = np.abs(delta.data)
    return np.squeeze(np.array(delta.sum(axis=1)))
def paired_cosine_distances(X, Y):
    """Compute the row-wise (paired) cosine distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    ------
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm
    """
    X, Y = check_paired_arrays(X, Y)
    # 1 - cos(x, y) == ||x/|x| - y/|y|||^2 / 2
    unit_diff = normalize(X) - normalize(Y)
    return 0.5 * row_norms(unit_diff, squared=True)
# Mapping from metric name to the paired-distance implementation; the
# aliases 'l2'/'euclidean' and 'l1'/'manhattan'/'cityblock' share a function.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """Compute the distances between (X[0], Y[0]), (X[1], Y[1]), etc...

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Array 1 for distance computation.

    Y : ndarray (n_samples, n_features)
        Array 2 for distance computation.

    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([ 0.,  1.])

    See also
    --------
    pairwise_distances : pairwise distances.
    """
    if metric in PAIRED_DISTANCES:
        # fast path: dedicated vectorized implementation
        return PAIRED_DISTANCES[metric](X, Y)

    if callable(metric):
        # Check the matrix first (it is usually done by the metric)
        X, Y = check_paired_arrays(X, Y)
        distances = np.zeros(len(X))
        for i, (x, y) in enumerate(zip(X, Y)):
            distances[i] = metric(x, y)
        return distances

    raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
    """Compute the linear kernel (plain dot products) between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)

    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features)

    coef0 : int, default 1

    degree : int, default 3

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        # default scaling: inverse of the feature count
        gamma = 1.0 / X.shape[1]

    K = safe_sparse_dot(X, Y.T, dense_output=True)
    K = (gamma * K + coef0) ** degree
    return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features)

    coef0 : int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        # default scaling: inverse of the feature count
        gamma = 1.0 / X.shape[1]

    K = safe_sparse_dot(X, Y.T, dense_output=True)
    return np.tanh(gamma * K + coef0)
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    gamma : float

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        # default scaling: inverse of the feature count
        gamma = 1.0 / X.shape[1]

    # exponentiate the scaled squared distances in place
    K = euclidean_distances(X, Y, squared=True)
    K *= -gamma
    np.exp(K, K)
    return K
def cosine_similarity(X, Y=None):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).

    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    # to avoid recursive import
    X, Y = check_pairwise_arrays(X, Y)

    X_normalized = normalize(X, copy=True)
    # reuse the normalized X when computing the self-similarity matrix
    Y_normalized = X_normalized if X is Y else normalize(Y, copy=True)

    return safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True)
def additive_chi2_kernel(X, Y=None):
    """Compute the additive chi-squared kernel between observations in X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.

    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    # the Cython kernel below only handles dense input
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")

    # output buffer is filled in place by the Cython helper
    K = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, K)
    return K
def chi2_kernel(X, Y=None, gamma=1.):
    """Compute the exponential chi-squared kernel between X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel

    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # scale and exponentiate the additive kernel in place
    K = additive_chi2_kernel(X, Y)
    K *= gamma
    np.exp(K, K)
    return K
# Helper functions - distance
# Helper functions - distance
# Mapping from metric name to the pairwise-distance implementation;
# 'l1'/'manhattan'/'cityblock' and 'l2'/'euclidean' are aliases.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances, }
def distance_metrics():
    """Valid metrics for pairwise_distances.

    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.

    The valid distance metrics, and the function they map to, are:

    ============     ====================================
    metric           Function
    ============     ====================================
    'cityblock'      metrics.pairwise.manhattan_distances
    'cosine'         metrics.pairwise.cosine_distances
    'euclidean'      metrics.pairwise.euclidean_distances
    'l1'             metrics.pairwise.manhattan_distances
    'l2'             metrics.pairwise.euclidean_distances
    'manhattan'      metrics.pairwise.manhattan_distances
    ============     ====================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    # the module-level registry is the single source of truth
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Evaluate ``func(X, Y_slice)`` over even slices of Y in parallel and
    stack the partial results horizontally."""
    if n_jobs < 0:
        # joblib convention: -1 means all cores, -2 all but one, etc.
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    if Y is None:
        Y = X

    if n_jobs == 1:
        # Special case to avoid picklability checks in delayed
        return func(X, Y, **kwds)

    # TODO: in some cases, backend='threading' may be appropriate
    dispatch = delayed(func)
    slabs = Parallel(n_jobs=n_jobs, verbose=0)(
        dispatch(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))
    return np.hstack(slabs)
def _pairwise_callable(X, Y, metric, **kwds):
    """Handle the user-supplied-callable case for pairwise_{distances,kernels}."""
    X, Y = check_pairwise_arrays(X, Y)
    n_x, n_y = X.shape[0], Y.shape[0]

    if X is Y:
        # Symmetric case: evaluate the metric on the upper triangle only,
        # mirror it, then fill the diagonal explicitly (nonzero diagonals
        # are allowed for both metrics and kernels).
        out = np.zeros((n_x, n_y), dtype='float')
        for i, j in itertools.combinations(range(n_x), 2):
            out[i, j] = metric(X[i], Y[j], **kwds)
        # NB: out += out.T would give wrong results (in-place aliasing)
        out = out + out.T
        for i in range(n_x):
            x = X[i]
            out[i, i] = metric(x, x, **kwds)
    else:
        # General case: evaluate every (i, j) cell.
        out = np.empty((n_x, n_y), dtype='float')
        for i, j in itertools.product(range(n_x), range(n_y)):
            out[i, j] = metric(X[i], Y[j], **kwds)

    return out
# All metric names accepted by pairwise_distances: the scikit-learn
# implementations plus the metrics delegated to scipy.spatial.distance.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - distance
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| WangWenjun559/Weiss | summary/sumy/sklearn/metrics/pairwise.py | Python | apache-2.0 | 42,672 | [
"Gaussian"
] | a7c986089684e78e1785fbd500e312521ddcaf62f9cf0deb99dca32df7f88825 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Localization based on ALPHA orbitals from UHF/UKS check files
#
from functools import reduce
import numpy
import scipy.linalg
from pyscf import tools,gto,scf,dft
import h5py
from pyscf.tools import molden
def sqrtm(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v*numpy.sqrt(e), v.T.conj())
def lowdin(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v/numpy.sqrt(e), v.T.conj())
# Sort by <i|n|i>
def psort(ova,fav,pT,coeff):
# Compute expectation value
pTnew = 2.0*reduce(numpy.dot,(coeff.T,ova,pT,ova,coeff))
nocc = numpy.diag(pTnew)
index = numpy.argsort(-nocc)
ncoeff = coeff[:,index]
nocc = nocc[index]
enorb = numpy.diag(reduce(numpy.dot,(coeff.T,ova,fav,ova,coeff)))
enorb = enorb[index]
return ncoeff,nocc,enorb
def lowdinPop(mol,coeff,ova,enorb,occ):
print '\nLowdin population for LMOs:'
nb,nc = coeff.shape
s12 = sqrtm(ova)
lcoeff = s12.dot(coeff)
diff = reduce(numpy.dot,(lcoeff.T,lcoeff)) - numpy.identity(nc)
print 'diff=',numpy.linalg.norm(diff)
pthresh = 0.05
labels = mol.ao_labels(None)
nelec = 0.0
for iorb in range(nc):
vec = lcoeff[:,iorb]**2
idx = list(numpy.argwhere(vec>pthresh))
print ' iorb=',iorb,' occ=',occ[iorb],' <i|F|i>=',enorb[iorb]
for iao in idx:
print ' iao=',labels[iao],' pop=',vec[iao]
nelec += occ[iorb]
print 'nelec=',nelec
return 0
def scdm(coeff,ova,aux):
no = coeff.shape[1]
ova = reduce(numpy.dot,(coeff.T,ova,aux))
# ova = no*nb
q,r,piv = scipy.linalg.qr(ova,pivoting=True)
bc = ova[:,piv[:no]]
ova2 = numpy.dot(bc.T,bc)
s12inv = lowdin(ova2)
cnew = reduce(numpy.dot,(coeff,bc,s12inv))
return cnew
def dumpLMO(mol,fname,lmo):
print 'Dump into '+fname+'.h5'
f = h5py.File(fname+'.h5','w')
f.create_dataset("lmo",data=lmo)
f.close()
print 'Dump into '+fname+'_lmo.molden'
with open(fname+'_lmo.molden','w') as thefile:
molden.header(mol,thefile)
molden.orbital_coeff(mol,thefile,lmo)
return 0
#=============================
# DUMP from chkfile to molden
#=============================
def dumpLocal(fname):
chkfile = fname+'.chk'
outfile = fname+'_cmo.molden'
tools.molden.from_chkfile(outfile, chkfile)
mol,mf = scf.chkfile.load_scf(chkfile)
mo_coeff = mf["mo_coeff"]
ova=mol.intor_symmetric("cint1e_ovlp_sph")
nb = mo_coeff.shape[1]
nalpha = (mol.nelectron+mol.spin)/2
nbeta = (mol.nelectron-mol.spin)/2
print 'nalpha,nbeta,mol.spin,nb:',\
nalpha,nbeta,mol.spin,nb
# UHF-alpha/beta
ma = mo_coeff[0]
mb = mo_coeff[1]
#=============================
# Localization
#=============================
ma_c = ma[:,:nalpha].copy()
ma_v = ma[:,nalpha:].copy()
#--------------------
# Occupied space: PM
#--------------------
import pmloc
ierr,uc = pmloc.loc(mol,ma_c)
mc = numpy.dot(ma_c,uc)
#--------------------
# Virtual space: PAO
#--------------------
from pyscf import lo
aux = lo.orth_ao(mol,method='meta_lowdin')
mv = scdm(ma_v,ova,aux)
# P[dm]
pa = numpy.dot(ma[:,:nalpha],ma[:,:nalpha].T)
pb = numpy.dot(mb[:,:nbeta],mb[:,:nbeta].T)
pT = 0.5*(pa+pb)
# E-SORT
enorb = mf["mo_energy"]
fa = reduce(numpy.dot,(ma,numpy.diag(enorb[0]),ma.T))
fb = reduce(numpy.dot,(mb,numpy.diag(enorb[1]),mb.T))
fav = 0.5*(fa+fb)
mc,occ_c,ec = psort(ova,fav,pT,mc)
mv,occ_v,ev = psort(ova,fav,pT,mv)
#---Check---
tij = reduce(numpy.dot,(mc.T,ova,ma_c))
sig = scipy.linalg.svd(tij,compute_uv=False)
print 'nc=',nalpha,numpy.sum(sig**2)
assert abs(nalpha-numpy.sum(sig**2))<1.e-8
tij = reduce(numpy.dot,(mv.T,ova,ma_v))
sig = scipy.linalg.svd(tij,compute_uv=False)
print 'nv=',nb-nalpha,numpy.sum(sig**2)
assert abs(nb-nalpha-numpy.sum(sig**2))<1.e-8
lmo = numpy.hstack([mc,mv])
enorb = numpy.hstack([ec,ev])
occ = numpy.hstack([occ_c,occ_v])
lowdinPop(mol,lmo,ova,enorb,occ)
dumpLMO(mol,fname,lmo)
print 'nalpha,nbeta,mol.spin,nb:',\
nalpha,nbeta,mol.spin,nb
return 0
if __name__ == '__main__':
fname = 'hs_bp86'
dumpLocal(fname)
| sunqm/pyscf | examples/local_orb/ulocal.py | Python | apache-2.0 | 4,775 | [
"PySCF"
] | c46221c36c2436c15a9a96b36da210f8c9ce53bac8aa5049c71c29826d4252c9 |
# Copyright 2008-2011 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.AlignIO support for "fasta-m10" output from Bill Pearson's FASTA tools.
You are expected to use this module via the Bio.AlignIO functions (or the
Bio.SeqIO functions if you want to work directly with the gapped sequences).
This module contains a parser for the pairwise alignments produced by Bill
Pearson's FASTA tools, for use from the Bio.AlignIO interface where it is
refered to as the "fasta-m10" file format (as we only support the machine
readable output format selected with the -m 10 command line option).
This module does NOT cover the generic "fasta" file format originally
developed as an input format to the FASTA tools. The Bio.AlignIO and
Bio.SeqIO both use the Bio.SeqIO.FastaIO module to deal with these files,
which can also be used to store a multiple sequence alignments.
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../..')
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import single_letter_alphabet, generic_dna, generic_protein
from Bio.Alphabet import Gapped
__docformat__ = "restructuredtext en"
def _extract_alignment_region(alignment_seq_with_flanking, annotation):
"""Helper function for the main parsing code (PRIVATE).
To get the actual pairwise alignment sequences, we must first
translate the un-gapped sequence based coordinates into positions
in the gapped sequence (which may have a flanking region shown
using leading - characters). To date, I have never seen any
trailing flanking region shown in the m10 file, but the
following code should also cope with that.
Note that this code seems to work fine even when the "sq_offset"
entries are prsent as a result of using the -X command line option.
"""
align_stripped = alignment_seq_with_flanking.strip("-")
display_start = int(annotation['al_display_start'])
if int(annotation['al_start']) <= int(annotation['al_stop']):
start = int(annotation['al_start']) \
- display_start
end = int(annotation['al_stop']) \
- display_start + 1
else:
# FASTA has flipped this sequence...
start = display_start \
- int(annotation['al_start'])
end = display_start \
- int(annotation['al_stop']) + 1
end += align_stripped.count("-")
assert 0 <= start and start < end and end <= len(align_stripped), \
"Problem with sequence start/stop,\n%s[%i:%i]\n%s" \
% (alignment_seq_with_flanking, start, end, annotation)
return align_stripped[start:end]
def FastaM10Iterator(handle, alphabet=single_letter_alphabet):
"""Alignment iterator for the FASTA tool's pairwise alignment output.
This is for reading the pairwise alignments output by Bill Pearson's
FASTA program when called with the -m 10 command line option for machine
readable output. For more details about the FASTA tools, see the website
http://fasta.bioch.virginia.edu/ and the paper:
W.R. Pearson & D.J. Lipman PNAS (1988) 85:2444-2448
This class is intended to be used via the Bio.AlignIO.parse() function
by specifying the format as "fasta-m10" as shown in the following code::
from Bio import AlignIO
handle = ...
for a in AlignIO.parse(handle, "fasta-m10"):
assert len(a) == 2, "Should be pairwise!"
print("Alignment length %i" % a.get_alignment_length())
for record in a:
print("%s %s %s" % (record.seq, record.name, record.id))
Note that this is not a full blown parser for all the information
in the FASTA output - for example, most of the header and all of the
footer is ignored. Also, the alignments are not batched according to
the input queries.
Also note that there can be up to about 30 letters of flanking region
included in the raw FASTA output as contextual information. This is NOT
part of the alignment itself, and is not included in the resulting
MultipleSeqAlignment objects returned.
"""
if alphabet is None:
alphabet = single_letter_alphabet
state_PREAMBLE = -1
state_NONE = 0
state_QUERY_HEADER = 1
state_ALIGN_HEADER = 2
state_ALIGN_QUERY = 3
state_ALIGN_MATCH = 4
state_ALIGN_CONS = 5
def build_hsp():
if not query_tags and not match_tags:
raise ValueError("No data for query %r, match %r"
% (query_id, match_id))
assert query_tags, query_tags
assert match_tags, match_tags
evalue = align_tags.get("fa_expect", None)
q = "?" # Just for printing len(q) in debug below
m = "?" # Just for printing len(m) in debug below
tool = global_tags.get("tool", "").upper()
try:
q = _extract_alignment_region(query_seq, query_tags)
if tool in ["TFASTX"] and len(match_seq) == len(q):
m = match_seq
# Quick hack until I can work out how -, * and / characters
# and the apparent mix of aa and bp coordinates works.
else:
m = _extract_alignment_region(match_seq, match_tags)
assert len(q) == len(m)
except AssertionError as err:
print("Darn... amino acids vs nucleotide coordinates?")
print(tool)
print(query_seq)
print(query_tags)
print("%s %i" % (q, len(q)))
print(match_seq)
print(match_tags)
print("%s %i" % (m, len(m)))
print(handle.name)
raise err
assert alphabet is not None
alignment = MultipleSeqAlignment([], alphabet)
# TODO - Introduce an annotated alignment class?
# For now, store the annotation a new private property:
alignment._annotations = {}
# Want to record both the query header tags, and the alignment tags.
for key, value in header_tags.items():
alignment._annotations[key] = value
for key, value in align_tags.items():
alignment._annotations[key] = value
# Query
# =====
record = SeqRecord(Seq(q, alphabet),
id=query_id,
name="query",
description=query_descr,
annotations={"original_length": int(query_tags["sq_len"])})
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(query_tags["al_start"])
record._al_stop = int(query_tags["al_stop"])
alignment.append(record)
# TODO - What if a specific alphabet has been requested?
# TODO - Use an IUPAC alphabet?
# TODO - Can FASTA output RNA?
if alphabet == single_letter_alphabet and "sq_type" in query_tags:
if query_tags["sq_type"] == "D":
record.seq.alphabet = generic_dna
elif query_tags["sq_type"] == "p":
record.seq.alphabet = generic_protein
if "-" in q:
if not hasattr(record.seq.alphabet, "gap_char"):
record.seq.alphabet = Gapped(record.seq.alphabet, "-")
# Match
# =====
record = SeqRecord(Seq(m, alphabet),
id=match_id,
name="match",
description=match_descr,
annotations={"original_length": int(match_tags["sq_len"])})
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(match_tags["al_start"])
record._al_stop = int(match_tags["al_stop"])
alignment.append(record)
# This is still a very crude way of dealing with the alphabet:
if alphabet == single_letter_alphabet and "sq_type" in match_tags:
if match_tags["sq_type"] == "D":
record.seq.alphabet = generic_dna
elif match_tags["sq_type"] == "p":
record.seq.alphabet = generic_protein
if "-" in m:
if not hasattr(record.seq.alphabet, "gap_char"):
record.seq.alphabet = Gapped(record.seq.alphabet, "-")
return alignment
state = state_PREAMBLE
query_id = None
match_id = None
query_descr = ""
match_descr = ""
global_tags = {}
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
for line in handle:
if ">>>" in line and not line.startswith(">>>"):
if query_id and match_id:
# This happens on old FASTA output which lacked an end of
# query >>><<< marker line.
yield build_hsp()
state = state_NONE
query_descr = line[line.find(">>>")+3:].strip()
query_id = query_descr.split(None, 1)[0]
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith("!! No "):
# e.g.
# !! No library sequences with E() < 0.5
# or on more recent versions,
# No sequences with E() < 0.05
assert state == state_NONE
assert not header_tags
assert not align_tags
assert not match_tags
assert not query_tags
assert match_id is None
assert not query_seq
assert not match_seq
assert not cons_seq
query_id = None
elif line.strip() in [">>><<<", ">>>///"]:
# End of query, possible end of all queries
if query_id and match_id:
yield build_hsp()
state = state_NONE
query_id = None
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith(">>>"):
# Should be start of a match!
assert query_id is not None
assert line[3:].split(", ", 1)[0] == query_id, line
assert match_id is None
assert not header_tags
assert not align_tags
assert not query_tags
assert not match_tags
assert not match_seq
assert not query_seq
assert not cons_seq
state = state_QUERY_HEADER
elif line.startswith(">>"):
# Should now be at start of a match alignment!
if query_id and match_id:
yield build_hsp()
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
match_descr = line[2:].strip()
match_id = match_descr.split(None, 1)[0]
state = state_ALIGN_HEADER
elif line.startswith(">--"):
# End of one HSP
assert query_id and match_id, line
yield build_hsp()
# Clean up read for next HSP
# but reuse header_tags
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
state = state_ALIGN_HEADER
elif line.startswith(">"):
if state == state_ALIGN_HEADER:
# Should be start of query alignment seq...
assert query_id is not None, line
assert match_id is not None, line
assert query_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_QUERY
elif state == state_ALIGN_QUERY:
# Should be start of match alignment seq
assert query_id is not None, line
assert match_id is not None, line
assert match_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_MATCH
elif state == state_NONE:
# Can get > as the last line of a histogram
pass
else:
assert False, "state %i got %r" % (state, line)
elif line.startswith("; al_cons"):
assert state == state_ALIGN_MATCH, line
state = state_ALIGN_CONS
# Next line(s) should be consensus seq...
elif line.startswith("; "):
if ": " in line:
key, value = [s.strip() for s in line[2:].split(": ", 1)]
else:
import warnings
# Seen in lalign36, specifically version 36.3.4 Apr, 2011
# Fixed in version 36.3.5b Oct, 2011(preload8)
warnings.warn("Missing colon in line: %r" % line)
try:
key, value = [s.strip() for s in line[2:].split(" ", 1)]
except ValueError:
raise ValueError("Bad line: %r" % line)
if state == state_QUERY_HEADER:
header_tags[key] = value
elif state == state_ALIGN_HEADER:
align_tags[key] = value
elif state == state_ALIGN_QUERY:
query_tags[key] = value
elif state == state_ALIGN_MATCH:
match_tags[key] = value
else:
assert False, "Unexpected state %r, %r" % (state, line)
elif state == state_ALIGN_QUERY:
query_seq += line.strip()
elif state == state_ALIGN_MATCH:
match_seq += line.strip()
elif state == state_ALIGN_CONS:
cons_seq += line.strip("\n")
elif state == state_PREAMBLE:
if line.startswith("#"):
global_tags["command"] = line[1:].strip()
elif line.startswith(" version "):
global_tags["version"] = line[9:].strip()
elif " compares a " in line:
global_tags["tool"] = line[:line.find(" compares a ")].strip()
elif " searches a " in line:
global_tags["tool"] = line[:line.find(" searches a ")].strip()
else:
pass
if __name__ == "__main__":
print("Running a quick self-test")
# http://emboss.sourceforge.net/docs/themes/alnformats/align.simple
simple_example = \
"""# /opt/fasta/fasta34 -Q -H -E 1 -m 10 NC_002127.faa NC_009649.faa
FASTA searches a protein or DNA sequence data bank
version 34.26 January 12, 2007
Please cite:
W.R. Pearson & D.J. Lipman PNAS (1988) 85:2444-2448
Query library NC_002127.faa vs NC_009649.faa library
searching NC_009649.faa library
1>>>gi|10955263|ref|NP_052604.1| plasmid mobilization [Escherichia coli O157:H7 s 107 aa - 107 aa
vs NC_009649.faa library
45119 residues in 180 sequences
Expectation_n fit: rho(ln(x))= 6.9146+/-0.0249; mu= -5.7948+/- 1.273
mean_var=53.6859+/-13.609, 0's: 0 Z-trim: 1 B-trim: 9 in 1/25
Lambda= 0.175043
FASTA (3.5 Sept 2006) function [optimized, BL50 matrix (15:-5)] ktup: 2
join: 36, opt: 24, open/ext: -10/-2, width: 16
Scan time: 0.000
The best scores are: opt bits E(180)
gi|152973457|ref|YP_001338508.1| ATPase with chape ( 931) 71 24.9 0.58
gi|152973588|ref|YP_001338639.1| F pilus assembly ( 459) 63 23.1 0.99
>>>gi|10955263|ref|NP_052604.1|, 107 aa vs NC_009649.faa library
; pg_name: /opt/fasta/fasta34
; pg_ver: 34.26
; pg_argv: /opt/fasta/fasta34 -Q -H -E 1 -m 10 NC_002127.faa NC_009649.faa
; pg_name: FASTA
; pg_ver: 3.5 Sept 2006
; pg_matrix: BL50 (15:-5)
; pg_open-ext: -10 -2
; pg_ktup: 2
; pg_optcut: 24
; pg_cgap: 36
; mp_extrap: 60000 180
; mp_stats: Expectation_n fit: rho(ln(x))= 6.9146+/-0.0249; mu= -5.7948+/- 1.273 mean_var=53.6859+/-13.609, 0's: 0 Z-trim: 1 B-trim: 9 in 1/25 Lambda= 0.175043
; mp_KS: -0.0000 (N=0) at 8159228
>>gi|152973457|ref|YP_001338508.1| ATPase with chaperone activity, ATP-binding subunit [Klebsiella pneumoniae subsp. pneumoniae MGH 78578]
; fa_frame: f
; fa_initn: 65
; fa_init1: 43
; fa_opt: 71
; fa_z-score: 90.3
; fa_bits: 24.9
; fa_expect: 0.58
; sw_score: 71
; sw_ident: 0.250
; sw_sim: 0.574
; sw_overlap: 108
>gi|10955263| ..
; sq_len: 107
; sq_offset: 1
; sq_type: p
; al_start: 5
; al_stop: 103
; al_display_start: 1
--------------------------MTKRSGSNT-RRRAISRPVRLTAE
ED---QEIRKRAAECGKTVSGFLRAAALGKKVNSLTDDRVLKEVM-----
RLGALQKKLFIDGKRVGDREYAEVLIAITEYHRALLSRLMAD
>gi|152973457|ref|YP_001338508.1| ..
; sq_len: 931
; sq_type: p
; al_start: 96
; al_stop: 195
; al_display_start: 66
SDFFRIGDDATPVAADTDDVVDASFGEPAAAGSGAPRRRGSGLASRISEQ
SEALLQEAAKHAAEFGRS------EVDTEHLLLALADSDVVKTILGQFKI
KVDDLKRQIESEAKR-GDKPF-EGEIGVSPRVKDALSRAFVASNELGHSY
VGPEHFLIGLAEEGEGLAANLLRRYGLTPQ
>>gi|152973588|ref|YP_001338639.1| F pilus assembly protein [Klebsiella pneumoniae subsp. pneumoniae MGH 78578]
; fa_frame: f
; fa_initn: 33
; fa_init1: 33
; fa_opt: 63
; fa_z-score: 86.1
; fa_bits: 23.1
; fa_expect: 0.99
; sw_score: 63
; sw_ident: 0.266
; sw_sim: 0.656
; sw_overlap: 64
>gi|10955263| ..
; sq_len: 107
; sq_offset: 1
; sq_type: p
; al_start: 32
; al_stop: 94
; al_display_start: 2
TKRSGSNTRRRAISRPVRLTAEEDQEIRKRAAECGKTVSGFLRAAALGKK
VNSLTDDRVLKEV-MRLGALQKKLFIDGKRVGDREYAEVLIAITEYHRAL
LSRLMAD
>gi|152973588|ref|YP_001338639.1| ..
; sq_len: 459
; sq_type: p
; al_start: 191
; al_stop: 248
; al_display_start: 161
VGGLFPRTQVAQQKVCQDIAGESNIFSDWAASRQGCTVGG--KMDSVQDK
ASDKDKERVMKNINIMWNALSKNRLFDG----NKELKEFIMTLTGTLIFG
ENSEITPLPARTTDQDLIRAMMEGGTAKIYHCNDSDKCLKVVADATVTIT
SNKALKSQISALLSSIQNKAVADEKLTDQE
2>>>gi|10955264|ref|NP_052605.1| hypothetical protein pOSAK1_02 [Escherichia coli O157:H7 s 126 aa - 126 aa
vs NC_009649.faa library
45119 residues in 180 sequences
Expectation_n fit: rho(ln(x))= 7.1374+/-0.0246; mu= -7.6540+/- 1.313
mean_var=51.1189+/-13.171, 0's: 0 Z-trim: 1 B-trim: 8 in 1/25
Lambda= 0.179384
FASTA (3.5 Sept 2006) function [optimized, BL50 matrix (15:-5)] ktup: 2
join: 36, opt: 24, open/ext: -10/-2, width: 16
Scan time: 0.000
The best scores are: opt bits E(180)
gi|152973462|ref|YP_001338513.1| hypothetical prot ( 101) 58 22.9 0.29
>>>gi|10955264|ref|NP_052605.1|, 126 aa vs NC_009649.faa library
; pg_name: /opt/fasta/fasta34
; pg_ver: 34.26
; pg_argv: /opt/fasta/fasta34 -Q -H -E 1 -m 10 NC_002127.faa NC_009649.faa
; pg_name: FASTA
; pg_ver: 3.5 Sept 2006
; pg_matrix: BL50 (15:-5)
; pg_open-ext: -10 -2
; pg_ktup: 2
; pg_optcut: 24
; pg_cgap: 36
; mp_extrap: 60000 180
; mp_stats: Expectation_n fit: rho(ln(x))= 7.1374+/-0.0246; mu= -7.6540+/- 1.313 mean_var=51.1189+/-13.171, 0's: 0 Z-trim: 1 B-trim: 8 in 1/25 Lambda= 0.179384
; mp_KS: -0.0000 (N=0) at 8159228
>>gi|152973462|ref|YP_001338513.1| hypothetical protein KPN_pKPN3p05904 [Klebsiella pneumoniae subsp. pneumoniae MGH 78578]
; fa_frame: f
; fa_initn: 50
; fa_init1: 50
; fa_opt: 58
; fa_z-score: 95.8
; fa_bits: 22.9
; fa_expect: 0.29
; sw_score: 58
; sw_ident: 0.289
; sw_sim: 0.632
; sw_overlap: 38
>gi|10955264| ..
; sq_len: 126
; sq_offset: 1
; sq_type: p
; al_start: 1
; al_stop: 38
; al_display_start: 1
------------------------------MKKDKKYQIEAIKNKDKTLF
IVYATDIYSPSEFFSKIESDLKKKKSKGDVFFDLIIPNGGKKDRYVYTSF
NGEKFSSYTLNKVTKTDEYN
>gi|152973462|ref|YP_001338513.1| ..
; sq_len: 101
; sq_type: p
; al_start: 44
; al_stop: 81
; al_display_start: 14
DALLGEIQRLRKQVHQLQLERDILTKANELIKKDLGVSFLKLKNREKTLI
VDALKKKYPVAELLSVLQLARSCYFYQNVCTISMRKYA
3>>>gi|10955265|ref|NP_052606.1| hypothetical protein pOSAK1_03 [Escherichia coli O157:H7 s 346 aa - 346 aa
vs NC_009649.faa library
45119 residues in 180 sequences
Expectation_n fit: rho(ln(x))= 6.0276+/-0.0276; mu= 3.0670+/- 1.461
mean_var=37.1634+/- 8.980, 0's: 0 Z-trim: 1 B-trim: 14 in 1/25
Lambda= 0.210386
FASTA (3.5 Sept 2006) function [optimized, BL50 matrix (15:-5)] ktup: 2
join: 37, opt: 25, open/ext: -10/-2, width: 16
Scan time: 0.020
The best scores are: opt bits E(180)
gi|152973545|ref|YP_001338596.1| putative plasmid ( 242) 70 27.5 0.082
>>>gi|10955265|ref|NP_052606.1|, 346 aa vs NC_009649.faa library
; pg_name: /opt/fasta/fasta34
; pg_ver: 34.26
; pg_argv: /opt/fasta/fasta34 -Q -H -E 1 -m 10 NC_002127.faa NC_009649.faa
; pg_name: FASTA
; pg_ver: 3.5 Sept 2006
; pg_matrix: BL50 (15:-5)
; pg_open-ext: -10 -2
; pg_ktup: 2
; pg_optcut: 25
; pg_cgap: 37
; mp_extrap: 60000 180
; mp_stats: Expectation_n fit: rho(ln(x))= 6.0276+/-0.0276; mu= 3.0670+/- 1.461 mean_var=37.1634+/- 8.980, 0's: 0 Z-trim: 1 B-trim: 14 in 1/25 Lambda= 0.210386
; mp_KS: -0.0000 (N=0) at 8159228
>>gi|152973545|ref|YP_001338596.1| putative plasmid SOS inhibition protein A [Klebsiella pneumoniae subsp. pneumoniae MGH 78578]
; fa_frame: f
; fa_initn: 52
; fa_init1: 52
; fa_opt: 70
; fa_z-score: 105.5
; fa_bits: 27.5
; fa_expect: 0.082
; sw_score: 70
; sw_ident: 0.279
; sw_sim: 0.651
; sw_overlap: 43
>gi|10955265| ..
; sq_len: 346
; sq_offset: 1
; sq_type: p
; al_start: 197
; al_stop: 238
; al_display_start: 167
DFMCSILNMKEIVEQKNKEFNVDIKKETIESELHSKLPKSIDKIHEDIKK
QLSC-SLIMKKIDVEMEDYSTYCFSALRAIEGFIYQILNDVCNPSSSKNL
GEYFTENKPKYIIREIHQET
>gi|152973545|ref|YP_001338596.1| ..
; sq_len: 242
; sq_type: p
; al_start: 52
; al_stop: 94
; al_display_start: 22
IMTVEEARQRGARLPSMPHVRTFLRLLTGCSRINSDVARRIPGIHRDPKD
RLSSLKQVEEALDMLISSHGEYCPLPLTMDVQAENFPEVLHTRTVRRLKR
QDFAFTRKMRREARQVEQSW
>>><<<
579 residues in 3 query sequences
45119 residues in 180 library sequences
Scomplib [34.26]
start: Tue May 20 16:38:45 2008 done: Tue May 20 16:38:45 2008
Total Scan time: 0.020 Total Display time: 0.010
Function used was FASTA [version 34.26 January 12, 2007]
"""
from Bio._py3k import StringIO

# Smoke-test the parser on the embedded example output above.
alignments = list(FastaM10Iterator(StringIO(simple_example)))
assert len(alignments) == 4, len(alignments)
assert len(alignments[0]) == 2
for a in alignments:
    print("Alignment %i sequences of length %i"
          % (len(a), a.get_alignment_length()))
    for r in a:
        print("%s %s %i" % (r.seq, r.id, r.annotations["original_length"]))
    # print(a.annotations)
print("Done")

import os
path = "../../Tests/Fasta/"
# ``files`` is already restricted to .m10 output, so no second check is
# needed inside the loop (the original re-tested the extension redundantly).
files = sorted(f for f in os.listdir(path)
               if os.path.splitext(f)[-1] == ".m10")
for filename in files:
    print("")
    print(filename)
    print("=" * len(filename))
    # BUG FIX: the original leaked the file handle; close it deterministically.
    with open(os.path.join(path, filename)) as handle:
        for i, a in enumerate(FastaM10Iterator(handle)):
            print("#%i, %s" % (i + 1, a))
            for r in a:
                if "-" in r.seq:
                    # Gapped sequences must advertise "-" as their gap char.
                    assert r.seq.alphabet.gap_char == "-"
                else:
                    assert not hasattr(r.seq.alphabet, "gap_char")
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/AlignIO/FastaIO.py | Python | gpl-2.0 | 23,277 | [
"Biopython"
] | ada36c7a9e6814b36da1b2ff1a2aa89aee90423de742bd242e3d202665e62ee6 |
# -*- coding=utf-8 -*-
"""
All entry points for API are defined here.
Attributes:
mod (Blueprint): Flask Blueprint object used to separate api from
website code.
"""
from flask import Blueprint, request, send_from_directory
from flask_restful import Api, Resource
import os
from datetime import datetime
from PIL import Image as PILImage
from app import db
from models.data import City, Country, Location, Price, Tour, Image, Comment, Rating, SpecificTour
from models.users import User
mod = Blueprint('api/data', __name__)
api = Api(mod)
class TourAPI(Resource):
    """Services that allow user to get, update or delete the tour
    identified by the given key.
    """

    def put(self, oid):
        """Update an already existing tour.

        Request JSON may contain any of:
            name (str), description (str), guide_fee (float),
            thumbnail_id (int), locations (list of location ids)
        Fields can be omitted.

        Returns:
            ({"success": true}, 200) on success, or
            ({"success": false, "message": ...}, 400/404) on failure.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            tour = db.session.query(Tour).filter_by(oid=oid,).one_or_none()
            if not tour:
                return ({'success':False,
                         'message':'Specified tour not found.'}, 404)
            for key in req.keys():
                if key == 'locations':
                    # Attach the tour to each listed location; the
                    # many-to-many link is maintained on the Location side.
                    for location in req['locations']:
                        loc = db.session.query(Location).filter_by(oid=location,).one()
                        loc.tours.append(tour)
                elif hasattr(tour, key):
                    setattr(tour, key, req[key])
                else:
                    return ({'success':False, 'message':'Field name incorrect'},
                            400)
            db.session.commit()
            return ({'success': True}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)

    def get(self, oid):
        """Fetch the tour corresponding to the given identifier.

        Returns:
            (dict, 200) with id, name, description, guide_fee, thumbnail,
            locations, images, average rating and comment ids, or
            ({}, 404) when the tour does not exist.

        NOTE(review): this endpoint returns its gallery under the key
        ``images`` while TourListAPI.get uses ``photos`` — confirm which
        one the frontend expects.
        """
        response = {}
        tour = db.session.query(Tour).filter_by(oid=oid,).one_or_none()
        if tour:
            thumbnail = db.session.query(Image).filter_by(oid=tour.thumbnail_id).one()
            comments = []
            ratings = db.session.query(Rating).filter_by(tour=tour.oid,).all()
            rating = 0
            if not ratings:
                rating = 0
            else:
                # Arithmetic mean of all user ratings for this tour.
                for r in ratings:
                    rating = rating + r.rating
                rating = rating / len(ratings)
            for comment in tour.comments:
                comments.append(comment.oid)
            response = {
                'id':tour.oid,
                'name':tour.name,
                'description':tour.description,
                'guide_fee':tour.guide_fee,
                'thumbnail': {
                    'id': thumbnail.oid,
                    'src': 'http://localhost:5000/static/' + thumbnail.file_name,
                    'width': thumbnail.width,
                    'height': thumbnail.height,
                    'alt': 'thumbnail'
                },
                'locations': [
                    {'id': location.oid, 'name': location.name} \
                    for location in tour.locations
                ],
                'images': [
                    {
                        'id': image.oid,
                        'src': 'http://localhost:5000/static/' + image.file_name,
                        'width': image.width,
                        'height': image.height,
                        'alt': 'image'
                    } for image in tour.images
                ],
                'rating': rating,
                'commentIds': comments
            }
            return (response, 200)
        return (response, 404)

    def delete(self, oid):
        """Delete the tour corresponding to the given identifier.

        Returns:
            ({"success": true}, 200) when a row was deleted, or
            ({"success": false, "message": ...}, 404) otherwise.
        """
        # ``delete()`` returns the number of deleted rows (0 or 1 here).
        num = db.session.query(Tour).filter_by(oid=oid,).delete()
        if num:
            return ({'success': True}, 200)
        return ({'success':False,
                 'message':'Specified tour not found'}, 404)
class TourListAPI(Resource):
    """Services that allow user to list all tours or to add a new tour."""

    def post(self):
        """Add a new tour.

        Request JSON:
            {"name": str, "description": str, "guide_fee": float,
             "thumbnail": int, "locations": [int, ...], "images": [int, ...]}

        Returns:
            ({"success": true, "id": <oid>}, 200) on success,
            ({"success": false, "message": "Not JSON"}, 400) otherwise.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            tour = Tour(
                name=req['name'],
                guide_fee=req['guide_fee'],
                description=req['description'],
                thumbnail_id=req['thumbnail']
            )
            # Wire up the many-to-many relations before the commit.
            for location in req['locations']:
                loc = db.session.query(Location).filter_by(oid=location,).one()
                loc.tours.append(tour)
            for image_id in req['images']:
                image = db.session.query(Image).filter_by(oid=image_id).one()
                tour.images.append(image)
            db.session.add(tour)
            db.session.commit()
            return ({'success': True, 'id': tour.oid}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)

    def get(self):
        """Fetch all tours.

        Returns:
            (list, 200) of dicts with id, name, description, guide_fee,
            thumbnail, locations, photos, rating and commentIds; empty
            list when the database holds no tours.
        """
        response = []
        for tour in db.session.query(Tour).all():
            thumbnail = db.session.query(Image).filter_by(oid=tour.thumbnail_id).one()
            # BUG FIX: the original returned hard-coded placeholders
            # ('rating': 3, 'commentIds': [1]); compute the real average
            # rating and comment id list, mirroring TourAPI.get.
            ratings = db.session.query(Rating).filter_by(tour=tour.oid,).all()
            rating = sum(r.rating for r in ratings) / len(ratings) if ratings else 0
            comment_ids = [comment.oid for comment in tour.comments]
            response.append(
                {
                    'id':tour.oid,
                    'name':tour.name,
                    'description':tour.description,
                    'guide_fee':tour.guide_fee,
                    'thumbnail': {
                        'id': thumbnail.oid,
                        'src': 'http://localhost:5000/static/' + thumbnail.file_name,
                        'width': thumbnail.width,
                        'height': thumbnail.height,
                        'alt': 'thumbnail'
                    },
                    'locations': [
                        {'id': location.oid, 'name': location.name} \
                        for location in tour.locations
                    ],
                    'photos': [
                        {
                            'id': image.oid,
                            'src': 'http://localhost:5000/static/' + image.file_name,
                            'width': image.width,
                            'height': image.height,
                            'alt': 'image'
                        } for image in tour.images
                    ],
                    'rating': rating,
                    'commentIds': comment_ids
                }
            )
        return (response, 200)
class SpecificTourAPI(Resource):
    """Read access to a single dated instance of a tour.

    ``put`` and ``delete`` are placeholders that are not implemented yet.
    """

    def put(self, oid):
        # Not implemented yet.
        pass

    def get(self, oid):
        """Fetch the specific (dated) tour with the given identifier.

        Returns:
            (dict, 200) with startDate, endDate and tourId, or ({}, 404)
            when no such specific tour exists.
        """
        response = {}
        specific_tour = db.session.query(SpecificTour).filter_by(oid=oid,).one_or_none()
        if specific_tour:
            # NOTE(review): start/end dates are returned unserialized here,
            # but stringified in SpecificToursByTourAPI — confirm intent.
            response = {
                'startDate': specific_tour.start_date,
                'endDate': specific_tour.end_date,
                'tourId': specific_tour.tour_id,
            }
            return (response, 200)
        return (response, 404)

    def delete(self, oid):
        # Not implemented yet.
        pass
class SpecificTourListAPI(Resource):
    """Service that creates new dated instances of a tour."""

    def post(self):
        """Add a new specific (dated) tour.

        Request JSON: {"startDate": ..., "endDate": ..., "tourId": int}

        Returns:
            ({"success": true, "id": <oid>}, 200) on success,
            ({"success": false, "message": "Not JSON"}, 400) otherwise.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            # Leftover debug ``print(req)`` removed.
            specific_tour = SpecificTour(
                start_date=req['startDate'],
                end_date=req['endDate'],
                tour_id=req['tourId']
            )
            db.session.add(specific_tour)
            db.session.commit()
            return ({'success': True, 'id': specific_tour.oid}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)
class SpecificToursByTourAPI(Resource):
    """Lists all dated instances that belong to one tour."""

    def get(self, oid):
        """Fetch every specific tour of the tour with the given identifier.

        Returns:
            (list, 200) of {startDate, endDate, tourId} dicts, or
            ({"success": false, "message": ...}, 404) when the tour does
            not exist.
        """
        tour = db.session.query(Tour).filter_by(oid=oid).one_or_none()
        if tour:
            response = []
            for specific_tour in tour.specific_tours:
                response.append({
                    'startDate': str(specific_tour.start_date),
                    'endDate': str(specific_tour.end_date),
                    'tourId': specific_tour.tour_id,
                })
            # BUG FIX: the original re-added each already-persistent
            # ``specific_tour`` to the session and committed inside this
            # read-only handler; a GET must not write to the database.
            return (response, 200)
        return ({'success':False,
                 'message':'Specified tour not found'}, 404)
class LocationAPI(Resource):
    """Services that allow user to get, update or delete the location
    identified by the given key.
    """

    def put(self, oid):
        """Update an already existing location.

        Request JSON may contain any of:
            name (str), description (str), city_id (int), country_id (int),
            price (float)
        Fields can be omitted.

        Returns:
            ({"success": true}, 200) on success, or
            ({"success": false, "message": ...}, 400/404) on failure.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            location = db.session.query(Location).filter_by(oid=oid,).one_or_none()
            if not location:
                return ({'success':False,
                         'message':'Specified location not found.'}, 404)
            for key in req.keys():
                if key == 'price':
                    # Price is stored in a separate Price row.
                    # NOTE(review): ``get`` below accesses the relation as
                    # ``price[0].amount`` — confirm this attribute path works.
                    location.price.amount = req[key]
                elif hasattr(location, key):
                    setattr(location, key, req[key])
                else:
                    return ({'success':False,
                             'message':'Field name incorrect'}, 400)
            db.session.commit()
            return ({'success': True}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)

    def get(self, oid):
        """Fetch the location corresponding to the given identifier.

        Returns:
            (dict, 200) with id, name, description, price, city_id and
            country_id, or ({}, 404) when the location does not exist.
        """
        response = {}
        location = db.session.query(Location).filter_by(oid=oid,).one_or_none()
        if location:
            response = {
                'id':location.oid,
                'name':location.name,
                'description':location.description,
                # ``price`` is a one-element relationship list.
                'price':location.price[0].amount,
                'city_id':location.city_id,
                'country_id':location.country_id
            }
            return (response, 200)
        return (response, 404)

    def delete(self, oid):
        """Delete the location corresponding to the given identifier.

        Returns:
            ({"success": true}, 200) when a row was deleted, or
            ({"success": false, "message": ...}, 404) otherwise.
        """
        # ``delete()`` returns the number of deleted rows (0 or 1 here).
        num = db.session.query(Location).filter_by(oid=oid,).delete()
        if num:
            return ({'success': True}, 200)
        return ({'success':False,
                 'message':'Specified location not found'}, 404)
class LocationListAPI(Resource):
    """Services that allow user to list all locations or to add a new one."""

    def post(self):
        """Add a new location.

        Request JSON:
            {"name": str, "description": str, "city_id": int (optional),
             "country_id": int (optional), "price": float}

        Returns:
            ({"success": true, "id": <oid>}, 200) on success,
            ({"success": false, "message": "Not JSON"}, 400) otherwise.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            location = Location(
                name=req['name'],
                description=req['description'],
                city_id=req['city_id'] if 'city_id' in req else None,
                country_id=req['country_id'] if 'country_id' in req else None
            )
            db.session.add(location)
            # First commit assigns ``location.oid``, which the Price row
            # references below — hence two commits.
            db.session.commit()
            price = Price(location.oid, req['price'])
            db.session.add(price)
            db.session.commit()
            return ({'success': True, 'id': location.oid}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)

    def get(self):
        """Fetch all locations.

        Returns:
            (list, 200) of {id, name, description, price, city_id,
            country_id} dicts; empty list when none exist.
        """
        response = []
        for location in db.session.query(Location).all():
            response.append(
                {
                    'id':location.oid,
                    'name':location.name,
                    'description':location.description,
                    # ``price`` is a one-element relationship list.
                    'price':location.price[0].amount,
                    'city_id':location.city_id,
                    'country_id':location.country_id
                }
            )
        return (response, 200)
class LocationByCityAPI(Resource):
    """Lists the locations of one city (POST variant of LocationsByCityAPI)."""

    def post(self, oid):
        """Fetch every location of the city with the given identifier.

        Returns:
            (list, 200) of {id, name, description, price} dicts, or
            ({"success": false, "message": ...}, 404) when the city does
            not exist.
        """
        response = []
        city = db.session.query(City).filter_by(oid=oid).one_or_none()
        if city:
            for location in city.locations:
                response.append(
                    {
                        'id': location.oid,
                        'name': location.name,
                        'description': location.description,
                        # BUG FIX: ``price`` is a one-element relationship
                        # list (see LocationAPI.get); ``location.price.amount``
                        # raised AttributeError.
                        'price': location.price[0].amount,
                    }
                )
            return (response, 200)
        # BUG FIX: the failed lookup is a city, not a country.
        return ({'success':False,
                 'message':'Specified city not found'}, 404)
class CityAPI(Resource):
    """Services that allow user to get, update or delete the city
    identified by the given key.
    """

    def put(self, oid):
        """Update an already existing city.

        Request JSON may contain: name (str), country_id (int).
        Fields can be omitted.

        Returns:
            ({"success": true}, 200) on success, or
            ({"success": false, "message": ...}, 400/404) on failure.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            city = db.session.query(City).filter_by(oid=oid,).one_or_none()
            if not city:
                return ({'success':False,
                         'message':'Specified city not found.'}, 404)
            for key in req.keys():
                if hasattr(city, key):
                    setattr(city, key, req[key])
                else:
                    return ({'success':False,
                             'message':'Field name incorrect'}, 400)
            db.session.commit()
            return ({'success': True}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)

    def get(self, oid):
        """Fetch the city corresponding to the given identifier.

        Returns:
            (dict, 200) with id, name and country_id, or ({}, 404) when
            the city does not exist.
        """
        city = db.session.query(City).filter_by(oid=oid,).one_or_none()
        response = {}
        if city:
            response = {
                'id':city.oid,
                'name':city.name,
                'country_id':city.country_id
            }
            return (response, 200)
        return (response, 404)

    def delete(self, oid):
        """Delete the city corresponding to the given identifier.

        Returns:
            ({"success": true}, 200) when a row was deleted, or
            ({"success": false, "message": ...}, 404) otherwise.
        """
        # ``delete()`` returns the number of deleted rows (0 or 1 here).
        num = db.session.query(City).filter_by(oid=oid,).delete()
        if num:
            return ({'success': True}, 200)
        return ({'success':False,
                 'message':'Specified city not found'}, 404)
class CityListAPI(Resource):
    """Services that allow user to list all cities or to add a new city."""

    def post(self):
        """Add a new city.

        Request JSON:
            {"name": str, "country_id": int, "thumbnail_id": int (optional)}

        Returns:
            ({"success": true, "id": <oid>}, 200) on success,
            ({"success": false, "message": "Not JSON"}, 400) otherwise.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            thumbnail_id = req.get('thumbnail_id')  # optional field
            city = City(
                name=req['name'],
                country_id=int(req['country_id']),
                thumbnail_id=thumbnail_id
            )
            db.session.add(city)
            db.session.commit()
            return ({'success': True, 'id': city.oid}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)

    def get(self):
        """Fetch all cities.

        Returns:
            (list, 200) of {id, name, country_id} dicts; empty list when
            the database holds no cities.
        """
        response = []
        for city in db.session.query(City).all():
            response.append(
                {
                    # BUG FIX: City exposes its primary key as ``oid``
                    # (see CityAPI.get); ``city.id`` raised AttributeError.
                    'id':city.oid,
                    'name':city.name,
                    'country_id':city.country_id
                }
            )
        return (response, 200)
class CountryAPI(Resource):
    """Services that allow user to get, update or delete the country
    identified by the given key.
    """

    def put(self, oid):
        """Update an already existing country.

        Request JSON may contain: name (str). Fields can be omitted.

        Returns:
            ({"success": true}, 200) on success, or
            ({"success": false, "message": ...}, 400/404) on failure.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            country = db.session.query(Country).filter_by(oid=oid,).one_or_none()
            if not country:
                return ({'success':False,
                         'message':'Specified country not found.'}, 404)
            for key in req.keys():
                if hasattr(country, key):
                    setattr(country, key, req[key])
                else:
                    return ({'success':False,
                             'message':'Field name incorrect'}, 400)
            db.session.commit()
            return ({'success': True}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)

    def get(self, oid):
        """Fetch the country corresponding to the given identifier.

        Args:
            oid: unique identifier of the country to fetch.

        Returns:
            (dict, 200) with id, name and continent, or ({}, 404) when
            the country does not exist.
        """
        country = db.session.query(Country).filter_by(oid=oid,).one_or_none()
        response = {}
        if country:
            response = {
                'id':country.oid,
                'name':country.name,
                'continent':country.continent
            }
            return (response, 200)
        return (response, 404)

    def delete(self, oid):
        """Delete the country corresponding to the given identifier.

        Returns:
            ({"success": true}, 200) when a row was deleted, or
            ({"success": false, "message": ...}, 404) otherwise.
        """
        # ``delete()`` returns the number of deleted rows (0 or 1 here).
        num = db.session.query(Country).filter_by(oid=oid,).delete()
        if num:
            return ({'success': True}, 200)
        return ({'success':False,
                 'message':'Specified country not found'}, 404)
class CountryListAPI(Resource):
    """Services that allow user to list all countries or to add one."""

    def post(self):
        """Add a new country.

        Reserved for administrators: all UN-recognized countries (as of
        2017) are preloaded; this endpoint exists for future additions.

        Request JSON: {"name": str, "continent": int}

        Returns:
            ({"success": true, "id": <oid>}, 200) on success,
            ({"success": false, "message": "Not JSON"}, 400) otherwise.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            country = Country(name=req['name'], continent=req['continent'])
            db.session.add(country)
            db.session.commit()
            return ({'success': True, 'id': country.oid}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)

    def get(self):
        """Fetch all countries.

        Returns:
            (list, 200) of {id, name, continent} dicts; empty list when
            the database holds no countries.
        """
        response = []
        for country in db.session.query(Country).all():
            response.append(
                {
                    'id':country.oid,
                    'name':country.name,
                    'continent':country.continent
                }
            )
        return (response, 200)
class CitiesByCountryAPI(Resource):
    """Lists all cities belonging to one country."""

    def get(self, oid):
        """Fetch all cities of the country with the given identifier.

        Returns:
            (list, 200) of {id, name} dicts (empty when the country has
            no cities), or a 404 payload when the country is unknown.
        """
        country = db.session.query(Country).filter_by(oid=oid).one_or_none()
        if country is None:
            return ({'success':False,
                     'message':'Specified country not found'}, 404)
        payload = [{'id': town.oid, 'name': town.name}
                   for town in country.cities]
        return (payload, 200)
class LocationsByCityAPI(Resource):
    """Lists all locations belonging to one city."""

    def get(self, oid):
        """Fetch all locations of the city with the given identifier.

        Returns:
            (list, 200) of {id, name, description, price} dicts, or a
            404 payload when the city is unknown.
        """
        city = db.session.query(City).filter_by(oid=oid).one_or_none()
        if city is None:
            return ({'success':False,
                     'message':'Specified city not found'}, 404)
        payload = [
            {
                'id': spot.oid,
                'name': spot.name,
                'description': spot.description,
                # price is a one-element relationship list
                'price': spot.price[0].amount
            }
            for spot in city.locations
        ]
        return (payload, 200)
class FilesAPI(Resource):
    """Upload endpoint: stores posted images on disk and registers them."""

    def post(self):
        """Save every file in the multipart request and create Image rows.

        Each upload is renamed to a microsecond-resolution timestamp and
        written to the static folder so it can be served directly.

        Returns:
            A list of {id, src, width, height, alt} dicts, one per upload.
        """
        UPLOAD_FOLDER = '../static'  # relative to the app working directory
        image_ids = []
        for fi in request.files:
            extension = request.files[fi].filename.split('.')[-1]
            # Timestamp-based name avoids clashes between uploads.
            filename = datetime.now().strftime("%d_%m_%Y_%H_%M_%S_%f" + '.' + extension)
            path = os.path.join(UPLOAD_FOLDER, filename)
            request.files[fi].save(path)
            # Reopen the saved file only to read its pixel dimensions.
            with PILImage.open(path) as img:
                width, height = img.size
            image = Image(filename, width, height)
            db.session.add(image)
            db.session.commit()
            image_ids.append({
                'id': image.oid,
                'src': 'http://localhost:5000/static/' + filename,
                'width': width,
                'height': height,
                'alt': 'image',
            })
        return (image_ids)
class CommentOnTourAPI(Resource):
    """Lists the comments attached to one tour."""

    def get(self, tour_id):
        """Fetch all comments of the tour with the given identifier.

        Returns:
            ({"com": [...]}, 200) where each entry carries the comment
            text plus its author's id, name and photo, or
            ({"com": []}, 404) when the tour is unknown or has no comments.
        """
        response = {'com':[]}
        tour = db.session.query(Tour).filter_by(oid=tour_id,).one_or_none()
        # BUG FIX: guard against an unknown tour id; the original
        # dereferenced ``tour.comments`` and crashed with AttributeError.
        if tour is None:
            return (response, 404)
        comments = tour.comments
        if comments:
            for comm in comments:
                user = db.session.query(User).filter_by(id=comm.user_id).one()
                image = db.session.query(Image).filter_by(oid=user.image,).one_or_none()
                response['com'].append({
                    'comment': comm.text,
                    'userId': user.id,
                    'userName': user.first_name,
                    # BUG FIX: a user without a profile image gives
                    # ``image is None``; return no photo instead of
                    # crashing on ``image.file_name``.
                    'userPhoto': ('http://localhost:5000/static/' + image.file_name)
                                 if image else None,
                    'likes': 0,      # like/dislike counters not implemented yet
                    'dislikes': 0,
                    'current': 0
                })
            return (response, 200)
        return (response, 404)
class CommentAPI(Resource):
    """Services that allow user to get or post a comment."""

    def get(self, oid):
        """Fetch the comment corresponding to the given identifier.

        (The original docstring described a tour payload; this endpoint
        actually returns a comment.)

        Returns:
            (dict, 200) with the comment text plus the author's id, name
            and photo — like/dislike counters are not implemented and are
            always 0 — or ({}, 404) when the comment does not exist.
        """
        response = {}
        comment = db.session.query(Comment).filter_by(oid=oid,).one_or_none()
        if comment:
            user = db.session.query(User).filter_by(id=comment.user_id).one()
            image = db.session.query(Image).filter_by(oid=user.image,).one_or_none()
            response = {
                'comment': comment.text,
                'userId': user.id,
                'userName': user.first_name,
                # BUG FIX: tolerate users without a profile image instead
                # of crashing on ``image.file_name`` when image is None.
                'userPhoto': ('http://localhost:5000/static/' + image.file_name)
                             if image else None,
                'likes': 0,
                'dislikes': 0,
                'current': 0
            }
            return (response, 200)
        return (response, 404)

    def post(self):
        """Add a new comment.

        Request JSON: {"text": str, "tour_id": int, "user_id": int}

        Returns:
            ({"success": true, "id": <tour oid>}, 200) on success,
            ({"success": false, "message": "Not JSON"}, 400) otherwise.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            comment = Comment(
                text=req['text'],
                user_id=req['user_id']
            )
            tour = db.session.query(Tour).filter_by(oid=req['tour_id']).one()
            tour.comments.append(comment)
            db.session.add(comment)
            db.session.add(tour)
            db.session.commit()
            return ({'success': True, 'id': tour.oid}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)
class RatingAPI(Resource):
    """Create or update a user's rating of a tour."""

    def post(self):
        """Upsert a rating.

        Request JSON: {"user_id": int, "tour_id": int, "rating": number}

        Returns:
            ({"success": true}, 200) on success,
            ({"success": false, "message": "Not JSON"}, 400) otherwise.
        """
        req = request.get_json(force=True, silent=True)
        if req:
            # BUG FIX: the original assignments ended with stray commas,
            # turning each value into a 1-tuple, so the filter below could
            # never match an existing rating and duplicates were inserted.
            req_user_id = req['user_id']
            req_tour_id = req['tour_id']
            req_rating = req['rating']
            rating = db.session.query(Rating).filter_by(
                user=req_user_id, tour=req_tour_id).one_or_none()
            if rating:
                rating.rating = req_rating
            else:
                rating = Rating(
                    user_id=req_user_id,
                    tour_id=req_tour_id,
                    rating=req_rating,
                )
                db.session.add(rating)
            db.session.commit()
            return ({'success': True}, 200)
        return ({'success':False, 'message':'Not JSON'}, 400)
# Route table: (resource class, URL rule) pairs registered with the API.
_ROUTES = (
    (TourListAPI, '/tours'),
    (SpecificTourListAPI, '/tours/specific'),
    (TourAPI, '/tours/<int:oid>'),
    (SpecificToursByTourAPI, '/tour/<int:oid>/specific'),
    (LocationListAPI, '/locations'),
    (LocationAPI, '/locations/<int:oid>'),
    (LocationsByCityAPI, '/city/<int:oid>/locations'),
    (CityListAPI, '/cities'),
    (CityAPI, '/cities/<int:oid>'),
    (CitiesByCountryAPI, '/country/<int:oid>/cities'),
    (CountryListAPI, '/countries'),
    (CountryAPI, '/countries/<int:oid>'),
    (FilesAPI, '/upload'),
    (CommentOnTourAPI, '/comment/<int:tour_id>'),
    (CommentAPI, '/comment'),
    (RatingAPI, '/rating'),
)
for _resource, _rule in _ROUTES:
    api.add_resource(_resource, _rule)
| Pseudonick47/sherlock | backend/api/data.py | Python | gpl-3.0 | 37,646 | [
"VisIt"
] | 6322772d45662b437821e175086b836b355ded89247d1faaf990b50ce689cab0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 07:43:49 2019
@author: arijit
"""
import numpy as np
import os,zipfile
from PIL import Image
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
import scipy.constants as cn
import re
def atoi(text):
    """Convert *text* to an int when it is all digits; otherwise return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Split *text* into a sort key whose digit runs compare numerically.

    Sorting with ``key=natural_keys`` orders "file2" before "file10".
    """
    return [int(piece) if piece.isdigit() else piece
            for piece in re.split(r'(\d+)', text)]
def injector(fileNoStart, fileNoStop, NoImages,
             fileNameString="CaF18Jul1900",
             remotePath="//PH-TEW105/Users/rfmot/Desktop/AbsImages/",
             dirPath="C:/Users/cafmot/Box Sync/CaF MOT/MOTData/MOTMasterData/"):
    """Move absorption images from *remotePath* into the MOTMaster zip files.

    Each of the ``fileNoStop - fileNoStart + 1`` archives receives
    ``NoImages`` consecutive images (natural sort order); the sources are
    deleted once archived.  Nothing is written unless the image count
    matches exactly.
    """
    imgs = os.listdir(remotePath)
    imgs.sort(key=natural_keys)
    expected = (fileNoStop - fileNoStart + 1) * NoImages
    if len(imgs) == expected:
        # BUG FIX: Python-2 print statements were syntax errors under this
        # module's python3 shebang; converted to print() calls throughout.
        print('Inserting images to the zip files...')
        l = 0
        for fileNo in range(fileNoStart, fileNoStop + 1):
            filepath = os.path.join(dirPath,
                                    fileNameString + '_' + str(fileNo).zfill(3) + '.zip')
            with zipfile.ZipFile(filepath, 'a') as archive:
                files = archive.namelist()
                for _ in range(NoImages):
                    # Skip images already present in the archive.
                    if imgs[l] not in files:
                        archive.write(os.path.join(remotePath, imgs[l]), imgs[l])
                    l += 1
        for img in imgs:
            os.remove(os.path.join(remotePath, img))
    elif len(imgs) == 0:
        print('No Image to insert')
    # BUG FIX: the original compared against (fileNoStart - fileNoStop + 1),
    # a non-positive count, so these branches reported the wrong condition.
    elif len(imgs) < expected:
        print('There seems to be less number of images than required!')
    else:
        print('There are more images than expected!')
def gaussianFit(x, y):
    """Fit a Gaussian plus constant offset to (x, y).

    Returns:
        (model, amplitude, centre, sigma, offset) where ``model`` is the
        fitted callable model(x, a, c, s, o).
    """
    model = lambda x, a, c, s, o: a * np.exp(-(x - c) ** 2 / (2 * s ** 2)) + o
    peak = np.argmax(y)
    amp0 = y[peak]
    cen0 = x[peak]
    off0 = np.min(y)
    # Seed sigma from the peak sample and the sample four points to its right.
    sig0 = np.sqrt(np.abs(
        ((x[int(peak + 4)] - cen0) ** 2 - (x[int(peak)] - cen0) ** 2) /
        (2 * np.log(np.abs(y[int(peak + 4)] / y[int(peak)])))))
    params, _ = curve_fit(model, x, y, p0=[amp0, cen0, sig0, off0])
    return model, params[0], params[1], params[2], params[3]
def gaussianFitX(x, y):
    """Fit a Gaussian plus constant offset to (x, y), seeding sigma from FWHM.

    Returns:
        (model, amplitude, centre, sigma, offset) where ``model`` is the
        fitted callable model(x, a, c, s, o).
    """
    model = lambda x, a, c, s, o: a * np.exp(-(x - c) ** 2 / (2 * s ** 2)) + o
    peak = np.argmax(y)
    amp0 = y[peak]
    cen0 = x[peak]
    off0 = np.min(y)
    # Samples within 10% of the half-maximum height; the span between the
    # outermost hits approximates the FWHM.
    near_half = np.abs((y - off0) - (amp0 - off0) / 2.0) < (amp0 - off0) / 10.0
    hits = np.where(near_half > 0)
    sig0 = (x[hits[0][-1]] - x[hits[0][0]]) / 2
    params, _ = curve_fit(model, x, y, p0=[amp0, cen0, sig0, off0])
    return model, params[0], params[1], params[2], params[3]
def Absorption(fileNoStart, fileNoStop, param, detuningInVolt, crop,
               centre, width, height, fileNameString, showPlot=True, showOd=False,
               dirPath="C:/Users/cafmot/Box Sync/CaF MOT/MOTData/MOTMasterData/"):
    """Compute atom number per shot from absorption-image triplets.

    Each data file holds repeating (cloud, probe, background) frames; the
    optical density is od = ln((probe-bg)/(cloud-bg)) and the atom number
    follows from the detuning-corrected scattering cross-section.

    Returns:
        (mean atom numbers, standard errors, scanned parameter values),
        one entry per data file in [fileNoStart, fileNoStop].
    """
    analysis = defaultCaF()
    analysis.dirPath = dirPath
    analysis.fileNameString = fileNameString
    N_mean_list = []
    N_std_list = []
    paramVals = []
    for fileNo in range(fileNoStart, fileNoStop + 1):
        images, paramsDict = analysis.readFromZip(fileNo, dstr='R')
        paramVals.append(paramsDict[param])
        clouds = images[0::3, :, :]
        probes = images[1::3, :, :]
        bgs = images[2::3, :, :]
        od = np.log((probes - bgs) / (clouds - bgs))
        # Zero out invalid pixels (negative or zero transmission).
        od[np.isnan(od)] = 0.0
        od[od == -np.inf] = 0.0
        od[od == np.inf] = 0.0
        if crop:
            # BUG FIX: use floor division -- height/2 and width/2 are floats
            # under Python 3 and float slice indices raise TypeError.
            od = od[:, centre[0] - height // 2:centre[0] + height // 2,
                    centre[1] - width // 2:centre[1] + width // 2]
        # NOTE(review): 780e-9 is the Rb D2 wavelength -- confirm it is
        # intended for a CaF experiment.
        N = (1 + 4 * (detuningInVolt * 14.7) ** 2 / 36) * (2.4 * 4 * 6.4e-6) ** 2 \
            * np.sum(od, axis=(1, 2)) / (3 * 780e-9 ** 2 / (2 * np.pi))
        N_mean_list.append(np.mean(N))
        # Standard error of the mean over the shots in this file.
        N_std_list.append(np.std(N) / np.sqrt(len(N)))
        if showOd:
            fig, ax = plt.subplots()
            im = ax.imshow(np.mean(od, axis=0))
            fig.colorbar(im)
    if showPlot:
        fig, ax = plt.subplots()
        ax.errorbar(np.array(paramVals), np.array(N_mean_list),
                    yerr=np.array(N_std_list),
                    fmt='ok')
        ax.set_ylabel('MOT Number')
        ax.set_xlabel(param)
    return np.array(N_mean_list), \
        np.array(N_std_list), \
        np.array(paramVals)
def AbsorptionDensity(fileNoStart, fileNoStop, param, detuningInVolt, crop,
                      centre, width, height, fileNameString, showPlot=True,
                      showOd=False, showFits=False,
                      dirPath="C:/Users/cafmot/Box Sync/CaF MOT/MOTData/MOTMasterData/"):
    """Estimate peak cloud density from absorption-image triplets.

    Fits Gaussians to the radial and axial od profiles, combines the
    widths with the atom number and plots mean density per data file.

    Returns:
        (radial sigmas, axial sigmas) as numpy arrays, one entry per file.
    """
    analysis = defaultCaF()
    analysis.dirPath = dirPath
    analysis.fileNameString = fileNameString
    radialSigmas = []
    axialSigmas = []
    paramVals = []
    densities_mean_list = []
    densities_std_list = []
    pixelSize = 6.4e-6
    binSize = 4
    mag = 0.416
    for fileNo in range(fileNoStart, fileNoStop + 1):
        images, paramsDict = analysis.readFromZip(fileNo, dstr='R')
        paramVals.append(paramsDict[param])
        clouds = images[0::3, :, :]
        probes = images[1::3, :, :]
        bgs = images[2::3, :, :]
        l, m, p = np.shape(probes)
        # Binning is currently disabled (kept from a previous version).
        binProbes = probes  # .reshape((l,m/2,2,p/2,2)).sum(2).sum(3)
        binClouds = clouds  # .reshape((l,m/2,2,p/2,2)).sum(2).sum(3)
        binBgs = bgs        # .reshape((l,m/2,2,p/2,2)).sum(2).sum(3)
        od = np.log((binProbes - binBgs) / (binClouds - binBgs))
        od[np.isnan(od)] = 0.0
        od[od == -np.inf] = 0.0
        od[od == np.inf] = 0.0
        if crop:
            # BUG FIX: floor division -- float slice indices raise TypeError
            # under Python 3.
            od = od[:, centre[0] - height // 2:centre[0] + height // 2,
                    centre[1] - width // 2:centre[1] + width // 2]
        od_mean = np.mean(od, axis=0)
        # NOTE(review): 780e-9 is the Rb D2 wavelength -- confirm for CaF.
        N = (1 + 4 * (detuningInVolt * 14.7) ** 2 / 36) * (2.4 * 4 * 6.4e-6) ** 2 \
            * np.sum(od, axis=(1, 2)) / (3 * 780e-9 ** 2 / (2 * np.pi))
        radialY = np.sum(od_mean, axis=0)
        axialY = np.sum(od_mean, axis=1)
        radialYLength = len(radialY)
        axialYLength = len(axialY)
        radialX = pixelSize * (binSize / mag) * np.arange(0, radialYLength)
        axialX = pixelSize * (binSize / mag) * np.arange(0, axialYLength)
        radialGaussian, radialA, radialC, radialSigma, radiaOffset = gaussianFit(radialX, radialY)
        axialGaussian, axialA, axialC, axialSigma, axialOffset = gaussianFit(axialX, axialY)
        radialSigmas.append(np.abs(radialSigma))
        axialSigmas.append(np.abs(axialSigma))
        # Peak density of a 3D Gaussian cloud, converted to cm^-3.
        density = (1e-6 * N / ((2 * np.pi) ** (1.5) * np.abs(axialSigma) * radialSigma ** 2))
        densities_mean_list.append(np.mean(density))
        densities_std_list.append(np.std(density) / np.sqrt(len(density)))
        if showOd:
            fig, ax = plt.subplots()
            im = ax.imshow(od_mean)
            fig.colorbar(im)
            # BUG FIX: the Python-2 statement ``print ''`` was a syntax
            # error under this module's python3 shebang.
            print('')
        if showFits:
            fig, ax = plt.subplots(1, 1)
            ax.plot(radialX, radialY, 'ob')
            ax.plot(axialX, axialY, 'og')
            ax.plot(radialX, radialGaussian(radialX, radialA, radialC, radialSigma, radiaOffset), '-r')
            ax.plot(axialX, axialGaussian(axialX, axialA, axialC, axialSigma, axialOffset), '-k')
            plt.show()
    densities_mean_list = np.array(densities_mean_list)
    densities_std_list = np.array(densities_std_list)
    paramVals = np.array(paramVals)
    fig, ax = plt.subplots()
    ax.errorbar(paramVals, densities_mean_list, yerr=densities_std_list, fmt='ok')
    return np.array(radialSigmas), np.array(axialSigmas)
def AbsorptionTemperature(fileNoStart,fileNoStop,param,detuningInVolt,crop,
        centre,width,height,fileNameString,showPlot=True,showOd=False,showFits=False,
        dirPath="C:/Users/cafmot/Box Sync/CaF MOT/MOTData/MOTMasterData/"):
    """Time-of-flight temperature from a range of absorption-image archives.

    For every file number in [fileNoStart, fileNoStop] an optical-depth
    (OD) image stack is built from the cloud/probe/background frames, 1D
    Gaussians are fitted to the summed radial/axial profiles, and the
    squared cloud sizes are fitted linearly against the squared expansion
    times to extract temperatures, which are plotted.

    NOTE(review): `detuningInVolt` and `showPlot` are accepted but never
    used in this function -- confirm whether they can be removed.
    """
    analysis=defaultCaF()
    analysis.dirPath=dirPath
    analysis.fileNameString=fileNameString
    radialSigmas=[]
    axialSigmas=[]
    paramVals=[]
    # Imaging-system constants used to convert (binned) pixels to metres.
    pixelSize=6.4e-6
    binSize=4
    mag=0.39
    for fileNo in range(fileNoStart,fileNoStop+1):
        # dstr='R' selects only image files whose names start with 'R'.
        images,paramsDict=analysis.readFromZip(fileNo,dstr='R')
        paramVals.append(paramsDict[param])
        # Frames cycle cloud / probe / background within each archive.
        clouds=images[0::3,:,:]
        probes=images[1::3,:,:]
        bgs=images[2::3,:,:]
        l,m,p=np.shape(probes)
        # Software 2x2 binning is currently disabled (commented out).
        binProbes=probes#.reshape((l,m/2,2,p/2,2)).sum(2).sum(3)
        binClouds=clouds#.reshape((l,m/2,2,p/2,2)).sum(2).sum(3)
        binBgs=bgs#.reshape((l,m/2,2,p/2,2)).sum(2).sum(3)
        # Optical depth; nan/inf from non-positive ratios are zeroed below.
        od=np.log((binProbes-binBgs)/(binClouds-binBgs))
        od[np.isnan(od)] = 0.0
        od[od == -np.inf] = 0.0
        od[od == np.inf] = 0.0
        if crop:
            # NOTE(review): height/2 and width/2 are Python-2 integer
            # divisions; under Python 3 these would be floats and the
            # slice would raise TypeError.
            od=od[:,centre[0]-height/2:centre[0]+height/2,
                  centre[1]-width/2:centre[1]+width/2]
        od_mean=np.mean(od,axis=0)
        # Column/row sums give the 1D radial/axial profiles.
        radialY=np.sum(od_mean,axis=0)
        axialY=np.sum(od_mean,axis=1)
        radialYLength=len(radialY)
        axialYLength=len(axialY)
        # Physical coordinate axes (metres) for the fitted profiles.
        radialX=pixelSize*(binSize/mag)*np.arange(0,radialYLength)
        axialX=pixelSize*(binSize/mag)*np.arange(0,axialYLength)
        radialGaussian,radialA,radialC,radialSigma,radiaOffset=gaussianFit(radialX,radialY)
        axialGaussian,axialA,axialC,axialSigma,axialOffset=gaussianFit(axialX,axialY)
        radialSigmas.append(radialSigma)
        axialSigmas.append(axialSigma)
        if showOd:
            fig,ax=plt.subplots()
            im=ax.imshow(od_mean)
            fig.colorbar(im)
        if showFits:
            fig, ax = plt.subplots(1,1)
            ax.plot(radialX,radialY,'ob')
            ax.plot(axialX,axialY,'og')
            ax.plot(radialX,radialGaussian(radialX,radialA,radialC,radialSigma,radiaOffset),'-r')
            ax.plot(axialX,axialGaussian(axialX,axialA,axialC,axialSigma,axialOffset),'-k')
            plt.show()
    axialSigmas=np.array(axialSigmas)
    radialSigmas=np.array(radialSigmas)
    # 1e-5: converts the scan parameter's hardware time units to seconds
    # -- presumably 10 us units; TODO confirm against MOTMaster settings.
    paramVals=np.array(paramVals)*1e-5
    # Linear fit of size^2 vs time^2; the slope gives k_B*T/m.
    axialLin,axialM,axialC=analysis.linearFit(paramVals**2,axialSigmas**2)
    radialLin,radialM,radialC=analysis.linearFit(paramVals**2,radialSigmas**2)
    # 86.9 u: atomic mass used for the temperature (Rb-87? -- confirm
    # species; the CaF routines elsewhere use 59 u). 1e3 converts K to mK.
    axialTemp=axialM*(86.9*cn.u/cn.k)*1e3
    radialTemp=radialM*(86.9*cn.u/cn.k)*1e3
    timeValsInterpolated=np.linspace(np.min(paramVals),
                                     np.max(paramVals),
                                     100)
    fig, ax = plt.subplots(1,2)
    # Plot in ms^2 / mm^2 (factor 1e6 on squared quantities).
    ax[0].plot(paramVals**2*1e6,radialSigmas**2*1e6,'ok')
    ax[1].plot(paramVals**2*1e6,axialSigmas**2*1e6,'ok')
    ax[0].plot(timeValsInterpolated**2*1e6,
               radialLin(timeValsInterpolated**2,radialM,
                         radialC)*1e6,'-r')
    ax[1].plot(timeValsInterpolated**2*1e6,
               axialLin(timeValsInterpolated**2,axialM,axialC)*1e6,'-r')
    ax[0].set_title('Tr: {0:2.4f} [mK]'.format(radialTemp))
    ax[1].set_title('Ta: {0:2.4f} [mK]'.format(axialTemp))
    ax[1].yaxis.tick_right()
    ax[1].yaxis.set_label_position("right")
    for axis in ax:
        axis.xaxis.set_minor_locator(AutoMinorLocator())
        axis.yaxis.set_minor_locator(AutoMinorLocator())
        axis.set_xlabel('time^2 [ms^2]')
        axis.set_ylabel('size^2 [mm^2]')
def gaussianFit2D(xy, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
    """Rotated 2D Gaussian model for scipy.optimize.curve_fit.

    xy        : tuple (x, y) of coordinate arrays (e.g. from np.meshgrid).
    amplitude : peak height above `offset`.
    xo, yo    : centre coordinates (same units as x and y).
    sigma_x/y : widths along the rotated principal axes.
    theta     : rotation angle of the principal axes (radians).
    offset    : constant background level.

    Returns the Gaussian evaluated on the grid, flattened with ravel()
    so it matches the 1D data vector curve_fit expects.
    """
    # Unpack inside the body instead of using a tuple parameter in the
    # signature: tuple parameters are Python-2-only syntax (removed by
    # PEP 3113); this form is call-compatible and works on 2 and 3.
    x, y = xy
    xo = float(xo)
    yo = float(yo)
    # Coefficients of the rotated quadratic form in the exponent.
    a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
    b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
    c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
    g = offset + amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
    return g.ravel()
def getInitialGuesses(od,pixelSize,binSize,magFactor):
    """Initial parameter tuple for a gaussianFit2D fit of an OD image.

    Returns (amplitude, xo, yo, sigma_x, sigma_y, theta, offset) where
    the width guesses are 20 and 10 binned pixels converted to metres.

    NOTE(review): xo/yo are returned as *pixel indices* of the profile
    maxima, whereas gaussianFit2D expects the centre in the same physical
    units as the sigma guesses -- confirm before feeding these straight
    into a fit.
    """
    amplitude=np.max(od)
    offset=np.min(od)
    # Peak positions of the column/row sums (pixel indices). The original
    # also built physical axes xd/yd here but never used them (dead code,
    # removed).
    xo=np.argmax(np.sum(od,axis=0))
    yo=np.argmax(np.sum(od,axis=1))
    return (amplitude,xo,yo,20*pixelSize*(binSize/magFactor),
            10*pixelSize*(binSize/magFactor),0,offset)
def getOdFitted(od,pixelSize,binSize,magFactor):
    """Fit a rotated 2D Gaussian to every frame of an OD image stack.

    Returns (fitted stack, array of per-frame best-fit parameters) where
    each parameter row is (amplitude, xo, yo, sigma_x, sigma_y, theta,
    offset) as used by gaussianFit2D.
    """
    nFrames, nRows, nCols = np.shape(od)
    # Physical scale of one (binned) pixel in metres.
    scale = pixelSize*(binSize/magFactor)
    xGrid, yGrid = np.meshgrid(np.arange(0, nCols)*scale,
                               np.arange(0, nRows)*scale)
    fitted = np.zeros_like(od)
    fitParams = []
    for frame in range(nFrames):
        # Fixed starting point: centre near (30, 40) binned pixels,
        # widths of 20 x 10 pixels, no rotation, zero offset.
        guess = (1, 30*scale, 40*scale, 20*scale, 10*scale, 0, 0)
        params, _ = curve_fit(gaussianFit2D, (xGrid, yGrid),
                              od[frame,:,:].reshape((nRows*nCols)), p0=guess)
        fitted[frame,:,:] = gaussianFit2D((xGrid, yGrid), *params).reshape((nRows, nCols))
        fitParams.append(params)
    return fitted, np.array(fitParams)
def getOdCleaned(probes,clouds,bgs):
    """Compute the optical depth log((probes-bgs)/(clouds-bgs)).

    Non-positive reference counts are clamped to 1.0 before dividing,
    and any nan/inf produced by the logarithm is replaced with 0.0.
    """
    signal = probes - bgs
    reference = clouds - bgs
    # Guard against division by zero / negative reference counts.
    reference[reference <= 0] = 1.0
    od = np.log(signal / reference)
    # log() of non-positive ratios yields nan or +/-inf; zero them out.
    for bad in (np.isnan(od), od == -np.inf, od == np.inf):
        od[bad] = 0.0
    return od
def imshowArray(ifarray,array):
    """Display the frame-averaged image of `array` when `ifarray` is true.

    Averages the stack along axis 0, shows it with a colorbar, and blocks
    on plt.show(). Does nothing when `ifarray` is falsy.
    """
    if not ifarray:
        return
    fig, axes = plt.subplots()
    image = axes.imshow(np.mean(array, axis=0))
    fig.colorbar(image)
    plt.show()
def AbsorptionAnalysis(fileNoStart,fileNoStop,param,detuningInVolt,crop,
                       centre,width,height,fileNameString,
                       numberByFit=True,
                       showFits=True,
                       showOd=False,
                       pixelSize=6.4e-6,
                       binSize=4,
                       magFactor=0.41,
                       dirPath="C:/Users/cafmot/Box Sync/CaF MOT/MOTData/MOTMasterData/"):
    """Absorption-imaging number (and optionally 2D-fit shape) analysis.

    For every file number in [fileNoStart, fileNoStop] an OD stack is
    built, optionally cropped, and the atom number is computed either
    from per-frame 2D Gaussian fits (numberByFit=True) or from the plain
    OD sum. Returns a dict of per-file means/standard errors keyed
    'N_mean', 'N_std', 'paramVals', 'ods' plus the fit statistics when
    numberByFit is set.
    """
    returnDict={}
    analysis=defaultCaF()
    analysis.dirPath=dirPath
    analysis.fileNameString=fileNameString
    # Absorption cross-section factor: saturation correction from the
    # detuning (14.7 MHz/V? -- TODO confirm calibration) divided by the
    # resonant cross-section 3*lambda^2/(2*pi) at 780 nm.
    s0=(1+4*(detuningInVolt*14.7)**2/36)/(3*780e-9**2/(2*np.pi))
    paramVals=[]; amplitudesMean=[]; sigmas_xMean=[]; sigmas_yMean=[]
    xosMean=[]; yosMean=[]; numbersMean=[]; numbersStd=[]; amplitudesStd=[]
    sigmas_xStd=[]; sigmas_yStd=[]; xosStd=[]; yosStd=[];ods=[]
    for fileNo in range(fileNoStart,fileNoStop+1):
        images,paramsDict=analysis.readFromZip(fileNo,dstr='R')
        paramVals.append(paramsDict[param])
        # Frames cycle cloud / probe / background within each archive.
        clouds=images[0::3,:,:]
        probes=images[1::3,:,:]
        bgs=images[2::3,:,:]
        od=getOdCleaned(probes,clouds,bgs)
        if crop:
            od=od[:,centre[0]-height/2:centre[0]+height/2,
                  centre[1]-width/2:centre[1]+width/2]
        imshowArray(showOd,od)
        ods.append(np.mean(od,axis=0))
        if numberByFit:
            odFitted,popt=getOdFitted(od,pixelSize,binSize,magFactor)
            imshowArray(showFits,odFitted)
            # sqrt(number of shots), used to turn std into standard error.
            l=np.sqrt(len(popt[:,0]))
            amplitude=popt[:,0]
            xo=popt[:,1]
            yo=popt[:,2]
            sigma_x=popt[:,3]
            sigma_y=popt[:,4]
            # Number from the analytic integral of the fitted 2D Gaussian.
            N=(2*np.pi)*amplitude*np.abs(sigma_x)*np.abs(sigma_y)*s0
            amplitudesMean.append(np.mean(amplitude))
            xosMean.append(np.mean(xo))
            yosMean.append(np.mean(yo))
            sigmas_xMean.append(np.mean(sigma_x))
            sigmas_yMean.append(np.mean(sigma_y))
            amplitudesStd.append(np.std(amplitude)/l)
            xosStd.append(np.std(xo)/l)
            yosStd.append(np.std(yo)/l)
            sigmas_xStd.append(np.std(sigma_x)/l)
            sigmas_yStd.append(np.std(sigma_y)/l)
        else:
            # Number from the raw OD sum times the pixel area.
            N=(pixelSize*(binSize/magFactor))**2*np.sum(od,axis=(1,2))*s0
        numbersMean.append(np.mean(N))
        numbersStd.append(np.std(N)/np.sqrt(len(N)))
    returnDict['N_mean']=np.array(numbersMean)
    returnDict['N_std']=np.array(numbersStd)
    returnDict['paramVals']=np.array(paramVals)
    returnDict['ods']=np.array(ods)
    if numberByFit:
        returnDict['fitSigmas_xMean']=np.array(sigmas_xMean)
        returnDict['fitSigmas_xStd']=np.array(sigmas_xStd)
        returnDict['fitSigmas_yMean']=np.array(sigmas_yMean)
        returnDict['fitSigmas_yStd']=np.array(sigmas_yStd)
        returnDict['fitAmplitudesMean']=np.array(amplitudesMean)
        returnDict['fitAmplitudesStd']=np.array(amplitudesStd)
        returnDict['fitXosMean']=np.array(xosMean)
        returnDict['fitXosStd']=np.array(xosStd)
        returnDict['fitYosMean']=np.array(yosMean)
        returnDict['fitYosStd']=np.array(yosStd)
    return returnDict
def expFit(x,y):
    """Fit y = a*exp(-(x-c)/s) to the data and return (model, a, c, s)."""
    def model(x, a, c, s):
        return a*np.exp(-(x-c)/s)
    # Starting point: amplitude at the data maximum, zero time offset,
    # fixed decay-constant guess of 100.
    guesses = [np.max(y), 0, 100]
    popt, _ = curve_fit(model, x, y, p0=guesses)
    a, c, s = popt
    return model, a, c, s
class Analysis:
    """
    This is the analysis object for CaF and Rb MOT \n
    Input \n
    fileNoStart=starting No of the files to be analysed \n,
    fileNoStop=ending No of the files to be analysed \n,
    fileNoBG=file No of the file with background \n,
    requirement=allows a switch to select from \n
    'Image' : To get the images of all the files \n
    'Number': To get the number variation of all the files\n
    'Temperature' : To get the temperature from the expansion set \n
    'Lifetime': to get the lifetime from the dataset\n
    param=parameter of the variation\n
    fit=True to fit the data points\n
    fitType=type of fitting if fit is true, choose from\n
    'exp': for exponential fit [y=a*exp(-(x-c)/s)]\n
    'lin': for linear fit [y=m*x+c]\n
    'gauss': for gaussian fit [y=a*exp(-(x-c)**2/(2*s**2))]
    trigType=choose from\n
    'single': for single trigger images\n
    'double': for double trigger normalizations\n
    N_interpolate=integer for number of points in the fitted curve\n
    fmt=plotting format, default is 'ok'\n,
    showFits=True if want to have the gaussian fit to the cloud data\n
    imageCols=integer for number of coumns for 'Image' or showFits\n
    """
    def __init__(self,args={}):
        """Copy every entry of ``args`` onto the instance as an attribute."""
        # NOTE(review): mutable default argument -- harmless here because
        # args is only read, but a None default would be safer.
        for key in args:
            self.__dict__[key]=args[key]
    def __setattr__(self,name,value):
        """Store attributes directly in __dict__ (any name is settable)."""
        self.__dict__[name]=value
    def getFilepath(self,fileNo):
        """This method create the full filepath from the fileNo input
        e.g. <dirPath>/<fileNameString>_007.zip for fileNo=7.
        """
        return os.path.join(self.dirPath,
                            self.fileNameString+'_'+str(fileNo).zfill(3)+'.zip')
    def convertRawToCount(self,raw):
        """Scale a normalised raw value to camera counts via bitDepth."""
        return (2**self.bitDepth-1)*raw
    def convertCountsToPhotons(self,counts):
        """Convert camera counts to detected photons.

        Uses fullWellCapacity, bitsPerChannel and the quantum efficiency
        etaQ. NOTE(review): this uses bitsPerChannel while
        convertRawToCount uses bitDepth -- confirm both are intended.
        """
        return counts*(np.float(self.fullWellCapacity)/(2**self.bitsPerChannel-1))/self.etaQ
    def convertPhotonsToNumber(self,photonCount):
        """Convert a photon count to a molecule/atom number using the
        exposure time, scattering rate gamma and collection solid angle."""
        return photonCount/(self.exposureTime*self.gamma*self.collectionSolidAngle)
    def readFromZip(self,fileNo,dstr=''):
        """Read one MOTMaster zip archive.

        dstr='' loads every .tif; dstr='R'/'C' loads only images whose
        file names start with that letter. Also parses parameters.txt and
        hardwareReport.txt (tab-separated) into a single dict.

        Returns (image stack as float ndarray, parameter dict with str
        keys). NOTE(review): the archive is never closed, and np.float is
        deprecated in modern numpy.
        """
        archive=zipfile.ZipFile(self.getFilepath(fileNo),'r')
        imgs=[]
        files=archive.namelist()
        # Natural (human) ordering so frame 10 follows frame 9.
        files.sort(key=natural_keys)
        for f in files:
            if f[-3:]=='tif':
                if dstr=='':
                    with archive.open(f) as filename:
                        imgs.append(np.array(Image.open(filename),dtype=float))
                if dstr=='R':
                    if f[0]=='R':
                        with archive.open(f) as filename:
                            imgs.append(np.array(Image.open(filename),dtype=float))
                if dstr=='C':
                    if f[0]=='C':
                        with archive.open(f) as filename:
                            imgs.append(np.array(Image.open(filename),dtype=float))
            if f[-14:]=='parameters.txt':
                with archive.open(f) as filename:
                    scriptParams=filename.readlines()
            if f[-18:]=='hardwareReport.txt':
                with archive.open(f) as filename:
                    hardwareParams=filename.readlines()
        tempDict={}
        # Script parameters are always numeric.
        for param in scriptParams:
            paramSplit=param.split(b'\t')
            tempDict[paramSplit[0]]=np.float(paramSplit[1])
        # Hardware parameters may be strings; only digits are converted.
        for param in hardwareParams:
            paramSplit=param.split(b'\t')
            tempDict[paramSplit[0]]=np.float(paramSplit[1]) if \
                paramSplit[1].isdigit() else paramSplit[1]
        paramDict={}
        # Keys arrive as bytes from the zip; expose them as str.
        for key in tempDict:
            paramDict[key.decode("utf-8")]=tempDict[key]
        return np.array(imgs),paramDict
    def getImagesFromTwoTriggerData(self,fileNo):
        """Split a 'C' image stack into (norm frames, meas frames, params)
        assuming alternating normalisation/measurement triggers."""
        imgs,paramsDict=self.readFromZip(fileNo,dstr='C')
        return imgs[::2,:,:],imgs[1::2,:,:],paramsDict
    def getAvgImageFromTwoTriggerData(self,fileNo):
        """Frame-averaged (norm, meas) images for a two-trigger file."""
        normImages,measImages,_=self.getImagesFromTwoTriggerData(fileNo)
        return np.mean(normImages,axis=0),np.mean(measImages,axis=0)
    def getImagesFromOneTriggerData(self,fileNo):
        """Raw 'C' image stack and parameters for a single-trigger file."""
        imgs,paramsDict=self.readFromZip(fileNo,dstr='C')
        return imgs,paramsDict
    def getAvgImageFromOneTriggerData(self,fileNo):
        """Frame-averaged image for a single-trigger file."""
        imgs,_=self.getImagesFromOneTriggerData(fileNo)
        return np.mean(imgs,axis=0)
    def cropImages(self,imageArray):
        """Crop a (frames, rows, cols) stack around cropCentre to
        cropHeight x cropWidth; centre[0] is the row, centre[1] the column."""
        h_top=int(self.cropCentre[0]-self.cropHeight/2)
        h_bottom=int(self.cropCentre[0]+self.cropHeight/2)
        w_left=int(self.cropCentre[1]-self.cropWidth/2)
        w_right=int(self.cropCentre[1]+self.cropWidth/2)
        return imageArray[:,h_top:h_bottom,w_left:w_right]
    def cropSingleImages(self,imageArray):
        """Crop a single 2D image around cropCentre.

        NOTE(review): the centre indices are used in the opposite order
        to cropImages (cropCentre[1] for rows here), and the bounds are
        not cast to int, so float slicing would fail under Python 3 --
        confirm both are intended.
        """
        h_top=self.cropCentre[1]-self.cropHeight/2
        h_bottom=self.cropCentre[1]+self.cropHeight/2
        w_left=self.cropCentre[0]-self.cropWidth/2
        w_right=self.cropCentre[0]+self.cropWidth/2
        return imageArray[w_left:w_right,h_top:h_bottom]
    def getMOTNumber(self,imageArray):
        """Per-frame molecule number from the cropped, summed counts."""
        totalCount=np.sum(self.cropImages(imageArray),axis=(1,2))
        totalMolecules=self.convertPhotonsToNumber(
            self.convertCountsToPhotons(totalCount))
        return totalMolecules
    def singleImageNumberWithBG(self,fileNo,fileNoBG,
                                param='Frame0Trigger'):
        """Background-subtracted number for one single-trigger file.

        Returns (mean number, standard error, value of `param`)."""
        imagesBG,_=self.readFromZip(fileNoBG,dstr='C')
        images,paramsDict=self.readFromZip(fileNo,dstr='C')
        imageSubBG=images-imagesBG
        # Cropping happens inside getMOTNumber; extra crop disabled here.
        imageCropped=imageSubBG#self.cropImages(imageSubBG)
        numbers=self.getMOTNumber(imageCropped)
        return np.mean(numbers),\
            np.std(numbers)/np.sqrt(len(numbers)),\
            paramsDict[param]
    def singleImageNumberRange(self,fileNoStart,fileNoStop,fileNoBG,
                               param='Frame0Trigger'):
        """Numbers for a file range, sorted by the scanned parameter.

        Returns (mean numbers, standard errors, sorted parameter values)."""
        meanNoList=[]
        stdNoList=[]
        paramsValList=[]
        for fileNo in range(fileNoStart,fileNoStop+1):
            meanNo,stdNo,paramsVal=\
                self.singleImageNumberWithBG(fileNo,fileNoBG,param)
            meanNoList.append(meanNo)
            stdNoList.append(stdNo)
            paramsValList.append(paramsVal)
        # Reorder everything by ascending parameter value.
        paramsValListSorted=np.sort(paramsValList)
        paramsValListSortIndex=np.argsort(paramsValList)
        meanNoListSorted=np.array(meanNoList)[paramsValListSortIndex]
        stdNoListSorted=np.array(stdNoList)[paramsValListSortIndex]
        return meanNoListSorted,stdNoListSorted,paramsValListSorted
    def twoImageNormalisedNumberWithBG(self,fileNo,fileNoBG,
                                       param='Frame0Trigger'):
        """Measurement/normalisation number ratio for a two-trigger file.

        The first shot of each stack is discarded ([1:]). Returns
        (mean ratio, standard error, value of `param`)."""
        avgNormImageBG,avgMeasImageBG=self.getAvgImageFromTwoTriggerData(fileNoBG)
        normImages,measImages,paramsDict=self.getImagesFromTwoTriggerData(fileNo)
        normImagesSubBG=normImages-avgNormImageBG
        measImagesSubBG=measImages-avgMeasImageBG
        normNums=self.getMOTNumber(normImagesSubBG[1:])
        measNums=self.getMOTNumber(measImagesSubBG[1:])
        propsTrapped=measNums/normNums
        return np.mean(propsTrapped),\
            np.std(propsTrapped)/np.sqrt(len(propsTrapped)),\
            paramsDict[param]
    def twoImageNormalisedNumberRange(self,fileNoStart,fileNoStop,fileNoBG,
                                      param='Frame0Trigger'):
        """Normalised numbers for a file range.

        NOTE(review): the sort computed below is immediately overwritten
        (paramsValListSorted is reassigned to the unsorted array and the
        sort index is left unused), so -- unlike singleImageNumberRange --
        the data are returned in file order, not parameter order. Confirm
        whether this is deliberate."""
        meanNoList=[]
        stdNoList=[]
        paramsValList=[]
        for fileNo in range(fileNoStart,fileNoStop+1):
            meanNo,stdNo,paramsVal=self.twoImageNormalisedNumberWithBG(fileNo,
                                                                       fileNoBG,param)
            meanNoList.append(meanNo)
            stdNoList.append(stdNo)
            paramsValList.append(paramsVal)
        paramsValListSorted=np.sort(paramsValList)
        paramsValListSortIndex=np.argsort(paramsValList)
        paramsValListSorted=np.array(paramsValList)
        meanNoListSorted=np.array(meanNoList)#[paramsValListSortIndex]
        stdNoListSorted=np.array(stdNoList)#[paramsValListSortIndex]
        return meanNoListSorted,stdNoListSorted,paramsValListSorted
    def linearFit(self,x,y):
        """Fit y=m*x+c; returns (model, m, c)."""
        f= lambda x,m,c: m*x+c
        m_trial=(y[-1]-y[0])/(x[-1]-x[0])
        c_trial=np.max(y) if m_trial<0 else np.min(y)
        popt,_=curve_fit(f,x,y,p0=[m_trial,c_trial])
        return f,popt[0],popt[1]
    def expFit(self,x,y):
        """Fit y=a*exp(-(x-c)/s); returns (model, a, c, s)."""
        f= lambda x,a,c,s: a*np.exp(-(x-c)/s)
        a_trial=np.max(y)
        c_trial=x[np.argmax(y)]
        # Decay-constant guess from the endpoint ratio.
        s_trial=np.abs((x[-1]-x[0])/np.log(np.abs(y[-1]/y[0])))
        popt,_=curve_fit(f,x,y,p0=[a_trial,c_trial,s_trial])
        return f,popt[0],popt[1],popt[2]
    def expFitOffset(self,x,y):
        """Fit y=a*exp(-(x-c)/s)+o; returns (model, a, c, s, o)."""
        f= lambda x,a,c,s,o: a*np.exp(-(x-c)/s)+o
        a_trial=np.max(y)
        o_trial=np.min(y)
        c_trial=x[np.argmax(y)]
        s_trial=np.abs((x[-1]-x[0])/np.log(np.abs(y[-1]/y[0])))
        popt,_=curve_fit(f,x,y,p0=[a_trial,c_trial,s_trial,o_trial])
        return f,popt[0],popt[1],popt[2],popt[3]
    def gaussianFit(self,x,y):
        """Fit y=a*exp(-(x-c)**2/(2*s**2)); returns (model, a, c, s).

        NOTE(review): the width guess samples y four points past the
        peak, so a peak within four samples of the end of the array
        raises IndexError."""
        f= lambda x,a,c,s: a*np.exp(-(x-c)**2/(2*s**2))
        loc_trial=np.argmax(y)
        a_trial=y[loc_trial]
        c_trial=x[loc_trial]
        s_trial=np.sqrt(np.abs(((x[int(loc_trial+4)]-c_trial)**2-\
                                (x[int(loc_trial)]-c_trial)**2)/\
                               (2*np.log(np.abs(y[int(loc_trial+4)]/y[int(loc_trial)])))))
        popt,_=curve_fit(f,x,y,p0=[a_trial,c_trial,s_trial])
        return f,popt[0],popt[1],popt[2]
    def numberFit(self,meanNos,paramVals,fitType,N_interpolate):
        """Fit mean numbers vs parameter with the chosen model.

        fitType is 'lin', 'exp' or 'gauss'; returns a dict holding the
        model, its best-fit parameters and an interpolated curve with
        N_interpolate points."""
        valdict={}
        valdict['paramValsInterpolated']=np.linspace(np.min(paramVals),
                                                     np.max(paramVals),N_interpolate)
        if fitType=='lin':
            valdict['numberLin'],valdict['m'],valdict['c']=\
                self.linearFit(paramVals,meanNos)
            valdict['meanNosInterpolated']=\
                valdict['numberLin'](valdict['paramValsInterpolated'],
                                     valdict['m'],valdict['c'])
        elif fitType=='exp':
            valdict['numberExp'],valdict['a'],valdict['c'],valdict['s']=\
                self.expFit(paramVals,meanNos)
            valdict['meanNosInterpolated']=\
                valdict['numberExp'](valdict['paramValsInterpolated'],
                                     valdict['a'],valdict['c'],valdict['s'])
        elif fitType=='gauss':
            valdict['numberGauss'],valdict['a'],valdict['c'],valdict['s']=\
                self.gaussianFit(paramVals,meanNos)
            valdict['meanNosInterpolated']=\
                valdict['numberGauss'](valdict['paramValsInterpolated'],
                                       valdict['a'],valdict['c'],valdict['s'])
        return valdict
    def number(self,fileNoStart,fileNoStop,fileNoBG,param,trigType,
               fit,fmt,fitType,N_interpolate,extParam,extParamVals):
        """Number-vs-parameter analysis for a file range.

        trigType 'single' uses background-subtracted absolute numbers,
        anything else uses the two-trigger normalised ratio. The plotting
        code is currently disabled (kept below as a string literal), so
        `fit`, `fmt`, `fitType`, `N_interpolate`, `extParam` and
        `extParamVals` are presently unused. Returns a dict with
        'meanNos', 'paramVals' and 'stdNos'."""
        valdict={}
        if trigType=='single':
            meanNos,stdNos,paramVals=\
                self.singleImageNumberRange(fileNoStart,fileNoStop,fileNoBG,param)
        else:
            meanNos,stdNos,paramVals=\
                self.twoImageNormalisedNumberRange(fileNoStart,fileNoStop,
                                                   fileNoBG,param)
        '''fig, ax = plt.subplots()
        if len(extParamVals):
            paramVals=np.array(extParamVals)
            param=extParam
        ax.errorbar(paramVals,meanNos,yerr=stdNos,fmt=fmt)
        if fit:
            valdictFit=self.numberFit(meanNos,paramVals,fitType,
                                      N_interpolate)
            valdict.update(valdictFit)
            ax.plot(valdictFit['paramValsInterpolated'],
                    valdictFit['meanNosInterpolated'],'-r')
        ax.xaxis.set_minor_locator(AutoMinorLocator())
        ax.yaxis.set_minor_locator(AutoMinorLocator())
        ax.set_xlabel(param)
        ax.set_ylabel('MOT number')
        plt.show()'''
        valdict['meanNos']=meanNos
        valdict['paramVals']=paramVals
        valdict['stdNos']=stdNos
        return valdict
    def gaussianFitToCloud(self,imageArray):
        """Fit 1D Gaussians to the row/column *through the peak pixel*.

        Axes are centred on the image middle in micrometres
        (pixelSize*1e6). Returns a dict of models, parameters and data."""
        valdict={}
        peakPos=np.unravel_index(np.argmax(imageArray, axis=None),
                                 imageArray.shape)
        radialXLength=len(imageArray[peakPos[0],:])
        axialXLength=len(imageArray[:,peakPos[1]])
        radialX=self.pixelSize*1e6*np.arange(-radialXLength/2.0,
                                             radialXLength/2.0)
        axialX=self.pixelSize*1e6*np.arange(-axialXLength/2.0,
                                            axialXLength/2.0)
        radialY=imageArray[peakPos[0],:]
        axialY=imageArray[:,peakPos[1]]
        valdict['radialGaussian'],valdict['radialA'],valdict['radialC'],\
            valdict['radialSigma']=self.gaussianFit(radialX,radialY)
        valdict['axialGaussian'],valdict['axialA'],valdict['axialC'],\
            valdict['axialSigma']=self.gaussianFit(axialX,axialY)
        valdict['radialX']=radialX
        valdict['radialY']=radialY
        valdict['axialX']=axialX
        valdict['axialY']=axialY
        return valdict
    def gaussianFitToCloud2(self,imageArray):
        """Fit 1D Gaussians to the *summed* row/column profiles.

        Axes start at zero, in metres (pixelSize*binSize/magFactor per
        sample). Returns the same dict layout as gaussianFitToCloud."""
        valdict={}
        radialY=np.sum(imageArray,axis=0)
        axialY=np.sum(imageArray,axis=1)
        radialYLength=len(radialY)
        axialYLength=len(axialY)
        radialX=self.pixelSize*(self.binSize/self.magFactor)*np.arange(0,radialYLength)
        axialX=self.pixelSize*(self.binSize/self.magFactor)*np.arange(0,axialYLength)
        valdict['radialGaussian'],valdict['radialA'],valdict['radialC'],\
            valdict['radialSigma']=self.gaussianFit(radialX,radialY)
        valdict['axialGaussian'],valdict['axialA'],valdict['axialC'],\
            valdict['axialSigma']=self.gaussianFit(axialX,axialY)
        valdict['radialX']=radialX
        valdict['radialY']=radialY
        valdict['axialX']=axialX
        valdict['axialY']=axialY
        return valdict
    def getTemperature(self,fileNoStart,fileNoStop,fileNoBG,param):
        """Collect cloud sizes over an expansion-time scan and fit
        size^2 vs time^2 linearly for both axes.

        The scanned `param` is multiplied by 1e-5 to convert to seconds
        -- presumably 10 us hardware units; TODO confirm. Returns a dict
        with per-file fit dicts, sigma arrays, time values and the two
        linear fits."""
        valdict={}
        timeVals=[]
        radialSigmas=[]
        axialSigmas=[]
        bg,_=self.readFromZip(fileNoBG,dstr='C')
        valdictFits=[]
        for fileNo in range(fileNoStart,fileNoStop+1):
            imgs,paramsDict=self.readFromZip(fileNo,dstr='C')
            imgsSubBG=imgs-bg
            avgImage=np.mean(self.cropImages(imgsSubBG),axis=0)
            valdictFit=self.gaussianFitToCloud2(avgImage)
            timeVals.append(paramsDict[param])
            radialSigmas.append(valdictFit['radialSigma'])
            axialSigmas.append(valdictFit['axialSigma'])
            valdictFits.append(valdictFit)
        valdict['valdictFits']=valdictFits
        valdict['axialSigmas']=np.array(axialSigmas)
        valdict['radialSigmas']=np.array(radialSigmas)
        valdict['timeVals']=np.array(timeVals)*1e-5
        valdict['axialLin'],valdict['axialM'],valdict['axialC']=\
            self.linearFit(valdict['timeVals']**2,valdict['axialSigmas']**2)
        valdict['radialLin'],valdict['radialM'],valdict['radialC']=\
            self.linearFit(valdict['timeVals']**2,valdict['radialSigmas']**2)
        return valdict
    def temperature(self,fileNoStart,fileNoStop,fileNoBG,N_interpolate,
                    param,showFits=True,cols=4):
        """Temperature analysis with plots.

        Converts the getTemperature slopes to mK using a mass of 59 u
        (CaF: 40Ca + 19F -- confirm). Optionally also shows the per-file
        Gaussian fits. Returns the getTemperature dict extended with the
        temperatures and the interpolated time axis. `cols` is currently
        unused."""
        valdict=self.getTemperature(fileNoStart,fileNoStop,fileNoBG,param)
        # slope * m/k_B gives T in K; the 1e3 converts to mK.
        valdict['axialTemp']=valdict['axialM']*(59*cn.u/cn.k)*1e3
        valdict['radialTemp']=valdict['radialM']*(59*cn.u/cn.k)*1e3
        timeValsInterpolated=np.linspace(np.min(valdict['timeVals']),
                                         np.max(valdict['timeVals']),
                                         N_interpolate)
        fig, ax = plt.subplots(1,2)
        # size^2 vs time^2 in mm^2 / ms^2 (factor 1e6 on squares).
        ax[0].plot(valdict['timeVals']**2*1e6,valdict['radialSigmas']**2*1e6,'ok')
        ax[1].plot(valdict['timeVals']**2*1e6,valdict['axialSigmas']**2*1e6,'ok')
        ax[0].plot(timeValsInterpolated**2*1e6,
                   valdict['radialLin'](timeValsInterpolated**2,
                                        valdict['radialM'],
                                        valdict['radialC'])*1e6,'-r')
        ax[1].plot(timeValsInterpolated**2*1e6,
                   valdict['axialLin'](timeValsInterpolated**2,
                                       valdict['axialM'],
                                       valdict['axialC'])*1e6,'-r')
        ax[0].set_title('Tr: {0:2.4f} [mK]'.format(valdict['radialTemp']))
        ax[1].set_title('Ta: {0:2.4f} [mK]'.format(valdict['axialTemp']))
        ax[1].yaxis.tick_right()
        ax[1].yaxis.set_label_position("right")
        for axis in ax:
            axis.xaxis.set_minor_locator(AutoMinorLocator())
            axis.yaxis.set_minor_locator(AutoMinorLocator())
            axis.set_xlabel('time^2 [ms^2]')
            axis.set_ylabel('size^2 [mm^2]')
        valdict['timeValsInterpolated']=timeValsInterpolated
        if showFits:
            l=len(valdict['valdictFits'])
            for k in range(l):
                fig, ax = plt.subplots(1,1)
                valdictK=valdict['valdictFits'][k]
                ax.plot(valdictK['radialX'],valdictK['radialY'],'ob')
                ax.plot(valdictK['axialX'],valdictK['axialY'],'og')
                ax.plot(valdictK['radialX'],
                        valdictK['radialGaussian'](valdictK['radialX'],
                                                   valdictK['radialA'],
                                                   valdictK['radialC'],
                                                   valdictK['radialSigma']),'-r')
                ax.plot(valdictK['axialX'],
                        valdictK['axialGaussian'](valdictK['axialX'],
                                                  valdictK['axialA'],
                                                  valdictK['axialC'],
                                                  valdictK['axialSigma']),'-k')
        plt.show()
        return valdict
    def getSize(self,fileNoStart,fileNoStop,fileNoBG,param):
        """Collect fitted cloud sizes versus the scanned parameter.

        Same loop as getTemperature but without the linear size^2 fits;
        paramVals are returned unconverted (the unit conversion is
        commented out)."""
        valdict={}
        paramVals=[]
        radialSigmas=[]
        axialSigmas=[]
        bg,_=self.readFromZip(fileNoBG,dstr='C')
        valdictFits=[]
        for fileNo in range(fileNoStart,fileNoStop+1):
            imgs,paramsDict=self.readFromZip(fileNo,dstr='C')
            imgsSubBG=imgs-bg
            avgImage=np.mean(self.cropImages(imgsSubBG),axis=0)
            valdictFit=self.gaussianFitToCloud2(avgImage)
            paramVals.append(paramsDict[param])
            radialSigmas.append(valdictFit['radialSigma'])
            axialSigmas.append(valdictFit['axialSigma'])
            valdictFits.append(valdictFit)
        valdict['valdictFits']=valdictFits
        valdict['axialSigmas']=np.array(axialSigmas)
        valdict['radialSigmas']=np.array(radialSigmas)
        valdict['paramVals']=np.array(paramVals)#-np.min(paramVals))*1e-5
        return valdict
    def size(self,fileNoStart,fileNoStop,fileNoBG,N_interpolate,
             param,showFits=True,cols=4):
        """Plot radial/axial cloud size versus the scanned parameter and
        return the getSize dict. N_interpolate, showFits and cols are
        currently unused."""
        valdict=self.getSize(fileNoStart,fileNoStop,fileNoBG,param)
        fig, ax = plt.subplots(1,2)
        ax[0].plot(valdict['paramVals'],valdict['radialSigmas'],'ok')
        ax[1].plot(valdict['paramVals'],valdict['axialSigmas'],'ok')
        return valdict
    def lifetime(self,fileNoStart,fileNoStop,fileNoBG,
                 param,trigType,N_interpolate,fmt):
        """MOT lifetime from an exponential fit of number vs hold time.

        The parameter is divided by 100 and offset to start at zero --
        presumably converting hardware units to ms (TODO confirm); the
        plot title reports the fitted decay constant in ms."""
        valdict={}
        if trigType=='single':
            #param='Frame0Trigger'
            meanNos,stdNos,paramVals=\
                self.singleImageNumberRange(fileNoStart,fileNoStop,fileNoBG,param)
        else:
            #param='Frame1Trigger'
            meanNos,stdNos,paramVals=\
                self.twoImageNormalisedNumberRange(fileNoStart,fileNoStop,
                                                   fileNoBG,param)
        offset=np.min(paramVals)/100.0
        paramVals=paramVals/100.0-offset
        fig, ax = plt.subplots()
        ax.errorbar(paramVals,meanNos,yerr=stdNos,fmt=fmt)
        valdictFit=self.numberFit(meanNos,paramVals,fitType='exp',
                                  N_interpolate=200)
        ax.plot(valdictFit['paramValsInterpolated'],
                valdictFit['meanNosInterpolated'],'-r')
        ax.xaxis.set_minor_locator(AutoMinorLocator())
        ax.yaxis.set_minor_locator(AutoMinorLocator())
        ax.set_xlabel(param+' [ms] [offset: {}]'.format(offset))
        ax.set_ylabel('MOT number')
        ax.set_title('Lifetime: {0:.2f} ms'.format(valdictFit['s']))
        plt.show()
        valdict['meanNos']=meanNos
        valdict['paramVals']=paramVals
        valdict.update(valdictFit)
        return valdict
    def viewImages(self,fileNoStart,fileNoStop,fileNoBG,cols=4):
        """Show the background-subtracted, cropped average image of every
        file in the range on a rows x cols grid of subplots."""
        l=(fileNoStop+1)-fileNoStart
        rows=np.int(np.ceil(l/float(cols)))
        fig, ax = plt.subplots(rows,cols)
        avgImages=[]
        bg,_=self.readFromZip(fileNoBG,dstr='C')
        for fileNo in range(fileNoStart,fileNoStop+1):
            imgs,_=self.readFromZip(fileNo,dstr='C')
            imgsSubBG=imgs-bg
            avgImage=np.mean(self.cropImages(imgsSubBG),axis=0)
            avgImages.append(avgImage)
        if rows>1 and cols>1:
            # 2D axes grid: index by (row, column).
            k=0
            while k<l:
                ax[np.int(k/cols),np.mod(k,cols)].imshow(avgImages[k])
                k+=1
            for row in range(rows):
                for col in range(cols):
                    ax[row,col].axis('off')
        else:
            # A single row (or column) gives a 1D axes array.
            k=0
            while k<l:
                ax[np.mod(k,cols)].imshow(avgImages[k])
                k+=1
            longer=np.max([rows,cols])
            for axis in range(longer):
                ax[axis].axis('off')
        plt.show()
    def singleImageLifetime(self,fileNo,fileNoBg,shotsPerImage,t0,dt):
        """Lifetime from repeated shots within single files.

        Frames are grouped into runs of `shotsPerImage`, each run is
        normalised to its first shot, and the averaged decay is fitted
        with the module-level expFit. Times t0+i*dt are scaled by 1e-2
        (hardware units to ms? -- confirm).

        NOTE(review): noShots uses '/', which is integer division only
        under Python 2; range(noShots) would fail on Python 3."""
        images,_=self.readFromZip(fileNo,dstr='C')
        bg,_=self.readFromZip(fileNoBg,dstr='C')
        t=np.array([t0+i*dt for i in range(shotsPerImage)])*1e-2
        images=images-bg
        noShots=len(images)/shotsPerImage
        k=0
        N_list=[]
        for i in range(noShots):
            imageArray=images[k:k+shotsPerImage,:,:]
            k+=shotsPerImage
            N=self.getMOTNumber(imageArray)
            # Normalise each run to its first shot.
            N=N/N[0]
            N_list.append(N)
        N_list=np.array(N_list)
        N_mean=np.mean(N_list,axis=0)
        N_std=np.std(N_list,axis=0)/np.sqrt(noShots)
        f,a,c,s=expFit(t,N_mean)
        return N_mean,N_std,t,f,a,c,s
    def plotDualImage(self,fileNoStart,fileNoStop,fileNoBG):
        """Show the background-subtracted, cropped norm/meas image pair
        for every file in the range, one figure per file."""
        bg1,bg2=self.getAvgImageFromTwoTriggerData(fileNoBG)
        for fileNo in range(fileNoStart,fileNoStop+1):
            imgs1,imgs2=self.getAvgImageFromTwoTriggerData(fileNo)
            imgs1-=bg1
            imgs2-=bg2
            imgs1=self.cropSingleImages(imgs1)
            imgs2=self.cropSingleImages(imgs2)
            _,ax=plt.subplots(1,2)
            ax[0].imshow(imgs1)
            ax[1].imshow(imgs2)
    def __call__(self,fileNoStart,fileNoStop,fileNoBG,
                 requirement='Number',
                 param='Frame0Trigger',
                 trigType='single',
                 fit=False,fitType='lin',
                 N_interpolate=200,
                 extParam='give a name',
                 extParamVals=[],
                 fmt='ok',
                 showFits=False,
                 preferredUnits=['um','mK','ms'],
                 imageCols=4):
        """Dispatch to the analysis selected by `requirement` (see the
        class docstring). 'Image' returns None; unknown requirements are
        silently ignored. `preferredUnits` is currently unused."""
        if requirement=='Number':
            return self.number(fileNoStart,fileNoStop,fileNoBG,param,trigType,
                               fit,fmt,fitType,N_interpolate,extParam,extParamVals)
        elif requirement=='Temperature':
            return self.temperature(fileNoStart,fileNoStop,fileNoBG,
                                    N_interpolate,param,showFits,imageCols)
        elif requirement=='Lifetime':
            return self.lifetime(fileNoStart,fileNoStop,fileNoBG,param,
                                 trigType,N_interpolate,fmt)
        elif requirement=='Image':
            self.viewImages(fileNoStart,fileNoStop,fileNoBG,imageCols)
def defaultCaF():
    """
    Build an Analysis object pre-loaded with the default CaF settings:\n
    bitDepth=16, fullWellCapacity=18000, collectionSolidAngle=0.023,\n
    pixelSize=6.45e-6, binSize=8, magFactor=0.5, bitsPerChannel=12,\n
    gamma=1.5e6, etaQ=0.65, exposureTime=10e-3, cropCentre=(74,64),\n
    cropHeight=100, cropWidth=110 \n
    Override any value on the returned instance with \n
    instanceName.propertyName=propertyValue \n
    and remember to also set \n
    analysis.dirPath=path to the data directory \n
    analysis.fileNameString=starting name of the files before underscore \n
    Example:\n
    analysis=defaultCaF() \n
    analysis.exposureTime=10e-3 \n
    analysis.dirPath='../../data/MOTMasterData' \n
    analysis.fileNameString='CaF16Jan1900' \n
    """
    # Analysis.__init__ copies every dict entry onto the instance, so a
    # single settings dict is equivalent to attribute-by-attribute setup.
    settings = {
        'bitDepth': 16,
        'fullWellCapacity': 18000,
        'collectionSolidAngle': 0.023,
        'pixelSize': 6.45e-6,
        'binSize': 8,
        'magFactor': 0.5,
        'bitsPerChannel': 12,
        'gamma': 1.5e6,
        'etaQ': 0.65,
        'exposureTime': 10e-3,
        'cropCentre': (74, 64),
        'cropHeight': 100,
        'cropWidth': 110,
    }
    return Analysis(settings)
if __name__=='__main__':
    # Example/self-test run: analyse files 25-30 of the 'CaF16Jan1900'
    # dataset (file 31 is the background) and fit an exponential to the
    # MOT number versus expansion time.
    analysis=defaultCaF()
    analysis.dirPath='../../data/temperature'
    analysis.fileNameString='CaF16Jan1900'
    a=analysis(fileNoStart=25,
               fileNoStop=30,
               fileNoBG=31,
               requirement='Number',
               param='ExpansionTime',
               fit=True,fitType='exp',
               trigType='single',
               N_interpolate=200,
               extParam='Test',
               extParamVals=[],
               fmt='ok',
               showFits=True,
               imageCols=4)
| ColdMatter/EDMSuite | MoleculeMOTScripts/analysis2.py | Python | mit | 41,931 | [
"Gaussian"
] | 81d018bb5a779007e098906a51f30f0db31aee3cad39f7219135192e5da2801d |
import sys
import subprocess
import os
# Translation table mapping each nucleotide (including IUPAC ambiguity
# codes, upper and lower case) to its complement. Built eagerly at import
# time: the original initialised it lazily inside comp() via a mutable
# global, which added a 'global' statement and a per-call None check for
# no benefit -- str.maketrans is cheap and has no import-time side effects.
DNA_COMP = str.maketrans("ATCGMRWSYKNatcgmrwsykn",
                         "TAGCKYWSRMNtagckywsrmn")


def comp(seq_str):
    """complements the provided DNA sequence and returns it"""
    return seq_str.translate(DNA_COMP)


def revcomp(seq_str):
    """returns reverse complement of provided DNA sequence"""
    return comp(seq_str)[::-1]
def sort_bam(input_bam, output_prefix):
    """Calls samtools sort on input_bam filename and writes to
    output_prefix + '.sort.bam'. Takes into account that the command line
    arguments for samtools sort have changed between versions."""
    output_bam = output_prefix + ".sort.bam"

    # first try new way of using samtools sort
    failed = False
    cmd = "samtools sort -o " + output_bam + " " + input_bam
    sys.stderr.write("running command: %s\n" % cmd)
    try:
        subprocess.check_call(cmd, shell=True)
    except Exception as e:
        sys.stderr.write("samtools sort command failed:\n%s\n" %
                         str(e))
        failed = True
    if not os.path.exists(output_bam):
        sys.stderr.write("output file %s does not exist\n" % output_bam)
        failed = True

    if failed:
        # OLD way of calling samtools (changed in newer versions)
        sys.stderr.write("samtools sort command failed, trying old samtools "
                         "syntax\n")

        cmd = "samtools sort " + input_bam + " " + output_prefix
        sys.stderr.write("running command: %s\n" % cmd)

        try:
            subprocess.check_call(cmd, shell=True)
        except Exception as e:
            sys.stderr.write("samtools sort command failed:\n%s\n" %
                             str(e))
            # sys.exit instead of the bare exit() builtin, which is only
            # guaranteed to exist in interactive/site-enabled sessions.
            sys.exit(1)

        # BUG FIX: the original checked 'paths.sorted_output_bam', but no
        # 'paths' name exists in this module, so this branch always died
        # with a NameError instead of reporting the real problem.
        # NOTE(review): old-style samtools sort writes output_prefix+'.bam',
        # not '.sort.bam' -- confirm output_prefix already ends in '.sort'
        # at the call sites.
        if not os.path.exists(output_bam):
            raise IOError("Failed to create sorted BAM file '%s'" %
                          output_bam)
def is_gzipped(filename):
    """Checks first two bytes of provided filename and looks for
    gzip magic number. Returns true if it is a gzipped file"""
    with open(filename, "rb") as handle:
        magic = handle.read(2)
    # gzip files always start with the two-byte magic sequence 1f 8b
    return magic == b'\x1f\x8b'
def check_pysam_version(min_pysam_ver="0.8.4"):
    """Checks that the imported version of pysam is greater than
    or equal to provided version. Returns 0 if version is high enough,
    raises ImportWarning otherwise."""
    import pysam

    # NOTE(review): assumes every dotted component is numeric; a version
    # string like '1.0rc1' would raise ValueError here.
    min_ver = [int(x) for x in min_pysam_ver.split(".")]
    pysam_ver = [int(x) for x in pysam.__version__.split(".")]

    # BUG FIX: the original computed min(len(pysam_ver), len(min_pysam_ver))
    # -- the length of the version *string* (5 for "0.8.4") rather than of
    # the parsed component list -- so the loop could index past the end of
    # min_ver and raise IndexError for versions with many components.
    n_ver = min(len(pysam_ver), len(min_ver))

    for i in range(n_ver):
        if pysam_ver[i] < min_ver[i]:
            raise ImportWarning("pysam version is %s, but pysam version %s "
                                "or greater is required" % (pysam.__version__,
                                                            min_pysam_ver))
        if pysam_ver[i] > min_ver[i]:
            # version like 1.0 beats version like 0.8
            break

    return 0
def check_pytables_version():
    """Checks that PyTables version 3 is being used. PyTables version 3
    changes the names of many functions and is not backwards compatible
    with PyTables 2. Previous versions of WASP used version 2, but switch
    to version 3 was made at same time as switch to python3."""
    import tables

    # Parse every dotted component (matching the original behaviour,
    # including a ValueError on non-numeric components), then compare
    # only the major version.
    version_parts = [int(part) for part in tables.__version__.split(".")]
    if version_parts[0] < 3:
        raise ImportWarning("pytables version is %s, but pytables version "
                            ">=3 is required" % (tables.__version__))
    return 0
def check_python_version():
    """Checks that Python version 3 is being used. Previous versions of
    WASP used python2.7, but version 3 is now required."""
    # sys.version_info gives the major version directly, without parsing
    # the human-readable sys.version banner string.
    major = sys.version_info[0]
    if major < 3:
        raise ImportWarning("python version is %s, but version "
                            ">=3 is required" % (sys.version))
    return 0
| gmcvicker/WASP | mapping/util.py | Python | apache-2.0 | 4,288 | [
"pysam"
] | d75919ebbd50c8d8ff43191b5c418eab7af9fbec5452dbbd242ff3fdc2d224b3 |
"""Dirac Jobs Table."""
import logging
from copy import deepcopy
from collections import Counter, defaultdict
from sqlalchemy import Column, Integer, Enum, ForeignKey
from sqlalchemy.orm import relationship
from lzproduction.rpc.DiracRPCClient import dirac_api_client
from ..utils import db_session
from ..statuses import DIRACSTATUS
from .SQLTableBase import SQLTableBase
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class DiracJobs(SQLTableBase):
    """Dirac Jobs SQL Table."""
    __tablename__ = 'diracjobs'
    id = Column(Integer, primary_key=True)  # pylint: disable=invalid-name
    # Each DIRAC job belongs to exactly one parametric job.
    parametricjob_id = Column(Integer, ForeignKey('parametricjobs.id'), nullable=False)
    parametricjob = relationship("ParametricJobs", back_populates='diracjobs')
    status = Column(Enum(DIRACSTATUS), nullable=False, default=DIRACSTATUS.Unknown)
    # Number of times this job has already been auto-rescheduled (capped below).
    reschedules = Column(Integer, nullable=False, default=0)

    @staticmethod
    def update_status(parametricjob):
        """
        Bulk update status.

        This method updates all DIRAC jobs which belong to the given
        parametricjob, rescheduling Failed/Stalled jobs where allowed,
        and returns a Counter of local statuses for the remaining jobs.
        """
        # Load the jobs, then detach them from the session so they stay
        # usable after the session is closed.
        with db_session() as session:
            dirac_jobs = session.query(DiracJobs)\
                                .filter_by(parametricjob_id=parametricjob.id)\
                                .all()
            session.expunge_all()

        # Group jobs by status
        job_types = defaultdict(set)
        for job in dirac_jobs:
            job_types[job.status].add(job.id)
            # add auto-reschedule jobs
            # Failed/Stalled jobs are auto-reschedule candidates, but only
            # for up to two attempts each.
            if job.status in (DIRACSTATUS.Failed, DIRACSTATUS.Stalled) and job.reschedules < 2:
                job_types['Reschedule'].add(job.id)

        # Only act on auto-reschedule candidates once at least one sibling
        # job has reached Done; otherwise hold off.
        reschedule_jobs = job_types['Reschedule'] if job_types[DIRACSTATUS.Done] else set()
        # All non-terminal states still require status polling.
        monitor_jobs = job_types[DIRACSTATUS.Running] | \
                       job_types[DIRACSTATUS.Received] | \
                       job_types[DIRACSTATUS.Queued] | \
                       job_types[DIRACSTATUS.Waiting] | \
                       job_types[DIRACSTATUS.Checking] | \
                       job_types[DIRACSTATUS.Matched] | \
                       job_types[DIRACSTATUS.Unknown] | \
                       job_types[DIRACSTATUS.Completed]
        # An explicit user request overrides the auto policy and retries
        # every Failed/Stalled job regardless of the reschedule counter.
        if parametricjob.reschedule:
            reschedule_jobs = job_types[DIRACSTATUS.Failed] | job_types[DIRACSTATUS.Stalled]

        # Reschedule jobs
        if reschedule_jobs:
            with dirac_api_client() as dirac:
                # deepcopy detaches the result from the RPC proxy.
                result = deepcopy(dirac.reschedule(reschedule_jobs))
            if result['OK']:
                logger.info("Rescheduled jobs: %s", result['Value'])
                # Rescheduled jobs go back into the monitoring pool.
                monitor_jobs.update(result['Value'])
                with db_session(reraise=False) as session:
                    session.query(DiracJobs)\
                           .filter(DiracJobs.id.in_(result['Value']))\
                           .update({'reschedules': DiracJobs.reschedules + 1},
                                   synchronize_session=False)
                    # rescheduled_jobs = session.query(DiracJobs.id, DiracJobs.reschedules)\
                    #                           .filter(DiracJobs.id.in_(results['Value']))\
                    #                           .all()
                    # session.bulk_update_mappings(DiracJobs, [dict(job._asdict(), reschedules=job.reschedules + 1)
                    #                                          for job in rescheduled_jobs])
                skipped_jobs = reschedule_jobs.difference(result["Value"])
                if skipped_jobs:
                    logger.warning("Failed to reschedule jobs: %s", list(skipped_jobs))
            else:
                logger.error("Problem rescheduling jobs: %s", result['Message'])

        # Update status
        with dirac_api_client() as dirac:
            dirac_answer = deepcopy(dirac.status(monitor_jobs))
        if not dirac_answer['OK']:
            # NOTE(review): DiracError is not imported in this module, so this
            # raise would itself fail with a NameError -- confirm and fix.
            raise DiracError(dirac_answer['Message'])
        dirac_statuses = dirac_answer['Value']
        skipped_jobs = monitor_jobs.difference(dirac_statuses)
        if skipped_jobs:
            logger.warning("Couldn't check the status of jobs: %s", list(skipped_jobs))

        with db_session() as session:
            # session.query(DiracJobs)\
            #        .filter(DiracJobs.id.in_(dirac_statuses.keys()))\
            #        .update({'status': DIRACSTATUS[dirac_statuses[DiracJobs.id]['Status']]})
            # NOTE(review): dict.iteritems() is Python 2 only; use items() if
            # this module is ever migrated to Python 3.
            session.bulk_update_mappings(DiracJobs, [{'id': i, 'status': DIRACSTATUS[j['Status']]}
                                                     for i, j in dirac_statuses.iteritems()])
            session.flush()
            session.expire_all()
            # Re-read the now-updated jobs to summarise their statuses.
            dirac_jobs = session.query(DiracJobs)\
                                .filter_by(parametricjob_id=parametricjob.id)\
                                .all()
            session.expunge_all()
        if not dirac_jobs:
            logger.warning("No dirac jobs associated with parametricjob: %s. returning status unknown", parametricjob.id)
            return Counter([DIRACSTATUS.Unknown.local_status])
        return Counter(job.status.local_status for job in dirac_jobs)
| alexanderrichards/LZProduction | lzproduction/sql/tables/DiracJobs.py | Python | mit | 5,198 | [
"DIRAC"
] | 84a49c762050bf90b45874d549b76898a1354d5fccd626826558d583ee731555 |
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# Based on Bio.Nexus, copyright 2005-2008 by Frank Kauff & Cymon J. Cox.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""I/O function wrappers for the Newick file format.
See: http://evolution.genetics.washington.edu/phylip/newick_doc.html
"""
__docformat__ = "restructuredtext en"
import warnings
from cStringIO import StringIO
from Bio import BiopythonDeprecationWarning
from Bio.Phylo import Newick
# Definitions retrieved from Bio.Nexus.Trees
NODECOMMENT_START = '[&'
NODECOMMENT_END = ']'
class NewickError(Exception):
    """Exception raised when Newick object construction cannot continue."""
    # The docstring alone makes the class body valid; the redundant
    # trailing `pass` has been removed.
# ---------------------------------------------------------
# Public API
def parse(handle, **kwargs):
    """Iterate over the trees in a Newick file handle.

    :returns: generator of Bio.Phylo.Newick.Tree objects.
    """
    # Thin wrapper: delegate to the Parser class, forwarding any options.
    newick_parser = Parser(handle)
    return newick_parser.parse(**kwargs)
def write(trees, handle, plain=False, **kwargs):
    """Write the given trees in Newick format to a file handle.

    :returns: number of trees written.
    """
    # Thin wrapper: delegate to the Writer class, forwarding any options.
    writer = Writer(trees)
    return writer.write(handle, plain=plain, **kwargs)
# ---------------------------------------------------------
# Input
class Parser(object):
    """Parse a Newick tree given a file handle.

    Based on the parser in `Bio.Nexus.Trees`.
    """

    def __init__(self, handle):
        self.handle = handle

    @classmethod
    def from_string(cls, treetext):
        # Convenience constructor: wrap a raw Newick string in a handle.
        handle = StringIO(treetext)
        return cls(handle)

    def parse(self, values_are_confidence=False, rooted=False,
              # XXX Deprecated kwarg -- remove after Biopython 1.58
              values_are_support=None):
        """Parse the text stream this object was initialized with."""
        # XXX Handling the deprecated kwarg -- remove after Biopython 1.58
        if values_are_support is not None:
            warnings.warn("use the argument values_are_confidence instead",
                          BiopythonDeprecationWarning)
            values_are_confidence = values_are_support
        self.values_are_confidence = values_are_confidence
        self.rooted = rooted  # XXX this attribue is useless
        # Accumulate lines until a ';' terminates the current tree, then
        # parse and yield that tree; multiple trees per handle are allowed.
        buf = ''
        for line in self.handle:
            buf += line.rstrip()
            if buf.endswith(';'):
                yield self._parse_tree(buf, rooted)
                buf = ''
        if buf:
            # Last tree is missing a terminal ';' character -- that's OK
            yield self._parse_tree(buf, rooted)

    def _parse_tree(self, text, rooted):
        """Parses the text representation into an Tree object."""
        # XXX Pass **kwargs along from Parser.parse?
        return Newick.Tree(root=self._parse_subtree(text), rooted=self.rooted)

    def _parse_subtree(self, text):
        """Parse ``(a,b,c...)[[[xx]:]yy]`` into subcomponents, recursively."""
        text = text.strip().rstrip(';')
        if text.count('(')!=text.count(')'):
            raise NewickError("Parentheses do not match in (sub)tree: " + text)
        # Text is now "(...)..." (balanced parens) or "..." (leaf node)
        if text.count('(') == 0:
            # Leaf/terminal node -- recursion stops here
            return self._parse_tag(text)
        # Handle one layer of the nested subtree
        # XXX what if there's a paren in a comment or other string?
        close_posn = text.rfind(')')
        subtrees = []
        # Locate subtrees by counting nesting levels of parens
        plevel = 0
        prev = 1
        for posn in range(1, close_posn):
            if text[posn] == '(':
                plevel += 1
            elif text[posn] == ')':
                plevel -= 1
            elif text[posn] == ',' and plevel == 0:
                # A comma at nesting level 0 separates sibling subtrees.
                subtrees.append(text[prev:posn])
                prev = posn + 1
        subtrees.append(text[prev:close_posn])
        # Construct a new clade from trailing text, then attach subclades
        clade = self._parse_tag(text[close_posn+1:])
        clade.clades = [self._parse_subtree(st) for st in subtrees]
        return clade

    def _parse_tag(self, text):
        """Extract the data for a node from text.

        :returns: Clade instance containing any available data
        """
        # Extract the comment
        comment_start = text.find(NODECOMMENT_START)
        if comment_start != -1:
            comment_end = text.find(NODECOMMENT_END)
            if comment_end == -1:
                raise NewickError('Error in tree description: '
                                  'Found %s without matching %s'
                                  % (NODECOMMENT_START, NODECOMMENT_END))
            comment = text[comment_start+len(NODECOMMENT_START):comment_end]
            # Strip the comment out of the remaining text before parsing it.
            text = text[:comment_start] + text[comment_end+len(NODECOMMENT_END):]
        else:
            comment = None
        clade = Newick.Clade(comment=comment)
        # Extract name (taxon), and optionally support, branch length
        # Float values are support and branch length, the string is name/taxon
        values = []
        for part in (t.strip() for t in text.split(':')):
            if part:
                try:
                    values.append(float(part))
                except ValueError:
                    assert clade.name is None, "Two string taxonomies?"
                    clade.name = part
        if len(values) == 1:
            # Real branch length, or support as branch length
            if self.values_are_confidence:
                clade.confidence = values[0]
            else:
                clade.branch_length = values[0]
        elif len(values) == 2:
            # Two non-taxon values: support comes first. (Is that always so?)
            clade.confidence, clade.branch_length = values
        elif len(values) > 2:
            raise NewickError("Too many colons in tag: " + text)
        return clade
# ---------------------------------------------------------
# Output
class Writer(object):
    """Based on the writer in Bio.Nexus.Trees (str, to_string)."""

    def __init__(self, trees):
        self.trees = trees

    def write(self, handle, **kwargs):
        """Write this instance's trees to a file handle.

        :returns: number of trees written (one per line).
        """
        count = 0
        for treestr in self.to_strings(**kwargs):
            handle.write(treestr + '\n')
            count += 1
        return count

    def to_strings(self, confidence_as_branch_length=False,
                   branch_length_only=False, plain=False,
                   plain_newick=True, ladderize=None, max_confidence=1.0,
                   format_confidence='%1.2f', format_branch_length='%1.5f',
                   # XXX Deprecated kwargs -- remove after Biopython 1.58
                   support_as_branchlengths=None, branchlengths_only=None,
                   max_support=None):
        """Return an iterable of PAUP-compatible tree lines."""
        # XXX Handling the deprecated kwargs -- remove after Biopython 1.58
        if support_as_branchlengths is not None:
            warnings.warn(
                "use the argument confidence_as_branch_length instead",
                BiopythonDeprecationWarning)
            confidence_as_branch_length = support_as_branchlengths
        if branchlengths_only is not None:
            warnings.warn("use the argument branch_length_only instead",
                          BiopythonDeprecationWarning)
            branch_length_only = branchlengths_only
        if max_support is not None:
            warnings.warn("use the argument max_confidence instead",
                          BiopythonDeprecationWarning)
            max_confidence = max_support
        # If there's a conflict in the arguments, we override plain=True
        if confidence_as_branch_length or branch_length_only:
            plain = False
        # Build the node-tag formatter once; newickize() below closes over it.
        make_info_string = self._info_factory(plain,
            confidence_as_branch_length, branch_length_only, max_confidence,
            format_confidence, format_branch_length)

        def newickize(clade):
            """Convert a node tree to a Newick tree string, recursively."""
            if clade.is_terminal():  # terminal
                return ((clade.name or '')
                        + make_info_string(clade, terminal=True))
            else:
                subtrees = (newickize(sub) for sub in clade)
                return '(%s)%s' % (','.join(subtrees),
                                   make_info_string(clade))

        # Convert each tree to a string
        for tree in self.trees:
            if ladderize in ('left', 'LEFT', 'right', 'RIGHT'):
                # Nexus compatibility shim, kind of
                tree.ladderize(reverse=(ladderize in ('right', 'RIGHT')))
            rawtree = newickize(tree.root) + ';'
            if plain_newick:
                yield rawtree
                continue
            # Nexus-style (?) notation before the raw Newick tree
            treeline = ['tree', (tree.name or 'a_tree'), '=']
            if tree.weight != 1:
                treeline.append('[&W%s]' % round(float(tree.weight), 3))
            if tree.rooted:
                treeline.append('[&R]')
            treeline.append(rawtree)
            yield ' '.join(treeline)

    def _info_factory(self, plain, confidence_as_branch_length,
                      branch_length_only, max_confidence, format_confidence,
                      format_branch_length):
        """Return a function that creates a nicely formatted node tag."""
        if plain:
            # Plain tree only. That's easy.
            def make_info_string(clade, terminal=False):
                return ''
        elif confidence_as_branch_length:
            # Support as branchlengths (eg. PAUP), ignore actual branchlengths
            def make_info_string(clade, terminal=False):
                if terminal:
                    # terminal branches have 100% support
                    return ':' + format_confidence % max_confidence
                else:
                    return ':' + format_confidence % clade.confidence
        elif branch_length_only:
            # write only branchlengths, ignore support
            def make_info_string(clade, terminal=False):
                return ':' + format_branch_length % clade.branch_length
        else:
            # write support and branchlengths (e.g. .con tree of mrbayes)
            def make_info_string(clade, terminal=False):
                if (terminal or
                        not hasattr(clade, 'confidence') or
                        clade.confidence is None):
                    return (':' + format_branch_length
                            ) % (clade.branch_length or 0.0)
                else:
                    return (format_confidence + ':' + format_branch_length
                            ) % (clade.confidence, clade.branch_length or 0.0)
        return make_info_string
| LyonsLab/coge | bin/last_wrapper/Bio/Phylo/NewickIO.py | Python | bsd-2-clause | 10,986 | [
"Biopython"
] | da68b1e82dbc5f83d2eea225e44bf952980cd49f6a0a02e247ad43e875b713f5 |
"""
A module defining various plotting functions that generally act on a :class:`~dffit.DFFit` instance.
The primary function for general use is :func:`~mfplot`, which is a wrapper around :func:`~dfplot`, and shows a fitted
mass function optionally with uncertainty region, and binned data, along with a histogram of data counts.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import poisson
from chainconsumer import ChainConsumer
import warnings
def dfplot(
    dffit,
    xlab=r"Observable $x$",
    ylab=r"Generative distribution function, $\phi$",
    ylab_histogram="Counts",
    fit_label=None,
    xlim=None,
    ylim=None,
    p_true=None,
    xpower10=False,
    show_input_data=True,
    show_posterior_data=True,
    show_data_histogram=True,
    uncertainty_type=1,
    show_bias_correction=True,
    nbins=None,
    bin_xmin=None,
    bin_xmax=None,
    col_fit="blue",
    lw_fit=2,
    ls_fit="-",
    col_data="purple",
    size_data=20,
    lw_data=1,
    col_posterior="black",
    size_posterior=20,
    lw_posterior=1,
    col_hist="grey",
    col_ref="black",
    lw_ref=1,
    ls_ref=":",
    col_veff="black",
    lw_veff=1.5,
    ls_veff="--",
    legend=True,
    fig=None,
    ax0=None,
    ax1=None,
):
    """
    Display fitted generative distribution function

    This function creates a one-dimensional generative distribution function
    fitted using :class:`pydftools.dffit.DFFit`.

    Parameters
    ----------
    dffit : :class:`~dffit.DFFit` instance
        Provides the data to be fit
    xlab: str, optional
        An x-axis label
    ylab: str, optional
        A y-axis label for the mass-function axis of the figure
    ylab_histogram: str, optional
        A y-axis label for the histogram axis of the figure
    fit_label: str, optional
        A label for the fitted curve, to appear in a legend (useful when overplotting several fits).
    xlim: 2-tuple, optional
        x-axis range
    ylim: 2-tuple, optional
        y-axis range (for mass function axis)
    p_true: sequence, optional
        Parameters of a reference distribution function to be over-plotted on the fitted function. Using `None` will
        omit the reference function.
    xpower10: bool, optional
        If `True`, the model argument x is elevated to the power of 10 in the plots.
    show_input_data: bool, optional
        Whether the input data is shown in bins. Each bin value is simply the sum 1/Veff(x) of the observed x-values
        in this bin.
    show_posterior_data: bool, optional
        Whether the posterior data, constructed from all the individual posterior PDFs of the observed data,
        are shown in bins. Note that posterior data only exists if the fitted data is uncertain (i.e. `x_err` is not
        None in the Data object).
    show_data_histogram: bool, optional
        Whether a histogram of source counts, based on the input data, is displayed in a bottom panel.
    uncertainty_type: int, optional
        How to plot uncertainty regions around the fit. 0: don't plot any. 1: plot Gaussian 1-sigma uncertainties
        propagated from the Hessian matrix of the likelihood. 2: plot 68 percentile region (from 16 to 84 percent).
        3: plot 68 (16 to 84) and 95 (2 to 98) percentile regions.
    show_bias_correction: bool, optional
        Whether the bias corrected MLE is shown instead of the native ML parameters. Note, the ``jackknife()`` method
        must have been called on the fit object for this to work.
    nbins: int, optional
        Number of bins to be plotted in data scatter; must be larger than 0. Choose `None` (default) to determine the
        number of bins automatically.
    bin_xmin, bin_xmax: float, optional
        Left, right edge of first, last bin (for data scatter)
    col_<x>: str, optional
        The color of the line showing object <x>, where x is "fit", "data", "posterior", "hist", "ref" or "veff"
    lw_<x>: float, optional
        The line-width of the line showing object <x>, where x is "fit", "data", "posterior", "ref", or "veff"
    ls_<x>: str, optional
        The linestyle of the line showing object <x>, where x is "fit", "ref" or "veff"
    size_data, size_posterior: float, optional
        The size of the markers in the binned data/posteriors
    legend: bool, optional
        Whether to draw a legend.
    fig, ax0, ax1: optional
        Figure and Axis objects (from matplotlib) defining the canvas on which to draw the plots. These are useful
        for overplotting new fits on the same axis, since they are returned from this function.

    Returns
    -------
    fig : matplotlib figure object
        The figure on which the plot is drawn
    ax : list of axes
        The list of axes (upper and lower, if it exists) that have been plotted, which can be passed in to the same
        function for overplotting
    """
    if dffit.data.n_dim != 1:
        raise ValueError(
            "dfplot only handles 1D distribution functions. Use dfplot2 for 2D functions."
        )
    # Make figure, ax
    # open plot
    if fig is None:
        if show_data_histogram:
            # Two stacked panels sharing the x-axis: DF on top (3/4 of the
            # height), source-count histogram below.
            subplot_kw = {}
            if xpower10:
                subplot_kw.update({"xscale": "log"})
            if xlim is not None:
                subplot_kw.update({"xlim": xlim})
            fig, ax = plt.subplots(
                2,
                1,
                sharex=True,
                subplot_kw=subplot_kw,
                gridspec_kw={"height_ratios": (3, 1), "hspace": 0},
            )
            ax0 = ax[0]
            ax1 = ax[1]
        else:
            fig, ax0 = plt.subplots(
                1,
                1,
                subplot_kw={
                    "xlim": xlim,
                    "ylim": ylim,
                    "xscale": "log" if xpower10 else None,
                },
                gridspec_kw={},
            )
    # Plot DF
    fig, ax = plot_dffit(
        dffit=dffit,
        ylab=ylab,
        xlab=None if show_data_histogram else xlab,
        fit_label=fit_label,
        xpower10=xpower10,
        uncertainty_type=uncertainty_type,
        show_bias_correction=show_bias_correction,
        p_true=p_true,
        col_fit=col_fit,
        lw_fit=lw_fit,
        ls_fit=ls_fit,
        col_ref=col_ref,
        lw_ref=lw_ref,
        ls_ref=ls_ref,
        ylim=ylim,
        legend=legend,
        fig=fig,
        ax=ax0,
    )
    ax0.set_yscale("log")
    # Plot Histogram
    if show_data_histogram:
        fig, ax = plot_hist(
            dffit=dffit,
            xlab=xlab if show_data_histogram else None,
            ylab=ylab_histogram,
            nbins=nbins,
            xpower10=xpower10,
            col_hist=col_hist,
            col_veff=col_veff,
            ls_veff=ls_veff,
            lw_veff=lw_veff,
            fig=fig,
            ax=ax1,
        )
    if show_input_data or show_posterior_data:
        fig, ax = plot_dfdata(
            dffit=dffit,
            nbins=nbins,
            bin_xmin=bin_xmin,
            bin_xmax=bin_xmax,
            show_input_data=show_input_data,
            show_posterior_data=show_posterior_data,
            xpower10=xpower10,
            col_data=col_data,
            size_data=size_data,
            lw_data=lw_data,
            col_posterior=col_posterior,
            size_posterior=size_posterior,
            lw_posterior=lw_posterior,
            fig=fig,
            ax=ax0,
        )
    # When no histogram panel exists, ax1 was never bound; fall back to the
    # single DF axis in that case.
    try:
        ax = [ax0, ax1]
    except NameError:
        ax = ax0
    if fit_label is not None:
        ax0.legend(loc=0)
    return fig, ax
def plot_dffit(
    dffit,
    ylab=r"$\phi [{\rm Mpc}^{-3}{\rm dex}^{-1}]$",
    fit_label=None,
    xlab=None,
    show_uncertainties=True,
    xpower10=False,
    uncertainty_type=1,
    show_bias_correction=True,
    p_true=None,
    col_fit="blue",
    lw_fit=2,
    ls_fit="-",
    col_ref="black",
    lw_ref=1,
    ls_ref=":",
    ylim=None,
    xlim=None,
    legend=True,
    fig=None,
    ax=None,
    figsize=None,
):
    """Plot the fitted distribution function curve with optional uncertainty
    regions and an optional reference (input) curve.

    Parameters mirror :func:`dfplot`; `fig`/`ax` may be supplied to overplot
    on an existing canvas. Returns ``(fig, ax)``.
    """
    # Make sure it's a 1D plot
    if dffit.data.n_dim > 1:
        raise RuntimeError("This plotting routine only deals with 1D distributions.")
    # If not passed a figure/axis, create one.
    if fig is None and ax is None:
        fig, ax = plt.subplots(
            1, 1, figsize=figsize, subplot_kw={"xscale": "log" if xpower10 else None}
        )
        ax.set_yscale("log")
        if xlim is not None:
            ax.set_xlim(xlim)
        ax.set_xlabel(xlab)
    # PLOT UNCERTAINTY REGIONS
    # x-values for all curves, raised to the power of 10 when requested.
    poly_x = 10 ** dffit.grid.x[0] if xpower10 else dffit.grid.x[0]
    if show_uncertainties and dffit.fit.status["converged"]:
        if uncertainty_type > 1 and not hasattr(dffit.grid, "gdf_quantile"):
            raise ValueError("Quantiles not available. Use resampling in dffit.")
        if uncertainty_type == 3:
            # Outermost quantile band (lightest shading).
            ax.fill_between(
                poly_x,
                dffit.grid.gdf_quantile[0],
                dffit.grid.gdf_quantile[-1],
                color=col_fit,
                alpha=0.15,
            )
            # poly_y
            # .95 = pmax(ylim[1], c(dffit.grid.gdf_quantile
            # .02, rev(dffit.grid.gdf_quantile
            # .98)))
            # list = is_finite(poly_x) & is_finite(poly_y
            # .95)
            # polygon(poly_x[list], poly_y
            # .95[list], col = rgb(r, g, b, 0.15), border = np.nan)
        if uncertainty_type >= 2:
            # Inner quantile band (68 percent region).
            ax.fill_between(
                poly_x,
                dffit.grid.gdf_quantile[1],
                dffit.grid.gdf_quantile[-2],
                color=col_fit,
                alpha=0.25,
            )
        if uncertainty_type == 1:
            # Gaussian 1-sigma band propagated from the likelihood Hessian.
            ax.fill_between(
                poly_x,
                dffit.gdf_gaussian_min,
                dffit.gdf_gaussian_max,
                color=col_fit,
                alpha=0.25,
            )
            # poly_y
            # .68 = pmax(ylim[1], c(dffit.grid.gdf - dffit.grid.gdf_error_neg,
            # rev(dffit.grid.gdf + dffit.grid.gdf_error_pos)))
            # list = is_finite(poly_x) & is_finite(poly_y
            # .68)
            # polygon(poly_x[list], poly_y
            # .68[list], col = rgb(r, g, b, 0.25), border = np.nan) # plot central fit
    # PLOT ACTUAL FIT
    # Prefer the bias-corrected MLE curve when it has been computed.
    if (
        show_bias_correction
        and dffit.fit.status["converged"]
        and hasattr(dffit.grid, "gdf_mle_bias_corrected")
    ):
        fit = dffit.grid.gdf_mle_bias_corrected
    else:
        fit = dffit.grid.gdf
    ax.plot(poly_x, fit, color=col_fit, lw=lw_fit, ls=ls_fit, label=fit_label)
    # PLOT REFERENCE
    if p_true is not None:
        # The model is evaluated in log10-x space when xpower10 is on.
        ax.plot(
            poly_x,
            dffit.model.gdf(np.log10(poly_x) if xpower10 else poly_x, p_true),
            color=col_ref,
            lw=lw_ref,
            linestyle=ls_ref,
            label="Input" if legend else None,
        )
    # Pretty Up Plot
    if ylim is not None:
        ax.set_ylim(ylim)
    if ylab is not None:
        ax.set_ylabel(ylab)
    return fig, ax
def plot_hist(
    dffit,
    xlab=r"$M [M_\odot]$",
    ylab="Counts",
    nbins=None,
    xpower10=False,
    col_hist="grey",
    col_veff="black",
    ls_veff="--",
    lw_veff=1.5,
    fig=None,
    ax=None,
    figsize=None,
    xlim=None,
):
    """Plot a histogram of the observed data values, overlaid with the
    (rescaled) effective-volume/selection function of the fit.

    Parameters mirror :func:`dfplot`; `fig`/`ax` may be supplied to draw
    on an existing canvas. Returns ``(fig, ax)``.
    """
    # Make sure it's a 1D plot
    if dffit.data.n_dim > 1:
        raise RuntimeError("This plotting routine only deals with 1D distributions.")
    # If not passed a figure/axis, create one.
    if fig is None and ax is None:
        fig, ax = plt.subplots(
            1,
            1,
            figsize=figsize,
            subplot_kw={"xscale": "log" if xpower10 else None, "xlim": xlim},
        )
    # Plot histogram of input data
    # determine number of bins -- force an integer (consistent with
    # bin_data); np.linspace/np.logspace require an integer `num`, and
    # round(np.sqrt(...)) alone yields a float.
    if nbins is None:
        nbins = min(100, int(round(np.sqrt(dffit.data.n_data))))
    elif nbins <= 0:
        raise ValueError("Choose more than 0 bins.")
    if xpower10:
        bins = np.logspace(dffit.data.x.min(), dffit.data.x.max(), nbins)
    else:
        bins = np.linspace(dffit.data.x.min(), dffit.data.x.max(), nbins)
    hval, bin_edges, patches = ax.hist(
        10 ** dffit.data.x if xpower10 else dffit.data.x, bins=bins, color=col_hist
    )
    # Counts axis carries no ticks; the panel is qualitative.
    ax.get_yaxis().set_ticks([])
    # Plot selection function, rescaled to sit slightly above the tallest bar.
    selfnc = dffit.grid.veff * hval.max() * 1.2 / dffit.grid.veff.max()
    ax.plot(
        10 ** dffit.grid.x[0] if xpower10 else dffit.grid.x[0],
        selfnc,
        color=col_veff,
        ls=ls_veff,
        lw=lw_veff,
    )
    ax.set_ylim((0, selfnc.max() * 1.2))
    ax.set_xlabel(xlab)
    ax.set_ylabel(ylab)
    return fig, ax
def plot_dfdata(
    dffit,
    nbins=None,
    bin_xmin=None,
    bin_xmax=None,
    show_input_data=True,
    show_posterior_data=True,
    xpower10=False,
    col_data="grey",
    size_data=20,
    lw_data=1,
    col_posterior="blue",
    size_posterior=20,
    lw_posterior=1,
    xlab=None,
    ylab=None,
    fig=None,
    ax=None,
    figsize=None,
    xlim=None,
):
    """Scatter-plot the binned input data and/or binned posterior data with
    Poisson error bars.

    Parameters mirror :func:`dfplot`; `fig`/`ax` may be supplied to overplot
    on an existing canvas. Returns ``(fig, ax)``.
    """
    # Make sure it's a 1D plot
    if dffit.data.n_dim > 1:
        raise RuntimeError("This plotting routine only deals with 1D distributions.")
    # If not passed a figure/axis, create one.
    if fig is None and ax is None:
        fig, ax = plt.subplots(
            1,
            1,
            figsize=figsize,
            subplot_kw={"xscale": "log" if xpower10 else None, "xlim": xlim},
        )
        ax.set_yscale("log")
        ax.set_xlabel(xlab)
    # bin data
    bin = bin_data(dffit, nbins, bin_xmin, bin_xmax)
    # plot binned input data points
    # bin = list()
    # mode 0 draws the raw input data, mode 1 the posterior data.
    for mode in range(2):
        if mode == 0:
            show = show_input_data
            if show:
                bin_count = bin["histogram"]
                bin_gdf = bin["gdf_input"]
                bin_xmean = bin["xmean_input"]
                col = col_data
                size = size_data
                lw = lw_data
        else:
            # Posterior data only exists when the fit carries uncertainties.
            show = show_posterior_data and not dffit.ignore_uncertainties
            # First make sure effective_counts has been created:
            dffit.posterior
            if show:
                bin_count = bin["count_posterior"]
                bin_gdf = bin["gdf_posterior"]
                bin_xmean = bin["xmean_posterior"]
                col = col_posterior
                size = size_posterior
                lw = lw_posterior
        if show:
            # Keep only bins with a positive gdf value.
            lst = bin_gdf > 0
            bin_count = bin_count[lst]
            bin_gdf = bin_gdf[lst]
            bin_xmean = bin_xmean[lst]
            # Poisson 16/84-percentile fractions give asymmetric error bars;
            # bins whose lower fraction falls below `pm` become upper limits.
            pm = 0.05
            f_16 = poisson.ppf(0.16, bin_count) / bin_count
            f_84 = poisson.ppf(0.84, bin_count) / bin_count
            upper = f_16 < pm
            f_16 = np.clip(f_16, pm, np.inf)

            def xpow(x):
                return 10 ** x if xpower10 else x

            ax.errorbar(
                xpow(bin_xmean),
                bin_gdf,
                yerr=[bin_gdf * (1 - f_16), bin_gdf * (f_84 - 1)],
                xerr=[
                    xpow(bin_xmean) - xpow(bin["xedges"][:-1][lst]),
                    xpow(bin["xedges"][1:][lst]) - xpow(bin_xmean),
                ],
                color=col,
                lw=lw,
                uplims=upper,
                ls="none",
                ms=size,
            )
    if ylab is not None:
        ax.set_ylabel(ylab)
    return fig, ax
def bin_data(dffit, nbins=None, bin_xmin=None, bin_xmax=None):
    """Bin the observed data (and, when uncertainties exist, the posterior
    data) of a fit.

    Returns a dict holding the bin edges/centres, the input-data histogram
    and 1/Veff-weighted gdf values, plus posterior-weighted equivalents
    when the fit carries measurement uncertainties.
    """
    # initialize
    x = dffit.data.x
    bin = dict()  # NOTE: shadows the builtin `bin`; name kept for continuity
    n_data = len(x)
    # determine number of bins
    if nbins is None:
        nbins = min(100, int(round(np.sqrt(n_data))))
    else:
        if nbins <= 0:
            raise ValueError("Choose more than 0 bins.")
    bin["n"] = int(nbins)
    # make bin intervals
    if bin_xmin is None:
        # Pad the data range by a quarter-bin so extreme values fall inside.
        bin["xmin"] = x.min() - (x.max() - x.min()) / bin["n"] * 0.25
    else:
        bin["xmin"] = bin_xmin
    if bin_xmax is None:
        bin["xmax"] = x.max() + (x.max() - x.min()) / bin["n"] * 0.25
    else:
        bin["xmax"] = bin_xmax
    wx = bin["xmax"] - bin["xmin"]
    bin["dx"] = wx / bin["n"]
    bin["xedges"] = np.linspace(bin["xmin"], bin["xmax"], bin["n"] + 1)
    bin["xcenter"] = (bin["xedges"][1:] + bin["xedges"][:-1]) / 2
    # Mask out entries outside bin range
    x = x[np.logical_and(x >= bin["xmin"], x < bin["xmax"])]
    # fill input data into bins
    v = dffit.selection.Veff(x)
    # Generate the index of each sample in the bin space
    x_bins = np.digitize(x, bin["xedges"]) - 1
    bin["gdf_input"] = np.bincount(
        x_bins, weights=1 / bin["dx"] / v, minlength=bin["n"]
    )
    bin["histogram"] = np.bincount(x_bins, minlength=bin["n"])
    with warnings.catch_warnings():
        # Empty bins divide by zero below; the resulting nan is acceptable.
        warnings.simplefilter("ignore", RuntimeWarning)
        bin["xmean_input"] = np.bincount(
            x_bins, weights=x, minlength=bin["n"]
        ) / np.clip(bin["histogram"], 0, np.inf)
    # fill posterior data into bins
    if not dffit.ignore_uncertainties:
        # Ensure that effective counts has been initialised
        dffit.posterior
        # bin['gdf_posterior'] = bin.count_posterior = bin.xmean_posterior = array(0, bin.n)
        xg = dffit.grid.x[0]
        mask = np.logical_and(xg >= bin["xmin"], xg < bin["xmax"])
        xg = xg[mask]
        xg_bins = np.digitize(xg, bin["xedges"]) - 1
        scd = np.bincount(
            xg_bins, weights=dffit.grid.scd_posterior[mask], minlength=bin["n"]
        )
        cnts = np.bincount(xg_bins, minlength=bin["n"])
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            # Posterior-weighted mean x per bin.
            bin["xmean_posterior"] = (
                np.bincount(
                    xg_bins,
                    weights=dffit.grid.scd_posterior[mask] * xg,
                    minlength=bin["n"],
                )
                / scd
            )
            # Average effective number of sources per bin.
            bin["count_posterior"] = (
                np.bincount(
                    xg_bins,
                    weights=dffit.grid.effective_counts[mask],
                    minlength=bin["n"],
                )
                / cnts
            )
            # Posterior gdf estimate (source-count density over Veff).
            bin["gdf_posterior"] = (
                np.bincount(
                    xg_bins,
                    weights=dffit.grid.scd_posterior[mask] / dffit.grid.veff[mask],
                    minlength=bin["n"],
                )
                / cnts
            )
    return bin
def mfplot(
    dffit,
    xlab=r"$M [M_\odot]$",
    ylab=r"$\phi [{\rm Mpc}^{-3}{\rm dex}^{-1}]$",
    xpower10=True,
    show_data_histogram=True,
    **kwargs,
):
    """
    A convenience wrapper around :func:`~dfplot` with defaults suited to
    mass functions (log10 masses plotted on a log axis, histogram panel on).
    """
    # Collect the mass-function defaults alongside any caller overrides and
    # forward everything to the generic plotter.
    options = dict(
        xlab=xlab,
        ylab=ylab,
        xpower10=xpower10,
        show_data_histogram=show_data_histogram,
    )
    options.update(kwargs)
    return dfplot(dffit, **options)
def plotcov(fits, names=None, p_true=None, figsize="grow"):
    """
    Plot covariance ellipses for each of the fitted parameters.

    Parameters
    ----------
    fits : list
        A list of :class:`~dffit.DFFit` objects for which to show the ellipses.
    names: list
        A list with the same length as `fits`, defining names for each fit to appear in a legend.
    p_true : vector
        A vector defining a reference set of parameters
    figsize : str or tuple
        Either a tuple defining the figure size in inches, or a string defining a sizing scheme (see ChainConsumer
        documentation for details).

    Returns
    -------
    fig : matplotlib figure
    """
    if names is None:
        names = [None] * len(fits)
    # This is a bit slow and hacky, but easy to write up: sample a Gaussian
    # cloud from each fit's ML covariance and let ChainConsumer draw it.
    consumer = ChainConsumer()
    for dffit_obj, label in zip(fits, names):
        samples = np.random.multivariate_normal(
            mean=dffit_obj.fit.p_best, cov=dffit_obj.fit.p_covariance, size=10000
        )
        consumer.add_chain(samples, parameters=dffit_obj.model.names, name=label)
    truth = list(p_true) if p_true is not None else None
    return consumer.plotter.plot(
        figsize=figsize,
        truth=truth,
        legend=names[0] is not None,
    )
| steven-murray/pydftools | pydftools/plotting.py | Python | mit | 20,237 | [
"Gaussian"
] | 8c7401382c9f1382b70f29fcf96a67efa4ba28ca798475b33c6ad3fd97b94893 |
# Manual stimulus used to find the receptive field position and size.
#
# Copyright (C) 2010-2012 Huang Xin
#
# See LICENSE.TXT that came with this file.
from Experiments.Experiment import ExperimentConfig,ManbarExp,MangratingExp
# Configure the experiment session (output under 'data'); new_cell=False --
# presumably reuses the current cell record rather than starting a new one;
# confirm against ExperimentConfig's documentation.
ExperimentConfig(data_base_dir='data',new_cell=False)
# When a neuron is isolated in Plexon PlexControl software, the experimenter should choose a proper
# bar to estimate the size and position of the receptive field of that neuron.
p_left, p_right = ManbarExp(left_params=None, right_params=None).run()
# The bar parameter is passed to mangrating and proper spatial frequency and orientation should be choosed.
p_left, p_right = MangratingExp(left_params=None, right_params=None).run() | chrox/RealTimeElectrophy | Experimenter/man_stimulus_current_cell.py | Python | bsd-2-clause | 718 | [
"NEURON"
] | f894d4a24bcd753f965abd50cada1dacfc43cb737dfd893d46d51a350dd2bcc5 |
"""High level summaries of samples and programs with MultiQC.
https://github.com/ewels/MultiQC
"""
import collections
import glob
import io
import json
import mimetypes
import os
import pandas as pd
import shutil
import numpy as np
from collections import OrderedDict
import pybedtools
import six
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.provenance import do, programs
from bcbio.provenance import data as provenancedata
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.bam import ref
from bcbio.qc.qsignature import get_qsig_multiqc_files
from bcbio.structural import annotate
from bcbio.utils import walk_json
from bcbio.variation import bedutils
from bcbio.qc.variant import get_active_vcinfo
from bcbio.upload import get_all_upload_paths_from_sample
from bcbio.variation import coverage
from bcbio.chipseq import atac
def summary(*samples):
    """Summarize all quality metrics together.

    Runs MultiQC over the per-sample QC inputs, writing the report under
    ``<work>/qc/multiqc``, and attaches the report and its secondary data
    files to the first sample's ``summary`` entry. Returns the samples in
    the nested-list form used by the pipeline.
    """
    samples = list(utils.flatten(samples))
    work_dir = dd.get_work_dir(samples[0])
    multiqc = config_utils.get_program("multiqc", samples[0]["config"])
    if not multiqc:
        logger.debug("multiqc not found. Update bcbio_nextgen.py tools to fix this issue.")
    out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "multiqc"))
    out_data = os.path.join(out_dir, "multiqc_data")
    out_file = os.path.join(out_dir, "multiqc_report.html")
    file_list = os.path.join(out_dir, "list_files.txt")
    # Work on copies so CWL tarball unpacking does not mutate the inputs.
    work_samples = cwlutils.unpack_tarballs([utils.deepish_copy(x) for x in samples], samples[0])
    work_samples = _summarize_inputs(work_samples, out_dir)
    if not utils.file_exists(out_file):
        # Run MultiQC in a transactional temp dir and move outputs into
        # place only on success.
        with tx_tmpdir(samples[0], work_dir) as tx_out:
            in_files = _get_input_files(work_samples, out_dir, tx_out)
            in_files += _merge_metrics(work_samples, out_dir)
            if _one_exists(in_files):
                with utils.chdir(out_dir):
                    config_file = _create_config_file(out_dir, work_samples)
                    input_list_file = _create_list_file(in_files, file_list)
                    if dd.get_tmp_dir(samples[0]):
                        export_tmp = "export TMPDIR=%s && " % dd.get_tmp_dir(samples[0])
                    else:
                        export_tmp = ""
                    locale_export = utils.locale_export()
                    path_export = utils.local_path_export()
                    other_opts = config_utils.get_resources("multiqc", samples[0]["config"]).get("options", [])
                    other_opts = " ".join([str(x) for x in other_opts])
                    cmd = ("{path_export}{export_tmp}{locale_export} "
                           "{multiqc} -c {config_file} -f -l {input_list_file} {other_opts} -o {tx_out}")
                    do.run(cmd.format(**locals()), "Run multiqc")
                    if utils.file_exists(os.path.join(tx_out, "multiqc_report.html")):
                        shutil.move(os.path.join(tx_out, "multiqc_report.html"), out_file)
                        shutil.move(os.path.join(tx_out, "multiqc_data"), out_data)
    samples = _group_by_sample_and_batch(samples)
    if utils.file_exists(out_file) and samples:
        # Collect secondary files to upload alongside the report.
        data_files = set()
        for i, data in enumerate(samples):
            data_files.add(os.path.join(out_dir, "report", "metrics", dd.get_sample_name(data) + "_bcbio.txt"))
        data_files.add(os.path.join(out_dir, "report", "metrics", "target_info.yaml"))
        data_files.add(os.path.join(out_dir, "multiqc_config.yaml"))
        [data_files.add(f) for f in glob.glob(os.path.join(out_dir, "multiqc_data", "*"))]
        data_files = [f for f in data_files if f and utils.file_exists(f)]
        if "summary" not in samples[0]:
            samples[0]["summary"] = {}
        samples[0]["summary"]["multiqc"] = {"base": out_file, "secondary": data_files}
        data_json = os.path.join(out_dir, "multiqc_data", "multiqc_data.json")
        data_json_final = _save_uploaded_data_json(samples, data_json, os.path.join(out_dir, "multiqc_data"))
        if data_json_final:
            samples[0]["summary"]["multiqc"]["secondary"].append(data_json_final)
        # Prepare final file list and inputs for downstream usage
        file_list_final = _save_uploaded_file_list(samples, file_list, out_dir)
        if file_list_final:
            samples[0]["summary"]["multiqc"]["secondary"].append(file_list_final)
        if any([cwlutils.is_cwl_run(d) for d in samples]):
            # CWL runs ship the inputs/report directories as tarballs.
            for indir in ["inputs", "report"]:
                tarball = os.path.join(out_dir, "multiqc-%s.tar.gz" % (indir))
                if not utils.file_exists(tarball):
                    with utils.chdir(out_dir):
                        cmd = ["tar", "-czvpf", tarball, indir]
                        do.run(cmd, "Compress multiqc inputs: %s" % indir)
                samples[0]["summary"]["multiqc"]["secondary"].append(tarball)
    if any([cwlutils.is_cwl_run(d) for d in samples]):
        samples = _add_versions(samples)
    return [[data] for data in samples]
def _add_versions(samples):
    """Attach tool and data version information to the lead sample's summary.

    Mutates samples[0] in place and returns the full sample list.
    """
    lead = samples[0]
    lead["versions"] = {
        "tools": programs.write_versions(lead["dirs"], lead["config"]),
        "data": provenancedata.write_versions(lead["dirs"], samples),
    }
    return samples
def _summarize_inputs(samples, out_dir):
    """Summarize inputs for MultiQC reporting in display.

    Writes target information, merged fastqc tables and optional preseq
    summaries under `out_dir/report`. Returns the (possibly updated)
    sample list.
    """
    logger.info("summarize target information")
    if samples[0].get("analysis", "").lower() in ["variant", "variant2"]:
        metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
        samples = _merge_target_information(samples, metrics_dir)
    logger.info("summarize fastqc")
    # Use a distinct local rather than rebinding out_dir; previously the
    # preseq directory was joined onto the fastqc directory, nesting the
    # output under report/fastqc/report/preseq instead of report/preseq.
    fastqc_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc"))
    with utils.chdir(fastqc_dir):
        _merge_fastqc(samples)
    preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
    if preseq_samples:
        logger.info("summarize preseq")
        preseq_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq"))
        with utils.chdir(preseq_dir):
            _merge_preseq(preseq_samples)
    return samples
def _save_uploaded_data_json(samples, data_json_work, out_dir):
    """ Fixes all absolute work-rooted paths to relative final-rooted paths

    Returns the rewritten JSON path, the original path when no mapping is
    available, or None when the input file is missing.
    """
    if not utils.file_exists(data_json_work):
        return None
    path_map = {}
    for sample in samples:
        path_map.update(get_all_upload_paths_from_sample(sample))
    if not path_map:
        # Nothing to remap; keep the work-rooted JSON as-is.
        return data_json_work
    upload_base = samples[0]["upload"]["dir"]
    with io.open(data_json_work, encoding="utf-8") as in_handle:
        contents = json.load(in_handle, object_pairs_hook=OrderedDict)
    contents = walk_json(contents, lambda s: _work_path_to_rel_final_path(s, path_map, upload_base))
    data_json_final = os.path.join(out_dir, "multiqc_data_final.json")
    with io.open(data_json_final, "w", encoding="utf-8") as out_handle:
        json.dump(contents, out_handle, indent=4)
    return data_json_final
def _save_uploaded_file_list(samples, file_list_work, out_dir):
    """ Fixes all absolute work-rooted paths to relative final-rooted paths

    For CWL, prepare paths relative to output directory.
    Returns the path of the rewritten file list, or None when there is
    nothing to rewrite (missing input, no upload mapping, or no surviving
    paths).
    """
    if not utils.file_exists(file_list_work):
        return None
    if any([cwlutils.is_cwl_run(d) for d in samples]):
        # CWL runs: keep only existing paths under out_dir, made relative
        # by stripping the out_dir prefix.
        upload_paths = []
        with open(file_list_work) as f:
            for p in (l.strip() for l in f.readlines() if os.path.exists(l.strip())):
                if p.startswith(out_dir):
                    upload_paths.append(p.replace(out_dir + "/", ""))
    else:
        # Non-CWL runs: remap work-rooted paths onto their final upload
        # locations using the per-sample upload mapping.
        upload_path_mapping = dict()
        for sample in samples:
            upload_path_mapping.update(get_all_upload_paths_from_sample(sample))
        if not upload_path_mapping:
            return None
        with open(file_list_work) as f:
            paths = [l.strip() for l in f.readlines() if os.path.exists(l.strip())]
        # Drop paths that could not be remapped (None results).
        upload_paths = [p for p in [
            _work_path_to_rel_final_path(path, upload_path_mapping, samples[0]["upload"]["dir"])
            for path in paths
        ] if p]
    if not upload_paths:
        return None
    file_list_final = os.path.join(out_dir, "list_files_final.txt")
    with open(file_list_final, "w") as f:
        for path in upload_paths:
            f.write(path + '\n')
    return file_list_final
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir):
""" Check if `path` is a work-rooted path, and convert to a relative final-rooted path
"""
if not path or not isinstance(path, str):
return path
upload_path = None
# First, check in the mapping: if it's there is a direct reference and
# it's a file, we immediately return it (saves lots of iterations)
if upload_path_mapping.get(path) is not None and os.path.isfile(path):
upload_path = upload_path_mapping[path]
else:
# Not a file: check for elements in the mapping that contain
# it
paths_to_check = [key for key in upload_path_mapping
if path.startswith(key)]
if paths_to_check:
for work_path in paths_to_check:
if os.path.isdir(work_path):
final_path = upload_path_mapping[work_path]
upload_path = path.replace(work_path, final_path)
break
if upload_path is not None:
return os.path.relpath(upload_path, upload_base_dir)
else:
return None
def _one_exists(input_files):
"""
at least one file must exist for multiqc to run properly
"""
for f in input_files:
if os.path.exists(f):
return True
return False
def _get_input_files(samples, base_dir, tx_out_dir):
    """Retrieve input files, keyed by sample and QC method name.

    Stages files into the work directory to ensure correct names for
    MultiQC sample assessment when running with CWL.
    Returns a sorted, de-duplicated list of file paths (plus two glob
    patterns for back-compatibility on non-CWL runs).
    """
    in_files = collections.defaultdict(list)
    for data in samples:
        sum_qc = tz.get_in(["summary", "qc"], data, {})
        if sum_qc in [None, "None"]:
            sum_qc = {}
        elif isinstance(sum_qc, six.string_types):
            # A bare path: key it under the first configured QC method.
            sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc}
        elif not isinstance(sum_qc, dict):
            raise ValueError("Unexpected summary qc: %s" % sum_qc)
        for program, pfiles in sum_qc.items():
            if isinstance(pfiles, dict):
                pfiles = [pfiles["base"]] + pfiles.get("secondary", [])
            # CWL: presents output files as single file plus associated secondary files
            elif isinstance(pfiles, six.string_types):
                if os.path.exists(pfiles):
                    # Expand to every file under the directory containing pfiles.
                    pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames]
                else:
                    pfiles = []
            in_files[(dd.get_sample_name(data), program)].extend(pfiles)
    staged_files = []
    for (sample, program), files in in_files.items():
        cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program))
        for f in files:
            if _check_multiqc_input(f) and _is_good_file_for_multiqc(f):
                # Copy into a stable staging location when the file lives in
                # a temporary directory or when running under CWL.
                if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]):
                    staged_f = os.path.join(cur_dir, os.path.basename(f))
                    shutil.copy(f, staged_f)
                    staged_files.append(staged_f)
                else:
                    staged_files.append(f)
    staged_files.extend(get_qsig_multiqc_files(samples))
    # Back compatible -- to migrate to explicit specifications in input YAML
    if not any([cwlutils.is_cwl_run(d) for d in samples]):
        staged_files += ["trimmed", "htseq-count/*summary"]
    # Add in created target_info file
    if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")):
        staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")]
    return sorted(list(set(staged_files)))
def _in_temp_directory(f):
return any(x.startswith("tmp") for x in f.split("/"))
def _get_batches(data):
    """Return the sample's batches as a sequence, defaulting to the sample name."""
    batches = dd.get_batch(data) or dd.get_sample_name(data)
    if isinstance(batches, (list, tuple)):
        return batches
    return [batches]
def _group_by_sample_and_batch(samples):
    """Group samples split by QC method back one per sample-batch.
    """
    grouped = collections.defaultdict(list)
    for data in samples:
        key = (dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))
        grouped[key].append(data)
    return [group[0] for group in grouped.values()]
def _create_list_file(paths, out_file):
with open(out_file, "w") as f:
for path in paths:
f.write(path + '\n')
return out_file
def _create_config_file(out_dir, samples):
    """Provide configuration file for multiqc report.

    Writes multiqc_config.yaml under out_dir and returns its path.
    The config controls visible columns, Qualimap depth thresholds,
    bcftools somatic/germline table splitting and module ordering.
    """
    out_file = os.path.join(out_dir, "multiqc_config.yaml")
    out = {"table_columns_visible": dict()}
    extra_fn_clean_trim = []
    extra_fn_clean_trim.extend(["coverage.mosdepth.region.dist", "coverage.mosdepth.global.dist"])
    out["extra_fn_clean_trim"] = extra_fn_clean_trim
    # Avoid duplicated bcbio columns with qualimap
    if any(("qualimap" in dd.get_tools_on(d) or "qualimap_full" in dd.get_tools_on(d)) for d in samples):
        # Hiding metrics duplicated by Qualimap
        out["table_columns_visible"]["bcbio"] = {"Average_insert_size": False}
        out["table_columns_visible"]["FastQC"] = {"percent_gc": False}
        # Setting up thresholds for Qualimap depth cutoff calculations, based on sample avg depths
        avg_depths = [tz.get_in(["summary", "metrics", "Avg_coverage"], s) for s in samples]
        avg_depths = [x for x in avg_depths if x]
        # Picking all thresholds up to the highest sample average depth
        thresholds = [t for t in coverage.DEPTH_THRESHOLDS if not avg_depths or t <= max(avg_depths)]
        # ...plus one more
        if len(thresholds) < len(coverage.DEPTH_THRESHOLDS):
            thresholds.append(coverage.DEPTH_THRESHOLDS[len(thresholds)])
        # Showing only thresholds surrounding any of average depths
        thresholds_hidden = []
        for i, t in enumerate(thresholds):
            if t > 20:  # Not hiding anything below 20x
                # Keep t visible when some sample's average depth falls in a
                # bracket adjacent to it; otherwise hide it.
                if any(thresholds[i-1] <= c < thresholds[i] for c in avg_depths if c and i-1 >= 0) or \
                   any(thresholds[i] <= c < thresholds[i+1] for c in avg_depths if c and i+1 < len(thresholds)):
                    pass
                else:
                    thresholds_hidden.append(t)
        # Hide coverage unless running full qualimap, downsampled inputs are confusing
        if not any(("qualimap_full" in dd.get_tools_on(d)) for d in samples):
            thresholds_hidden = thresholds + thresholds_hidden
            thresholds_hidden.sort()
            thresholds = []
        out['qualimap_config'] = {
            'general_stats_coverage': [str(t) for t in thresholds],
            'general_stats_coverage_hidden': [str(t) for t in thresholds_hidden]}
    # Avoid confusing peddy outputs, sticking to ancestry and sex prediction
    out["table_columns_visible"]["Peddy"] = {"family_id": False, "sex_het_ratio": False,
                                             "error_sex_check": False}
    # Setting the module order
    module_order = []
    module_order.extend([
        "bcbio",
        "samtools",
        "goleft_indexcov",
        "peddy"
    ])
    out['bcftools'] = {'write_separate_table': True}
    # if germline calling was performed:
    if any("germline" in (get_active_vcinfo(s) or {}) or  # tumor-only somatic with germline extraction
           dd.get_phenotype(s) == "germline" or  # or paired somatic with germline calling for normal
           _has_bcftools_germline_stats(s)  # CWL organized statistics
           for s in samples):
        # Split somatic and germline variant stats into separate multiqc submodules,
        # with somatic going into General Stats, and germline going into a separate table:
        module_order.extend([{
            'bcftools': {
                'name': 'Bcftools (somatic)',
                'info': 'Bcftools stats for somatic variant calls only.',
                'path_filters': ['*_bcftools_stats.txt'],
                'custom_config': {'write_general_stats': True},
            }},
            {'bcftools': {
                'name': 'Bcftools (germline)',
                'info': 'Bcftools stats for germline variant calls only.',
                'path_filters': ['*_bcftools_stats_germline.txt'],
                'custom_config': {'write_general_stats': False},
            }},
        ])
    else:
        module_order.append("bcftools")
    module_order.extend([
        "salmon",
        "star",
        "picard",
        "qualimap",
        "snpeff",
        "bismark",
        "fastqc",
        "preseq"
    ])
    out["module_order"] = module_order
    preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
    if preseq_samples:
        out["preseq"] = _make_preseq_multiqc_config(preseq_samples)
    with open(out_file, "w") as out_handle:
        yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return out_file
def _has_bcftools_germline_stats(data):
    """Check for the presence of a germline stats file, CWL compatible.

    Returns True when the summary QC references a bcftools germline
    statistics output (its filename contains "bcftools_stats_germline").
    """
    stats_file = tz.get_in(["summary", "qc"], data)
    if isinstance(stats_file, dict):
        stats_file = tz.get_in(["variants", "base"], stats_file)
    if not stats_file:
        stats_file = ""
    # Fix: the previous `find(...) > 0` test missed a match at position 0,
    # so a path *starting* with the marker was wrongly reported as absent.
    return "bcftools_stats_germline" in stats_file
def _check_multiqc_input(path):
    """Return path if the file exists, otherwise None.

    The original fell off the end of the function for missing files,
    returning None implicitly; the explicit return makes the contract
    clear (the docstring previously claimed "return empty").
    """
    if utils.file_exists(path):
        return path
    return None
# ## report and coverage
def _is_good_file_for_multiqc(fpath):
"""Returns False if the file is binary or image."""
# Use mimetypes to exclude binary files where possible
(ftype, encoding) = mimetypes.guess_type(fpath)
if encoding is not None:
return False
if ftype is not None and ftype.startswith('image'):
return False
return True
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file."""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
# stats is reported in read pairs while Reads in the final bam in single
# reporting here single reads as well
disambig_stats_single_reads = [2 * x for x in disambig_stats]
return disambig_stats_single_reads
def _add_disambiguate(sample):
    """Add disambiguation read counts to the sample's summary metrics, if run."""
    if "disambiguate" not in sample:
        return sample
    summary_file = sample["disambiguate"]["summary"]
    if not utils.file_exists(summary_file):
        return sample
    counts = _parse_disambiguate(summary_file)
    metrics = sample["summary"]["metrics"]
    metrics["Disambiguated %s reads" % str(sample["genome_build"])] = counts[0]
    disambig_conf = sample["config"]["algorithm"]["disambiguate"]
    if isinstance(disambig_conf, (list, tuple)):
        disambig_genome = disambig_conf[0]
    else:
        disambig_genome = disambig_conf
    metrics["Disambiguated %s reads" % disambig_genome] = counts[1]
    metrics["Disambiguated ambiguous reads"] = counts[2]
    return sample
def _add_atac(sample):
    """Merge ENCODE ATAC-seq complexity metrics into the sample summary, if any."""
    encode_metrics = atac.calculate_encode_complexity_metrics(sample)
    if encode_metrics:
        sample["summary"]["metrics"] = tz.merge(encode_metrics, sample["summary"]["metrics"])
    return sample
def _fix_duplicated_rate(dt):
"""Get RNA duplicated rate if exists and replace by samtools metric"""
if "Duplication_Rate_of_Mapped" in dt:
dt["Duplicates_pct"] = 100.0 * dt["Duplication_Rate_of_Mapped"]
return dt
def _merge_metrics(samples, out_dir):
    """Merge metrics from multiple QC steps

    Writes one `<sample>_bcbio.txt` table per sample under
    out_dir/report/metrics and returns the list of written files.
    """
    logger.info("summarize metrics")
    out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
    sample_metrics = collections.defaultdict(dict)
    for s in samples:
        s = _add_disambiguate(s)
        s = _add_atac(s)
        m = tz.get_in(['summary', 'metrics'], s)
        if isinstance(m, six.string_types):
            # Metrics can arrive serialized as a JSON string (e.g. CWL runs).
            m = json.loads(m)
        if m:
            for me in list(m.keys()):
                # Only scalar metrics fit the flat per-sample table.
                if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):
                    m.pop(me, None)
            sample_metrics[dd.get_sample_name(s)].update(m)
    out = []
    for sample_name, m in sample_metrics.items():
        sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name)
        with file_transaction(samples[0], sample_file) as tx_out_file:
            dt = pd.DataFrame(m, index=['1'])
            # Normalize column names for MultiQC (no spaces or parentheses).
            dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
            dt['sample'] = sample_name
            if m.get('rRNA_rate'):
                dt['rRNA_rate'] = m.get('rRNA_rate')
            if m.get("RiP"):
                # RiP as a percentage of total reads — presumably
                # "reads in peaks" from chip/atac-seq; confirm upstream.
                dt['RiP_pct'] = "%.3f" % (int(m.get("RiP")) / float(m.get("Total_reads", 1)) * 100)
            dt = _fix_duplicated_rate(dt)
            dt.transpose().to_csv(tx_out_file, sep="\t", header=False)
        out.append(sample_file)
    return out
def _merge_fastqc(samples):
    """
    merge all fastqc samples into one by module

    Collects per-sample fastqc tsv outputs and writes one combined table
    per metric into the current working directory (caller chdirs first).
    """
    fastqc_list = collections.defaultdict(list)
    seen = set()
    for data in samples:
        name = dd.get_sample_name(data)
        if name in seen:
            # Samples can appear once per QC method; only read fastqc once.
            continue
        seen.add(name)
        fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*")
        for fn in fns:
            if fn.endswith("tsv"):
                metric = os.path.basename(fn)
                fastqc_list[metric].append([name, fn])
    for metric in fastqc_list:
        dt_by_sample = []
        for fn in fastqc_list[metric]:
            # fn is a [sample_name, file_path] pair.
            dt = pd.read_csv(fn[1], sep="\t")
            dt['sample'] = fn[0]
            dt_by_sample.append(dt)
        dt = utils.rbind(dt_by_sample)
        # Output filename is the metric basename, relative to the cwd.
        dt.to_csv(metric, sep="\t", index=False, mode ='w')
    return samples
def _merge_preseq(samples):
    """Write combined preseq real counts for all samples.

    Creates preseq_real_counts.txt in the current working directory and
    records it as a secondary QC file on the first sample. Returns None;
    mutates samples[0] in place.
    """
    metrics = [utils.get_in(s, ("summary", "metrics")) for s in samples]
    real_counts_file = os.path.abspath(os.path.join("preseq_real_counts.txt"))
    with file_transaction(samples[0], real_counts_file) as tx_out_file:
        with open(tx_out_file, "w") as f:
            for s, m in zip(samples, metrics):
                line = dd.get_sample_name(s) + "\t" + str(m["Preseq_read_count"])
                if m.get("Preseq_unique_count") is not None:
                    # Optional third column: unique read count.
                    line += "\t" + str(m["Preseq_unique_count"])
                line += "\n"
                f.write(line)
    samples[0]["summary"]["qc"]["preseq"]["secondary"] = [real_counts_file]
def _make_preseq_multiqc_config(samples):
    """Build the preseq section of the MultiQC config from sample metrics."""
    metrics = [utils.get_in(s, ("summary", "metrics")) for s in samples]
    conf = {"read_length": float(np.median([m["Preseq_read_length"] for m in metrics]))}
    genome_sizes = {m["Preseq_genome_size"] for m in metrics}
    # Only report a genome size when it is consistent across samples.
    if len(genome_sizes) == 1:
        conf["genome_size"] = genome_sizes.pop()
    return conf
def _merge_target_information(samples, metrics_dir):
    """Write shared target/genome information for MultiQC reporting.

    Produces target_info.yaml in metrics_dir with genome, variant-region
    and coverage BED summaries, but only for values shared by all
    samples. Returns samples unchanged.
    """
    out_file = os.path.abspath(os.path.join(metrics_dir, "target_info.yaml"))
    if utils.file_exists(out_file):
        # Already generated on a previous run.
        return samples
    genomes = set(dd.get_genome_build(data) for data in samples)
    coverage_beds = set(dd.get_coverage(data) for data in samples)
    original_variant_regions = set(dd.get_variant_regions_orig(data) for data in samples)
    data = samples[0]
    info = {}
    # Reporting in MultiQC only if the genome is the same across all samples
    if len(genomes) == 1:
        info["genome_info"] = {
            "name": dd.get_genome_build(data),
            "size": sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]),
        }
    # Reporting in MultiQC only if the target is the same across all samples
    vcr_orig = None
    if len(original_variant_regions) == 1 and list(original_variant_regions)[0] is not None:
        vcr_orig = list(original_variant_regions)[0]
        vcr_clean = bedutils.clean_file(vcr_orig, data)
        info["variants_regions_info"] = {
            "bed": vcr_orig,
            "size": sum(len(x) for x in pybedtools.BedTool(dd.get_variant_regions_merged(data))),
            "regions": pybedtools.BedTool(vcr_clean).count(),
        }
        gene_num = annotate.count_genes(vcr_clean, data)
        if gene_num is not None:
            info["variants_regions_info"]["genes"] = gene_num
    else:
        info["variants_regions_info"] = {
            "bed": "callable regions",
        }
    # Reporting in MultiQC only if the target is the same across samples
    if len(coverage_beds) == 1:
        cov_bed = list(coverage_beds)[0]
        if cov_bed not in [None, "None"]:
            if vcr_orig and vcr_orig == cov_bed:
                # Coverage BED matches the variant regions: reuse that summary.
                info["coverage_bed_info"] = info["variants_regions_info"]
            else:
                clean_bed = bedutils.clean_file(cov_bed, data, prefix="cov-", simple=True)
                info["coverage_bed_info"] = {
                    "bed": cov_bed,
                    "size": pybedtools.BedTool(cov_bed).total_coverage(),
                    "regions": pybedtools.BedTool(clean_bed).count(),
                }
                gene_num = annotate.count_genes(clean_bed, data)
                if gene_num is not None:
                    info["coverage_bed_info"]["genes"] = gene_num
        else:
            info["coverage_bed_info"] = info["variants_regions_info"]
    coverage_intervals = set(data["config"]["algorithm"]["coverage_interval"] for data in samples)
    if len(coverage_intervals) == 1:
        info["coverage_interval"] = list(coverage_intervals)[0]
    if info:
        with open(out_file, "w") as out_handle:
            yaml.safe_dump(info, out_handle)
    return samples
| lbeltrame/bcbio-nextgen | bcbio/qc/multiqc.py | Python | mit | 26,918 | [
"HTSeq"
] | e5b1aa1aae46942039d1a1f7fa3904a663b5e8253830777f21b52e7a7b0fa39a |
"""
This class is used to define the plot using the plot attributes.
"""
from DIRAC import S_OK, gLogger
from DIRAC.MonitoringSystem.Client.Types.ComponentMonitoring import ComponentMonitoring
from DIRAC.MonitoringSystem.private.Plotters.BasePlotter import BasePlotter
__RCSID__ = "$Id$"
class ComponentMonitoringPlotter(BasePlotter):
    """
    .. class:: ComponentMonitoringPlotter

    It is used to create the plots.

    param: str _typeName monitoring type
    param: list _typeKeyFields list of keys what we monitor (list of attributes)
    """

    # NOTE: the plotting framework pairs each `_report<X>` method with its
    # `_plot<X>` method and `_report<X>Name` display label; do not rename.
    _typeName = "ComponentMonitoring"
    _typeKeyFields = ComponentMonitoring().keyFields

    def __reportAllResources(self, reportRequest, metric, unit):
        """Retrieve time-bucketed averaged values of `metric` from the database.

        :param dict reportRequest: plot attributes (startTime, endTime, condDict, ...)
        :param str metric: database field to average
        :param str unit: base unit used when scaling values for display
        :return: S_OK({'data': ..., 'granularity': ..., 'unit': ...}) or S_ERROR
        """
        selectFields = [metric]
        retVal = self._getTimedData(startTime=reportRequest['startTime'],
                                    endTime=reportRequest['endTime'],
                                    selectFields=selectFields,
                                    preCondDict=reportRequest['condDict'],
                                    metadataDict={'DynamicBucketing': False,
                                                  "metric": "avg"})
        if not retVal['OK']:
            return retVal
        dataDict, granularity = retVal['Value']
        try:
            _, _, _, unitName = self._findSuitableUnit(dataDict, self._getAccumulationMaxValue(dataDict), unit)
        except AttributeError as e:
            # Fall back to the raw unit when unit scaling is unavailable.
            gLogger.warn(e)
            unitName = unit
        return S_OK({'data': dataDict, 'granularity': granularity, 'unit': unitName})

    def __plotAllResources(self, reportRequest, plotInfo, filename, title):
        """Render a stacked line plot for previously retrieved data.

        :param dict reportRequest: plot attributes
        :param dict plotInfo: data/granularity/unit as returned by __reportAllResources
        :param str filename: output file name
        :param str title: plot title prefix (grouping is appended)
        """
        metadata = {'title': '%s by %s' % (title, reportRequest['grouping']),
                    'starttime': reportRequest['startTime'],
                    'endtime': reportRequest['endTime'],
                    'span': plotInfo['granularity'],
                    'skipEdgeColor': True,
                    'ylabel': plotInfo['unit']}
        # Fill gaps with zeros so every bucket in the interval is plotted.
        plotInfo['data'] = self._fillWithZero(granularity=plotInfo['granularity'],
                                              startEpoch=reportRequest['startTime'],
                                              endEpoch=reportRequest['endTime'],
                                              dataDict=plotInfo['data'])
        return self._generateStackedLinePlot(filename=filename,
                                             dataDict=plotInfo['data'],
                                             metadata=metadata)

    _reportRunningThreadsName = "Number of running threads"

    def _reportRunningThreads(self, reportRequest):
        """
        It is used to retrieve the data from the database.

        :param dict reportRequest: contains attributes used to create the plot.
        :return: S_OK or S_ERROR {'data':value1, 'granularity':value2} value1 is a dictionary, value2 is the bucket length
        """
        return self.__reportAllResources(reportRequest, "threads", "threads")

    def _plotRunningThreads(self, reportRequest, plotInfo, filename):
        """
        It creates the plot.

        :param dict reportRequest: plot attributes
        :param dict plotInfo: contains all the data which are used to create the plot
        :param str filename:
        :return: S_OK or S_ERROR { 'plot' : value1, 'thumbnail' : value2 } value1 and value2 are TRUE/FALSE
        """
        return self.__plotAllResources(reportRequest, plotInfo, filename, 'Number of running threads')

    _reportCpuUsageName = "CPU usage"

    def _reportCpuUsage(self, reportRequest):
        """
        It is used to retrieve the data from the database.

        :param dict reportRequest: contains attributes used to create the plot.
        :return: S_OK or S_ERROR {'data':value1, 'granularity':value2} value1 is a dictionary, value2 is the bucket length
        """
        return self.__reportAllResources(reportRequest, "cpuUsage", "percentage")

    def _plotCpuUsage(self, reportRequest, plotInfo, filename):
        """
        It creates the plot.

        :param dict reportRequest: plot attributes
        :param dict plotInfo: contains all the data which are used to create the plot
        :param str filename:
        :return: S_OK or S_ERROR { 'plot' : value1, 'thumbnail' : value2 } value1 and value2 are TRUE/FALSE
        """
        return self.__plotAllResources(reportRequest, plotInfo, filename, 'CPU usage')

    _reportMemoryUsageName = "Memory usage"

    def _reportMemoryUsage(self, reportRequest):
        """
        It is used to retrieve the data from the database.

        :param dict reportRequest: contains attributes used to create the plot.
        :return: S_OK or S_ERROR {'data':value1, 'granularity':value2} value1 is a dictionary, value2 is the bucket length
        """
        return self.__reportAllResources(reportRequest, "memoryUsage", "bytes")

    def _plotMemoryUsage(self, reportRequest, plotInfo, filename):
        """
        It creates the plot.

        :param dict reportRequest: plot attributes
        :param dict plotInfo: contains all the data which are used to create the plot
        :param str filename:
        :return: S_OK or S_ERROR { 'plot' : value1, 'thumbnail' : value2 } value1 and value2 are TRUE/FALSE
        """
        return self.__plotAllResources(reportRequest, plotInfo, filename, 'Memory usage')

    _reportRunningTimeName = "Running time"

    def _reportRunningTime(self, reportRequest):
        """
        It is used to retrieve the data from the database.

        :param dict reportRequest: contains attributes used to create the plot.
        :return: S_OK or S_ERROR {'data':value1, 'granularity':value2} value1 is a dictionary, value2 is the bucket length
        """
        return self.__reportAllResources(reportRequest, "runningTime", "time")

    def _plotRunningTime(self, reportRequest, plotInfo, filename):
        """
        It creates the plot.

        :param dict reportRequest: plot attributes
        :param dict plotInfo: contains all the data which are used to create the plot
        :param str filename:
        :return: S_OK or S_ERROR { 'plot' : value1, 'thumbnail' : value2 } value1 and value2 are TRUE/FALSE
        """
        return self.__plotAllResources(reportRequest, plotInfo, filename, 'Running time')
| andresailer/DIRAC | MonitoringSystem/private/Plotters/ComponentMonitoringPlotter.py | Python | gpl-3.0 | 5,913 | [
"DIRAC"
] | d1ddfe1ee2f0a3d7e4f9b12a20cd123bfadf9b00c471b3fd14f91758bc0a7ff6 |
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017 Petra Gospodnetic, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
import colorsys
import math
import random
import signal
import sys
import time
import threading
import os
import numpy as np
import appleseed as asr
# Initial parameters for generating grid light scene
grid_lights_count = 20  # number of lights per side of the square grid
color = "white"  # "white" for uniform lights, "mix" for randomized hues
plane_size = 100  # side length of the ground plane
output_scene_name = "{0}x{0}_{1}_point_lights".format(grid_lights_count, color)
def build_project():
    """Build and return an appleseed project with a square grid of point lights.

    Uses the module-level parameters (grid_lights_count, color, plane_size,
    output_scene_name) to place grid_lights_count^2 point lights above a
    ground plane. Returns None when `color` is not recognized.
    """
    # Create an empty project.
    project = asr.Project("grid-point-lights-generator")
    paths = project.get_search_paths()
    paths.append("data")
    project.set_search_paths(paths)

    # Add default configurations to the project.
    project.add_default_configurations()

    # Set the number of samples. This is basically the quality parameter: the higher the number
    # of samples, the smoother the image but the longer the rendering time.
    # todo: fix.
    conf = project.configurations()["final"]
    conf.insert_path("uniform_pixel_renderer.samples", 1)

    # Create a scene.
    scene = asr.Scene()

    # Create an assembly.
    assembly = asr.Assembly("assembly")

    # Prepare the orientation of all the objects in the scene.
    orientation = asr.Matrix4d.make_rotation(asr.Vector3d(1.0, 0.0, 0.0), math.radians(-90.0))

    #------------------------------------------------------------------------
    # Materials
    #------------------------------------------------------------------------

    # Create a material called "01 - Default_mat" and insert it into the assembly.
    assembly.materials().insert(asr.Material(
        "disney_material",
        "01 - Default_mat",
        {
            "alpha_map": "1",
            "layer1": {
                "anisotropic": "0",
                "base_color": "[1, 1, 1]",
                "clearcoat": "0",
                "clearcoat_gloss": "0",
                "layer_name": "layer1",
                "layer_number": "0",
                "mask": "1.0",
                "metallic": "0",
                "roughness": "1",
                "sheen": "0",
                "sheen_tint": "0",
                "specular": "0",
                "specular_tint": "0",
                "subsurface": "0.0"
            }
        }))

    #------------------------------------------------------------------------
    # Geometry
    #------------------------------------------------------------------------

    # Load the scene geometry from disk.
    objects = asr.MeshObjectReader.read(project.get_search_paths(), "plane", {"filename": "Plane001.binarymesh"})

    # Insert all the objects into the assembly.
    # (Loop variable renamed from `object` to avoid shadowing the builtin.)
    for obj in objects:
        # Create an instance of this object and insert it into the assembly.
        instance_name = obj.get_name() + "_inst"
        material_name = {"material_slot_0": "01 - Default_mat"}
        mat = orientation * asr.Matrix4d.make_translation(asr.Vector3d(0.0, 0.0, 0.0))
        instance = asr.ObjectInstance(
            instance_name,
            {"visibility":
                {
                    "camera": "true",
                    "diffuse": "true",
                    "glossy": "true",
                    "light": "true",
                    "probe": "true",
                    "shadow": "true",
                    "specular": "true",
                    "subsurface": "true",
                    "transparency": "true"
                }},
            obj.get_name(),
            asr.Transformd(mat),
            material_name,
            material_name)
        assembly.object_instances().insert(instance)

        # Insert this object into the scene.
        assembly.objects().insert(obj)

    #------------------------------------------------------------------------
    # Lights
    #------------------------------------------------------------------------

    light_z_distance = 1.0

    if color == "white":
        assembly.colors().insert(asr.ColorEntity("white",
                                                 {
                                                     "color_space": "linear_rgb",
                                                     "multiplier": 1.0
                                                 },
                                                 [1.0, 1.0, 1.0]))
        step = float(plane_size) / grid_lights_count
        light_count = 0
        grid_range = np.linspace(-plane_size / 2 + step, plane_size / 2 - step, grid_lights_count)
        for j in grid_range:
            for i in grid_range:
                # Create a point light called "light" and insert it into the assembly.
                light_name = "light_" + str(light_count)
                light_count = light_count + 1
                light = asr.Light("point_light", light_name, {
                    "intensity": "white",
                    "intensity_multiplier": "3"
                })
                light_position = asr.Vector3d(i, j, light_z_distance)
                mat = orientation * asr.Matrix4d.make_translation(light_position)
                light.set_transform(asr.Transformd(mat))
                assembly.lights().insert(light)
    elif color == "mix":
        # Fix: use range() instead of Python-2-only xrange() so the script
        # also runs under Python 3 (behavior is identical here).
        for i in range(0, grid_lights_count * grid_lights_count):
            # Bias hues towards a few gaussian clusters for a pleasing mix.
            s = random.uniform(0, 1)
            if s < 0.65:
                ran = random.gauss(1, 0.01)
            elif s < 0.9:
                ran = random.gauss(0.3, 0.1)
            else:
                ran = random.gauss(0.7, 0.01)
            random_color = list(colorsys.hls_to_rgb(ran, 0.5, 1.0))
            assembly.colors().insert(asr.ColorEntity("color_" + str(i),
                                                     {
                                                         "color_space": "linear_rgb",
                                                         "multiplier": 1.0
                                                     },
                                                     random_color))
        step = float(plane_size) / grid_lights_count
        light_count = 0
        grid_range = np.linspace(-plane_size / 2 + step, plane_size / 2 - step, grid_lights_count)
        for j in grid_range:
            for i in grid_range:
                # Create a point light called "light" and insert it into the assembly.
                light_name = "light_" + str(light_count)
                color_name = "color_" + str(light_count)
                light_count = light_count + 1
                light = asr.Light("point_light", light_name, {
                    "intensity": color_name,
                    "intensity_multiplier": "3"
                })
                light_position = asr.Vector3d(i, j, light_z_distance)
                mat = orientation * asr.Matrix4d.make_translation(light_position)
                light.set_transform(asr.Transformd(mat))
                assembly.lights().insert(light)
    else:
        # Unknown configuration: report and return None (caller will fail fast).
        print("Unknown color: {0}".format(color))
        return

    #------------------------------------------------------------------------
    # Assembly instance
    #------------------------------------------------------------------------

    # Create an instance of the assembly and insert it into the scene.
    assembly_inst = asr.AssemblyInstance("assembly_inst", {}, assembly.get_name())
    assembly_inst.transform_sequence().set_transform(0.0, asr.Transformd(asr.Matrix4d.identity()))
    scene.assembly_instances().insert(assembly_inst)

    # Insert the assembly into the scene.
    scene.assemblies().insert(assembly)

    #------------------------------------------------------------------------
    # Environment
    #------------------------------------------------------------------------

    # Create an environment called "env" and bind it to the scene.
    scene.set_environment(asr.Environment("env", {}))

    #------------------------------------------------------------------------
    # Camera
    #------------------------------------------------------------------------

    # Create an orthographic camera.
    params = {
        "controller_target": "0 0 0",
        "film_dimensions": "128 128",
        "near_z": "-0.1",
        "shutter_close_time": "1.0",
        "shutter_open_time": "0.0"
    }
    camera = asr.Camera("orthographic_camera", "camera", params)

    # Place and orient the camera.
    mat = orientation * asr.Matrix4d.make_translation(asr.Vector3d(0.0, 0.0, 0.0))
    camera.transform_sequence().set_transform(0.0, asr.Transformd(mat))

    # Bind the camera to the scene.
    scene.cameras().insert(camera)

    #------------------------------------------------------------------------
    # Frame
    #------------------------------------------------------------------------

    # Create a frame and bind it to the project.
    params = {
        "camera": "camera",
        "clamping": "false",
        "color_space": "srgb",
        "filter": "box",
        "filter_size": "0.5",
        "gamma_correction": "1.0",
        "pixel_format": "float",
        "premultiplied_alpha": "true",
        "resolution": "512 512",
        "tile_size": "64 64"}
    project.set_frame(asr.Frame("beauty", params))

    # Bind the scene to the project.
    project.set_scene(scene)

    return project
def main():
    """Build the grid point-light scene and write it to disk."""
    asr.ProjectFileWriter().write(build_project(), output_scene_name + ".appleseed")
# Script entry point: generate and save the scene when run directly.
if __name__ == "__main__":
    main()
| est77/appleseed | sandbox/tests/test scenes/many light sampling/generators/grid_point_lights.py | Python | mit | 10,508 | [
"VisIt"
] | 3b16bb5b2540aea902714ae3c9d61b785d1aa49883d7328efbee5307d70393c3 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()


def _cursor_actor(bounds, focal, color, outline=False, point=False,
                  radius=None):
    """Build one 2D cursor actor: axes always on, extras controlled by flags."""
    source = vtk.vtkCursor2D()
    source.SetModelBounds(*bounds)
    source.SetFocalPoint(*focal)
    source.AllOff()
    source.AxesOn()
    if outline:
        source.OutlineOn()
    if point:
        source.PointOn()
    if radius is not None:
        source.SetRadius(radius)
    mapper = vtk.vtkPolyDataMapper2D()
    mapper.SetInputConnection(source.GetOutputPort())
    actor = vtk.vtkActor2D()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(*color)
    return actor

# Four cursors, each configured differently, one per screen quadrant.
cursorActor = _cursor_actor((15, 45, 15, 45, 0, 0), (30, 30, 0), (1, 0, 0),
                            outline=True)
cursor2Actor = _cursor_actor((75, 105, 15, 45, 0, 0), (90, 30, 0), (0, 1, 0),
                             outline=True, point=True)
cursor3Actor = _cursor_actor((15, 45, 75, 105, 0, 0), (30, 90, 0), (0, 1, 0),
                             point=True, radius=3)
cursor4Actor = _cursor_actor((75, 105, 75, 105, 0, 0), (90, 90, 0), (1, 0, 0),
                             radius=0.0)

# Rendering support.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

for _actor in (cursorActor, cursor2Actor, cursor3Actor, cursor4Actor):
    ren1.AddActor(_actor)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(150, 150)
renWin.SetMultiSamples(0)
renWin.Render()
iren.Initialize()
#iren.Start()
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/General/Testing/Python/cursor2D.py | Python | gpl-3.0 | 2,161 | [
"VTK"
] | 82bd1e3f668f96bc024828911f224869666695cfb17f22acff27b365c6e8bc10 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
''' Header reading / writing functions for nifti1 image format
Author: Matthew Brett
'''
import numpy as np
import numpy.linalg as npl
from nipype.externals.pynifti.volumeutils import Recoder, make_dt_codes, \
HeaderDataError, HeaderTypeError, allopen
from nipype.externals.pynifti.batteryrunners import Report
from nipype.externals.pynifti.quaternions import fillpositive, quat2mat, mat2quat
from nipype.externals.pynifti import analyze # module import
from nipype.externals.pynifti.spm99analyze import SpmAnalyzeHeader
from nipype.externals.pynifti import filetuples # module import
from nipype.externals.pynifti.spatialimages import SpatialImage
from nipype.externals.pynifti.header_ufuncs import write_data, adapt_header
# nifti1 flat header definition for Analyze-like first 348 bytes
# first number in comments indicates offset in file header in bytes
# Each entry is (field name, numpy dtype code[, shape]); together the entries
# describe the fixed binary layout of the NIfTI1 header.
header_dtd = [
    ('sizeof_hdr', 'i4'), # 0; must be 348
    ('data_type', 'S10'), # 4; unused
    ('db_name', 'S18'), # 14; unused
    ('extents', 'i4'), # 32; unused
    ('session_error', 'i2'), # 36; unused
    ('regular', 'S1'), # 38; unused
    ('dim_info', 'u1'), # 39; MRI slice ordering code
    ('dim', 'i2', 8), # 40; data array dimensions
    ('intent_p1', 'f4'), # 56; first intent parameter
    ('intent_p2', 'f4'), # 60; second intent parameter
    ('intent_p3', 'f4'), # 64; third intent parameter
    ('intent_code', 'i2'),# 68; NIFTI intent code
    ('datatype', 'i2'), # 70; it's the datatype
    ('bitpix', 'i2'), # 72; number of bits per voxel
    ('slice_start', 'i2'),# 74; first slice index
    ('pixdim', 'f4', 8), # 76; grid spacings (units below)
    ('vox_offset', 'f4'), # 108; offset to data in image file
    ('scl_slope', 'f4'), # 112; data scaling slope
    ('scl_inter', 'f4'), # 116; data scaling intercept
    ('slice_end', 'i2'), # 120; last slice index
    ('slice_code', 'u1'), # 122; slice timing order
    ('xyzt_units', 'u1'), # 123; units of pixdim[1..4]
    ('cal_max', 'f4'), # 124; max display intensity
    ('cal_min', 'f4'), # 128; min display intensity
    ('slice_duration', 'f4'), # 132; time for 1 slice
    ('toffset', 'f4'), # 136; time axis shift
    ('glmax', 'i4'), # 140; unused
    ('glmin', 'i4'), # 144; unused
    ('descrip', 'S80'), # 148; any text
    ('aux_file', 'S24'), # 228; auxiliary filename
    ('qform_code', 'i2'), # 252; xform code
    ('sform_code', 'i2'), # 254; xform code
    ('quatern_b', 'f4'), # 256; quaternion b param
    ('quatern_c', 'f4'), # 260; quaternion c param
    ('quatern_d', 'f4'), # 264; quaternion d param
    ('qoffset_x', 'f4'), # 268; quaternion x shift
    ('qoffset_y', 'f4'), # 272; quaternion y shift
    ('qoffset_z', 'f4'), # 276; quaternion z shift
    ('srow_x', 'f4', 4), # 280; 1st row affine transform
    ('srow_y', 'f4', 4), # 296; 2nd row affine transform
    ('srow_z', 'f4', 4), # 312; 3rd row affine transform
    ('intent_name', 'S16'), # 328; name or meaning of data
    ('magic', 'S4') # 344; must be 'ni1\0' or 'n+1\0'
    ]

# Full header numpy dtype
header_dtype = np.dtype(header_dtd)
# datatypes not in analyze format, with codes
# np.float128 / np.complex256 only exist on platforms whose compiler supports
# them (usually 64 bit); fall back to np.void so the code table still loads.
try:
    _float128t = np.float128
except AttributeError:
    _float128t = np.void
try:
    _complex256t = np.complex256
except AttributeError:
    _complex256t = np.void

_added_dtdefs = ( # code, label, dtype definition
    (256, 'int8', np.int8),
    (512, 'uint16', np.uint16),
    (768, 'uint32', np.uint32),
    (1024, 'int64', np.int64),
    # BUG FIX: code 1280 is NIFTI_TYPE_UINT64 in nifti1.h; it was mislabeled
    # 'int64' (duplicating the 1024 label) while mapping to np.uint64.
    (1280, 'uint64', np.uint64),
    (1536, 'float128', _float128t), # Only numpy defined on 64 bit
    (1792, 'complex128', np.complex128),
    (2048, 'complex256', _complex256t), # 64 bit again
    (2304, 'RGBA', np.dtype([('R', 'u1'),
                             ('G', 'u1'),
                             ('B', 'u1'),
                             ('A', 'u1')]))
    )

# Make full code alias bank, including dtype column
data_type_codes = make_dt_codes(analyze._dtdefs + _added_dtdefs)
# Transform (qform, sform) codes
# These say which space the (qform, sform) affines map voxel coordinates into.
xform_codes = Recoder(( # code, label
    (0, 'unknown'), # Code for transform unknown or absent
    (1, 'scanner'),
    (2, 'aligned'),
    (3, 'talairach'),
    (4, 'mni')), fields=('code', 'label'))

# unit codes
# Codes stored in the ``xyzt_units`` header field (see header_dtd above).
unit_codes = Recoder(( # code, label
    (0, 'unknown'),
    (1, 'meter'),
    (2, 'mm'),
    (3, 'micron'),
    (8, 'sec'),
    (16, 'msec'),
    (24, 'usec'),
    (32, 'hz'),
    (40, 'ppm'),
    (48, 'rads')), fields=('code', 'label'))

# Slice acquisition order codes (header field ``slice_code``); the short
# aliases ('seq inc' etc.) are accepted as alternative labels.
slice_order_codes = Recoder(( # code, label
    (0, 'unknown'),
    (1, 'sequential increasing', 'seq inc'),
    (2, 'sequential decreasing', 'seq dec'),
    (3, 'alternating increasing', 'alt inc'),
    (4, 'alternating decreasing', 'alt dec'),
    (5, 'alternating increasing 2', 'alt inc 2'),
    (6, 'alternating decreasing 2', 'alt dec 2')),
                            fields=('code', 'label'))
# NIfTI intent codes: what the voxel values (or extra intent parameters)
# mean.  Each row is (code, label, tuple of parameter descriptions); the
# length of the tuple is the number of parameters the intent takes, which
# ``set_intent`` validates against.
intent_codes = Recoder((
    # code, label, parameters description tuple
    (0, 'none', ()),
    (2, 'correlation',('p1 = DOF',)),
    (3, 't test', ('p1 = DOF',)),
    (4, 'f test', ('p1 = numerator DOF', 'p2 = denominator DOF')),
    (5, 'z score', ()),
    (6, 'chi2', ('p1 = DOF',)),
    (7, 'beta', ('p1=a', 'p2=b')), # two parameter beta distribution
    (8, 'binomial', ('p1 = number of trials', 'p2 = probability per trial')),
    # Prob(x) = (p1 choose x) * p2^x * (1-p2)^(p1-x), for x=0,1,...,p1
    # BUG FIX: second element was the bare int ``2``; each element must be a
    # parameter description string (tuple length == parameter count).
    (9, 'gamma', ('p1 = shape', 'p2 = scale')), # 2 parameter gamma
    (10, 'poisson', ('p1 = mean',)), # Density(x) proportional to x^(p1-1) * exp(-p2*x)
    (11, 'normal', ('p1 = mean', 'p2 = standard deviation',)),
    (12, 'non central f test', ('p1 = numerator DOF',
                                'p2 = denominator DOF',
                                'p3 = numerator noncentrality parameter',)),
    (13, 'non central chi2', ('p1 = DOF', 'p2 = noncentrality parameter',)),
    (14, 'logistic', ('p1 = location', 'p2 = scale',)),
    (15, 'laplace', ('p1 = location', 'p2 = scale')),
    (16, 'uniform', ('p1 = lower end', 'p2 = upper end')),
    (17, 'non central t test', ('p1 = DOF', 'p2 = noncentrality parameter')),
    # BUG FIX: weibull takes three parameters; 'p2 = scale, p3 = power' was
    # merged into one string, making the tuple length (2) wrong.
    (18, 'weibull', ('p1 = location', 'p2 = scale', 'p3 = power')),
    (19, 'chi', ('p1 = DOF',)),
    # p1 = 1 = 'half normal' distribution
    # p1 = 2 = Rayleigh distribution
    # p1 = 3 = Maxwell-Boltzmann distribution. */
    # TYPO FIX: first description read 'pi = mu'
    (20, 'inverse gaussian', ('p1 = mu', 'p2 = lambda')),
    (21, 'extreme value 1', ('p1 = location', 'p2 = scale')),
    (22, 'p value', ()),
    (23, 'log p value', ()),
    (24, 'log10 p value', ()),
    (1001, 'estimate', ()),
    (1002, 'label', ()),
    (1003, 'neuroname', ()),
    (1004, 'general matrix', ('p1 = M', 'p2 = N')),
    (1005, 'symmetric matrix', ('p1 = M',)),
    (1006, 'displacement vector', ()),
    (1007, 'vector', ()),
    # TYPO FIX: was 'poinset'; the NIfTI standard name is NIFTI_INTENT_POINTSET
    (1008, 'pointset', ()),
    (1009, 'triangle', ()),
    (1010, 'quaternion', ()),
    (1011, 'dimensionless', ()),
    (2001, 'time series', ()),
    (2002, 'node index', ()),
    (2003, 'rgb vector', ()),
    (2004, 'rgba vector', ()),
    (2005, 'shape', ())),
                       fields=('code', 'label', 'parameters'))
class Nifti1Extension(object):
    """Baseclass for NIfTI1 header extensions.

    This class is sufficient to handle very simple text-based extensions, such
    as `comment`. More sophisticated extensions should/will be supported by
    dedicated subclasses.
    """
    def __init__(self, code, content):
        """
        Parameters
        ----------
        code : int|str
          Canonical extension code as defined in the NIfTI standard, given
          either as integer or corresponding label
          (see :data:`~nifti.nifti1.extension_codes`)
        content : str
          Extension content as read from the NIfTI file header. This content is
          converted into a runtime representation.
        """
        try:
            self._code = extension_codes.code[code]
        except KeyError:
            # XXX or fail or at least complain?
            # Unknown codes are stored verbatim so files carrying
            # unrecognized extensions can still round-trip.
            self._code = code
        self._content = self._unmangle(content)

    def _unmangle(self, value):
        """Convert the extension content into its runtime representation.

        The default implementation does nothing at all.

        Parameters
        ----------
        value : str
          Extension content as read from file.

        Returns
        -------
        The same object that was passed as `value`.

        Notes
        -----
        Subclasses should reimplement this method to provide the desired
        unmangling procedure and may return any type of object.
        """
        return value

    def _mangle(self, value):
        """Convert the extension content into NIfTI file header representation.

        The default implementation does nothing at all.

        Parameters
        ----------
        value : str
          Extension content in runtime form.

        Returns
        -------
        str

        Notes
        -----
        Subclasses should reimplement this method to provide the desired
        mangling procedure.
        """
        return value

    def get_code(self):
        """Return the canonical extension type code."""
        return self._code

    def get_content(self):
        """Return the extension content in its runtime representation."""
        return self._content

    def get_sizeondisk(self):
        """Return the size of the extension in the NIfTI file.
        """
        # need raw value size plus 8 bytes for esize and ecode
        size = len(self._mangle(self._content))
        size += 8
        # extensions size has to be a multiple of 16 bytes
        # NOTE(review): when size is already a multiple of 16 this adds a
        # full extra 16 bytes of padding; still spec-valid, but not minimal.
        size += 16 - (size % 16)
        return size

    def __repr__(self):
        try:
            code = extension_codes.label[self._code]
        except KeyError:
            # deal with unknown codes
            code = self._code
        s = "Nifti1Extension('%s', '%s')" % (code, self._content)
        return s

    def __eq__(self, other):
        # NOTE(review): assumes ``other`` is another extension; comparing
        # against an arbitrary object raises AttributeError.
        if self._code != other._code \
               or self._content != other._content:
            return False
        else:
            return True

    def write_to(self, fileobj):
        ''' Write header extensions to fileobj

        Write starts at fileobj current file position.

        Parameters
        ----------
        fileobj : file-like object
           Should implement ``write`` method

        Returns
        -------
        None
        '''
        extstart = fileobj.tell()
        rawsize = self.get_sizeondisk()
        # write esize and ecode first
        fileobj.write(np.array((rawsize, self._code),
                               dtype=np.int32).tostring())
        # followed by the actual extension content
        # XXX if mangling upon load is implemented, it should be reverted here
        fileobj.write(self._mangle(self._content))
        # be nice and zero out remaining part of the extension till the
        # next 16 byte border
        fileobj.write('\x00' * (extstart + rawsize - fileobj.tell()))
# NIfTI header extension type codes (ECODE)
# see nifti1_io.h for a complete list of all known extensions and
# references to their description or contacts of the respective
# initiators
# Every code currently maps to the generic Nifti1Extension handler; the
# 'handler' column lets dedicated subclasses be plugged in per code later.
extension_codes = Recoder((
    (0, "ignore", Nifti1Extension),
    (2, "dicom", Nifti1Extension),
    (4, "afni", Nifti1Extension),
    (6, "comment", Nifti1Extension),
    (8, "xcede", Nifti1Extension),
    (10, "jimdiminfo", Nifti1Extension),
    (12, "workflow_fwds", Nifti1Extension),
    (14, "freesurfer", Nifti1Extension),
    (16, "pypickle", Nifti1Extension)
    ),
    fields=('code', 'label', 'handler'))
class Nifti1Extensions(list):
    """Simple extension collection, implemented as a list-subclass.
    """
    def count(self, ecode):
        """Returns the number of extensions matching a given *ecode*.

        Parameter
        ---------
        code : int | str
          The ecode can be specified either literal or as numerical value.
        """
        # normalize a label or an int to the canonical integer code first
        count = 0
        code = extension_codes.code[ecode]
        for e in self:
            if e.get_code() == code:
                count += 1
        return count

    def get_codes(self):
        """Return a list of the extension code of all available extensions"""
        return [e.get_code() for e in self]

    def get_sizeondisk(self):
        """Return the size of the complete header extensions in the NIfTI file.
        """
        # add four bytes for the NIfTI extension flag!
        return np.sum([e.get_sizeondisk() for e in self]) + 4

    def __repr__(self):
        s = "Nifti1Extensions(%s)" \
                % ', '.join([str(e) for e in self])
        return s

    def __eq__(self, other):
        # BUG FIX: previously only the first len(self) elements were compared,
        # so a collection compared equal to any longer collection sharing its
        # prefix, and comparing against a shorter one raised IndexError.
        if len(self) != len(other):
            return False
        for i, e in enumerate(self):
            if not e == other[i]:
                return False
        return True

    def write_to(self, fileobj):
        ''' Write header extensions to fileobj

        Write starts at fileobj current file position.

        Parameters
        ----------
        fileobj : file-like object
           Should implement ``write`` method

        Returns
        -------
        None
        '''
        # not extensions -> nothing to do
        if not len(self):
            return
        # since we have extensions write the appropriate flag
        fileobj.write(np.array((1, 0, 0, 0), dtype=np.int8).tostring())
        # and now each extension
        for e in self:
            e.write_to(fileobj)

    @classmethod
    def from_fileobj(klass, fileobj, size):
        '''Read header extensions from a fileobj

        Parameters
        ----------
        fileobj : file-like object
            It is assumed to be positioned right after the NIfTI magic field.
        size : int
            Number of bytes to read. If negative, fileobj will be read till
            its end.

        Returns
        -------
        An extension list. This list might be empty in case no extensions
        were present in fileobj.

        Raises
        ------
        HeaderDataError
            If an extension header is truncated, its size is not a multiple
            of 16, or its content cannot be read in full.
        '''
        # make empty extension list
        extensions = klass()
        # assume the fileptr is just after header (magic field)
        # try reading the next 4 bytes after the initial header
        extension_status = fileobj.read(4)
        if not len(extension_status):
            # if there is nothing the NIfTI standard requires to assume zeros
            extension_status = np.zeros((4,), dtype=np.int8)
        else:
            extension_status = np.fromstring(extension_status, dtype=np.int8)
        # NIfTI1 says: if first element is non-zero there are extensions
        # present; if not there is nothing left to do
        if not extension_status[0]:
            return extensions
        # note that we read the extension flag
        if not size < 0:
            size = size - 4
        # read until the whole header is parsed (each extension is a multiple
        # of 16 bytes) or in case of a separate header file till the end
        # (break inside the body)
        # XXX not sure if the separate header behavior is sane
        while size >= 16 or size < 0:
            # the next 8 bytes should have esize and ecode
            ext_def = fileobj.read(8)
            # nothing was read and instructed to read till the end
            # -> assume all extensions where parsed and break
            if not len(ext_def) and size < 0:
                break
            # otherwise there should be a full extension header
            if not len(ext_def) == 8:
                raise HeaderDataError('failed to read extension header')
            ext_def = np.fromstring(ext_def, dtype=np.int32)
            # be extra verbose
            ecode = ext_def[1]
            esize = ext_def[0]
            if esize % 16:
                raise HeaderDataError(
                    'extension size is not a multiple of 16 bytes')
            # read extension itself; esize includes the 8 bytes already read
            evalue = fileobj.read(esize - 8)
            if not len(evalue) == esize - 8:
                raise HeaderDataError('failed to read extension content')
            # note that we read a full extension
            size -= esize
            # store raw extension content, but strip trailing NULL chars
            evalue = evalue.rstrip('\x00')
            # 'extension_codes' also knows the best implementation to handle
            # a particular extension type
            try:
                ext = extension_codes.handler[ecode](ecode, evalue)
            except KeyError:
                # unknown extension type
                # XXX complain or fail or go with a generic extension
                ext = Nifti1Extension(ecode, evalue)
            extensions.append(ext)
        return extensions
class Nifti1Header(SpmAnalyzeHeader):
    ''' Class for NIFTI1 header

    Adds the NIfTI1-specific fields and behaviour (qform/sform affines,
    intent, dim_info, slice timing) on top of the SPM Analyze header.
    '''
    # Copies of module level definitions
    _dtype = header_dtype                   # binary layout of the header
    _data_type_codes = data_type_codes      # datatype code <-> numpy dtype
    _xform_codes = xform_codes              # qform/sform code meanings
    _unit_codes = unit_codes                # xyzt_units code meanings
    _intent_codes = intent_codes            # intent code table
    _slice_order_codes = slice_order_codes  # slice timing order table
    # data scaling capabilities
    has_data_slope = True
    has_data_intercept = True
def get_best_affine(self):
''' Select best of available transforms '''
hdr = self._header_data
if hdr['sform_code']:
return self.get_sform()
if hdr['qform_code']:
return self.get_qform()
return self.get_base_affine()
def _empty_headerdata(self, endianness=None):
''' Create empty header binary block with given endianness '''
hdr_data = analyze.AnalyzeHeader._empty_headerdata(self, endianness)
hdr_data['scl_slope'] = 1
hdr_data['magic'] = 'n+1'
hdr_data['vox_offset'] = 352
return hdr_data
def get_qform_quaternion(self):
''' Compute quaternion from b, c, d of quaternion
Fills a value by assuming this is a unit quaternion
'''
hdr = self._header_data
bcd = [hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d']]
return fillpositive(bcd)
def get_qform(self):
''' Return 4x4 affine matrix from qform parameters in header '''
hdr = self._header_data
quat = self.get_qform_quaternion()
R = quat2mat(quat)
vox = hdr['pixdim'][1:4].copy()
if np.any(vox) < 0:
raise HeaderDataError('pixdims[1,2,3] should be positive')
qfac = hdr['pixdim'][0]
if qfac not in (-1,1):
raise HeaderDataError('qfac (pixdim[0]) should be 1 or -1')
vox[-1] *= qfac
S = np.diag(vox)
M = np.dot(R, S)
out = np.eye(4)
out[0:3,0:3] = M
out[0:3,3] = [hdr['qoffset_x'], hdr['qoffset_y'], hdr['qoffset_z']]
return out
    def set_qform(self, affine, code=None):
        ''' Set qform header values from 4x4 affine

        Parameters
        ----------
        hdr : nifti1 header
        affine : 4x4 array
            affine transform to write into qform
        code : None, string or integer
            String or integer giving meaning of transform in *affine*.
            The default is None.  If code is None, then {if current
            qform code is not 0, leave code as it is in the header; else
            set to 1 ('scanner')}.

        Notes
        -----
        The qform transform only encodes translations, rotations and
        zooms. If there are shear components to the *affine* transform,
        the written qform gives the closest approximation where the
        rotation matrix is orthogonal. This is to allow quaternion
        representation. The orthogonal representation enforces orthogonal
        axes.

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> int(hdr['qform_code']) # gives 0 - unknown
        0
        >>> affine = np.diag([1,2,3,1])
        >>> np.all(hdr.get_qform() == affine)
        False
        >>> hdr.set_qform(affine)
        >>> np.all(hdr.get_qform() == affine)
        True
        >>> int(hdr['qform_code']) # gives 1 - scanner
        1
        >>> hdr.set_qform(affine, code='talairach')
        >>> int(hdr['qform_code'])
        3
        >>> hdr.set_qform(affine, code=None)
        >>> int(hdr['qform_code'])
        3
        >>> hdr.set_qform(affine, code='scanner')
        >>> int(hdr['qform_code'])
        1
        '''
        hdr = self._header_data
        # Resolve the qform code: keep a non-zero existing code when the
        # caller gives None, otherwise translate a label/int via the table.
        if code is None:
            code = hdr['qform_code']
            if code == 0:
                hdr['qform_code'] = 1
        else:
            code = self._xform_codes[code]
            hdr['qform_code'] = code
        if not affine.shape == (4,4):
            raise TypeError('Need 4x4 affine as input')
        # Split the affine into translation and the rotation-zoom-shear part.
        trans = affine[:3,3]
        RZS = affine[:3,:3]
        # Column norms of RZS give the voxel zooms.
        zooms = np.sqrt(np.sum(RZS * RZS, axis=0))
        R = RZS / zooms
        # Set qfac to make R determinant positive
        if npl.det(R) > 0:
            qfac = 1
        else:
            qfac = -1
            R[:,-1] *= -1
        # Make R orthogonal (to allow quaternion representation)
        # The orthogonal representation enforces orthogonal axes
        # (a subtle requirement of the NIFTI format qform transform)
        # Transform below is polar decomposition, returning the closest
        # orthogonal matrix PR, to input R
        P, S, Qs = npl.svd(R)
        PR = np.dot(P, Qs)
        # Convert to quaternion
        quat = mat2quat(PR)
        # Set into header
        hdr['qoffset_x'], hdr['qoffset_y'], hdr['qoffset_z'] = trans
        hdr['pixdim'][0] = qfac
        hdr['pixdim'][1:4] = zooms
        hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d'] = quat[1:]
def get_sform(self):
''' Return sform 4x4 affine matrix from header '''
hdr = self._header_data
out = np.eye(4)
out[0,:] = hdr['srow_x'][:]
out[1,:] = hdr['srow_y'][:]
out[2,:] = hdr['srow_z'][:]
return out
    def set_sform(self, affine, code=None):
        ''' Set sform transform from 4x4 affine

        Parameters
        ----------
        hdr : nifti1 header
        affine : 4x4 array
            affine transform to write into sform
        code : None, string or integer
            String or integer giving meaning of transform in *affine*.
            The default is None.  If code is None, then {if current
            sform code is not 0, leave code as it is in the header; else
            set to 1 ('scanner')}.

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> int(hdr['sform_code']) # gives 0 - unknown
        0
        >>> affine = np.diag([1,2,3,1])
        >>> np.all(hdr.get_sform() == affine)
        False
        >>> hdr.set_sform(affine)
        >>> np.all(hdr.get_sform() == affine)
        True
        >>> int(hdr['sform_code']) # gives 1 - scanner
        1
        >>> hdr.set_sform(affine, code='talairach')
        >>> int(hdr['sform_code'])
        3
        >>> hdr.set_sform(affine, code=None)
        >>> int(hdr['sform_code'])
        3
        >>> hdr.set_sform(affine, code='scanner')
        >>> int(hdr['sform_code'])
        1
        '''
        hdr = self._header_data
        # Resolve the sform code the same way set_qform resolves its code.
        if code is None:
            code = hdr['sform_code']
            if code == 0:
                hdr['sform_code'] = 1
        else:
            code = self._xform_codes[code]
            hdr['sform_code'] = code
        # Unlike the qform, the sform stores the affine rows directly, so
        # shears are preserved exactly.
        hdr['srow_x'][:] = affine[0,:]
        hdr['srow_y'][:] = affine[1,:]
        hdr['srow_z'][:] = affine[2,:]
    def get_qform_code(self, code_repr='label'):
        ''' Return representation of qform code

        Parameters
        ----------
        code_repr : string
            string giving output form of qform code representation.
            Default is 'label'; use 'code' for integer representation.

        Returns
        -------
        qform_code : string or integer
            string label for qform code or code

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr['qform_code'] = 3
        >>> hdr.get_qform_code()
        'talairach'
        '''
        # Thin wrapper: translation via the xform code table.
        return self._get_code_field(
            code_repr,
            'qform_code',
            self._xform_codes)
    def get_sform_code(self, code_repr='label'):
        ''' Return representation of sform code

        Parameters
        ----------
        code_repr : string
            string giving output form of sform code representation.
            Default is 'label'; use 'code' for integer representation.

        Returns
        -------
        sform_code : string or integer
            string label for sform code or code

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr['sform_code'] = 3
        >>> hdr.get_sform_code()
        'talairach'
        '''
        # Thin wrapper: translation via the xform code table.
        return self._get_code_field(
            code_repr,
            'sform_code',
            self._xform_codes)
    def get_slope_inter(self):
        ''' Get data scaling (slope) and DC offset (intercept) from header data

        Parameters
        ----------
        self : header object
            Should have fields (keys)
            * scl_slope - slope
            * scl_inter - intercept

        Returns
        -------
        slope : None or float
            scaling (slope).  None if there is no valid scaling from
            these fields
        inter : None or float
            offset (intercept).  Also None if there is no valid scaling,
            offset

        Examples
        --------
        >>> fields = {'scl_slope':1,'scl_inter':0}
        >>> hdr = Nifti1Header()
        >>> hdr.get_slope_inter()
        (1.0, 0.0)
        >>> hdr['scl_slope'] = 0
        >>> hdr.get_slope_inter()
        (None, None)
        >>> hdr['scl_slope'] = np.nan
        >>> hdr.get_slope_inter()
        (None, None)
        >>> hdr['scl_slope'] = 1
        >>> hdr['scl_inter'] = 1
        >>> hdr.get_slope_inter()
        (1.0, 1.0)
        >>> hdr['scl_inter'] = np.inf
        >>> hdr.get_slope_inter()
        (1.0, 0.0)
        '''
        scale = float(self['scl_slope'])
        dc_offset = float(self['scl_inter'])
        # Zero or non-finite slope means there is no usable scaling at all.
        if not scale or not np.isfinite(scale):
            return None, None
        # A valid slope with a non-finite intercept degrades to offset 0.
        if not np.isfinite(dc_offset):
            dc_offset = 0.0
        return scale, dc_offset
def set_slope_inter(self, slope, inter):
self._header_data['scl_slope'] = slope
self._header_data['scl_inter'] = inter
def get_dim_info(self):
''' Gets nifti MRI slice etc dimension information
Returns
-------
freq : {None,0,1,2}
Which data array axis is freqency encode direction
phase : {None,0,1,2}
Which data array axis is phase encode direction
slice : {None,0,1,2}
Which data array axis is slice encode direction
where ``data array`` is the array returned by ``get_data``
Because nifti1 files are natively Fortran indexed:
0 is fastest changing in file
1 is medium changing in file
2 is slowest changing in file
``None`` means the axis appears not to be specified.
Examples
--------
See set_dim_info function
'''
hdr = self._header_data
info = int(hdr['dim_info'])
freq = info & 3
phase = (info >> 2) & 3
slice = (info >> 4) & 3
return (freq-1 if freq else None,
phase-1 if phase else None,
slice-1 if slice else None)
def set_dim_info(self, freq=None, phase=None, slice=None):
''' Sets nifti MRI slice etc dimension information
Parameters
----------
hdr : nifti1 header
freq : {None, 0, 1, 2}
axis of data array refering to freqency encoding
phase : {None, 0, 1, 2}
axis of data array refering to phase encoding
slice : {None, 0, 1, 2}
axis of data array refering to slice encoding
``None`` means the axis is not specified.
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(1, 2, 0)
>>> hdr.get_dim_info()
(1, 2, 0)
>>> hdr.set_dim_info(freq=1, phase=2, slice=0)
>>> hdr.get_dim_info()
(1, 2, 0)
>>> hdr.set_dim_info()
>>> hdr.get_dim_info()
(None, None, None)
>>> hdr.set_dim_info(freq=1, phase=None, slice=0)
>>> hdr.get_dim_info()
(1, None, 0)
Notes
-----
This is stored in one byte in the header
'''
for inp in (freq, phase, slice):
if inp not in (None, 0, 1, 2):
raise HeaderDataError('Inputs must be in [None, 0, 1, 2]')
info = 0
if not freq is None:
info = info | ((freq+1) & 3)
if not phase is None:
info = info | (((phase+1) & 3) << 2)
if not slice is None:
info = info | (((slice+1) & 3) << 4)
self._header_data['dim_info'] = info
    def get_intent_code(self, code_repr='label'):
        ''' Return representation of intent code

        Parameters
        ----------
        code_repr : string
            string giving output form of intent code representation.
            Default is 'label'; use 'code' for integer representation.

        Returns
        -------
        intent_code : string or integer
            string label for intent code or code

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr.set_intent('t test', (10,), name='some score')
        >>> hdr.get_intent_code()
        't test'
        '''
        # Thin wrapper: translation via the intent code table.
        return self._get_code_field(
            code_repr,
            'intent_code',
            self._intent_codes)
    def get_intent(self, code_repr='label'):
        ''' Get intent code, parameters and name

        Parameters
        ----------
        code_repr : string
            string giving output form of intent code representation.
            Default is 'label'; use 'code' for integer representation.

        Returns
        -------
        code : string or integer
            intent code, or string describing code
        parameters : tuple
            parameters for the intent
        name : string
            intent name

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr.set_intent('t test', (10,), name='some score')
        >>> hdr.get_intent()
        ('t test', (10.0,), 'some score')
        >>> hdr.get_intent('code')
        (3, (10.0,), 'some score')
        '''
        hdr = self._header_data
        code = int(hdr['intent_code'])
        recode = self.get_intent_code(code_repr)
        # The intent code table knows how many of the intent_p1..p3
        # fields are meaningful for this intent.
        n_params = len(self._intent_codes.parameters[code])
        params = (float(hdr['intent_p%d' % (i+1)]) for i in range(n_params))
        return recode, tuple(params), str(hdr['intent_name'])
    def set_intent(self, code, params=(), name=''):
        ''' Set the intent code, parameters and name

        If parameters are not specified, assumed to be all zero. Each
        intent code has a set number of parameters associated. If you
        specify any parameters, then it will need to be the correct number
        (e.g the "f test" intent requires 2).  However, parameters can
        also be set in the file data, so we also allow not setting any
        parameters (empty parameter tuple).

        Parameters
        ----------
        code : integer or string
            code specifying nifti intent
        params : list, tuple of scalars
            parameters relating to intent (see intent_codes)
            defaults to ().  Unspecified parameters are set to 0.0
        name : string
            intent name (description). Defaults to ''

        Returns
        -------
        None

        Raises
        ------
        HeaderDataError
            if a non-empty *params* does not have the parameter count the
            intent requires

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr.set_intent(0) # unknown code
        >>> hdr.set_intent('z score')
        >>> hdr.get_intent()
        ('z score', (), '')
        >>> hdr.get_intent('code')
        (5, (), '')
        >>> hdr.set_intent('t test', (10,), name='some score')
        >>> hdr.get_intent()
        ('t test', (10.0,), 'some score')
        >>> hdr.set_intent('f test', (2, 10), name='another score')
        >>> hdr.get_intent()
        ('f test', (2.0, 10.0), 'another score')
        >>> hdr.set_intent('f test')
        >>> hdr.get_intent()
        ('f test', (0.0, 0.0), '')
        '''
        hdr = self._header_data
        icode = intent_codes.code[code]
        p_descr = intent_codes.parameters[code]
        if len(params) and len(params) != len(p_descr):
            raise HeaderDataError('Need params of form %s, or empty' % (p_descr,))
        # Pad to 3: the header always has intent_p1..intent_p3 fields, and
        # unused ones must be zeroed.
        all_params = [0] * 3
        all_params[:len(params)] = params[:]
        for i, param in enumerate(all_params):
            hdr['intent_p%d' % (i+1)] = param
        hdr['intent_code'] = icode
        hdr['intent_name'] = name
    def get_slice_duration(self):
        ''' Get slice duration

        Returns
        -------
        slice_duration : float
            time to acquire one slice

        Raises
        ------
        HeaderDataError
            if the slice dimension is not set in ``dim_info``

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr.set_dim_info(slice=2)
        >>> hdr.set_slice_duration(0.3)
        >>> print "%0.1f" % hdr.get_slice_duration()
        0.3

        Notes
        -----
        The Nifti1 spec appears to require the slice dimension to be
        defined for slice_duration to have meaning.
        '''
        _, _, slice_dim = self.get_dim_info()
        if slice_dim is None:
            raise HeaderDataError('Slice dimension must be set '
                                  'for duration to be valid')
        return float(self._header_data['slice_duration'])
def set_slice_duration(self, duration):
''' Set slice duration
Parameters
----------
duration : scalar
time to acquire one slice
Examples
--------
See ``get_slice_duration``
'''
_, _, slice_dim = self.get_dim_info()
if slice_dim is None:
raise HeaderDataError('Slice dimension must be set '
'for duration to be valid')
self._header_data['slice_duration'] = duration
    def get_slice_code(self, code_repr='label'):
        ''' Return representation of slice order code

        Parameters
        ----------
        code_repr : string
            string giving output form of slice order code representation.
            Default is 'label'; use 'code' for integer representation.

        Returns
        -------
        slice_code : string or integer
            string label for slice ordering code or code

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr['slice_code'] = 4 # alternating decreasing
        >>> hdr.get_slice_code()
        'alternating decreasing'
        '''
        # Thin wrapper: translation via the slice order code table.
        return self._get_code_field(
            code_repr,
            'slice_code',
            self._slice_order_codes)
    def get_slice_times(self):
        ''' Get slice times from slice timing information

        Returns
        -------
        slice_times : tuple
            Times of acquisition of slices, where 0 is the beginning of
            the acquisition, ordered by position in file.  nifti allows
            slices at the top and bottom of the volume to be excluded from
            the standard slice timing specification, and calls these
            "padding slices".  We give padding slices ``None`` as a time
            of acquisition

        Raises
        ------
        HeaderDataError
            if the slice code is 'unknown', slice_start is negative, or
            slice_end does not exceed slice_start

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr.set_dim_info(slice=2)
        >>> hdr.set_data_shape((1, 1, 7))
        >>> hdr.set_slice_duration(0.1)

        We need a function to print out the Nones and floating point
        values in a predictable way, for the tests below.

        >>> _stringer = lambda val: val is not None and '%2.1f' % val or None
        >>> _print_me = lambda s: map(_stringer, s)

        The following examples are from the nifti1.h documentation.

        >>> hdr['slice_code'] = slice_order_codes['sequential increasing']
        >>> _print_me(hdr.get_slice_times())
        ['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']
        >>> hdr['slice_start'] = 1
        >>> hdr['slice_end'] = 5
        >>> _print_me(hdr.get_slice_times())
        [None, '0.0', '0.1', '0.2', '0.3', '0.4', None]
        >>> hdr['slice_code'] = slice_order_codes['sequential decreasing']
        >>> _print_me(hdr.get_slice_times())
        [None, '0.4', '0.3', '0.2', '0.1', '0.0', None]
        >>> hdr['slice_code'] = slice_order_codes['alternating increasing']
        >>> _print_me(hdr.get_slice_times())
        [None, '0.0', '0.3', '0.1', '0.4', '0.2', None]
        >>> hdr['slice_code'] = slice_order_codes['alternating decreasing']
        >>> _print_me(hdr.get_slice_times())
        [None, '0.2', '0.4', '0.1', '0.3', '0.0', None]
        >>> hdr['slice_code'] = slice_order_codes['alternating increasing 2']
        >>> _print_me(hdr.get_slice_times())
        [None, '0.2', '0.0', '0.3', '0.1', '0.4', None]
        >>> hdr['slice_code'] = slice_order_codes['alternating decreasing 2']
        >>> _print_me(hdr.get_slice_times())
        [None, '0.4', '0.1', '0.3', '0.0', '0.2', None]
        '''
        hdr = self._header_data
        _, _, slice_dim = self.get_dim_info()
        shape = self.get_data_shape()
        slice_len = shape[slice_dim]
        duration = self.get_slice_duration()
        slabel = self.get_slice_code()
        if slabel == 'unknown':
            raise HeaderDataError('Cannot get slice times when '
                                  'Slice code is "unknown"')
        slice_start, slice_end = (int(hdr['slice_start']),
                                  int(hdr['slice_end']))
        if slice_start < 0:
            raise HeaderDataError('slice_start should be >= 0')
        # slice_end of 0 means "last slice" (no trailing padding).
        if slice_end == 0:
            slice_end = slice_len-1
        n_timed = slice_end - slice_start + 1
        if n_timed < 1:
            raise HeaderDataError('slice_end should be > slice_start')
        # Acquisition order for the timed slices, scaled to actual times.
        st_order = self._slice_time_order(slabel, n_timed)
        times = st_order * duration
        # Surround the timed region with None for the padding slices.
        return ((None,)*slice_start +
                tuple(times) +
                (None,)*(slice_len-slice_end-1))
def set_slice_times(self, slice_times):
    ''' Set slice times into *hdr*

    Infers and stores 'slice_code', 'slice_start', 'slice_end' and
    'slice_duration' so that the header expresses the given timing.

    Parameters
    ----------
    slice_times : tuple
        tuple of slice times, one value per slice
        tuple can include None to indicate no slice time for that slice

    Examples
    --------
    >>> hdr = Nifti1Header()
    >>> hdr.set_dim_info(slice=2)
    >>> hdr.set_data_shape([1, 1, 7])
    >>> hdr.set_slice_duration(0.1)
    >>> times = [None, 0.2, 0.4, 0.1, 0.3, 0.0, None]
    >>> hdr.set_slice_times(times)
    >>> hdr.get_slice_code()
    'alternating decreasing'
    >>> int(hdr['slice_start'])
    1
    >>> int(hdr['slice_end'])
    5
    '''
    # Check if number of slices matches header
    hdr = self._header_data
    _, _, slice_dim = self.get_dim_info()
    shape = self.get_data_shape()
    slice_len = shape[slice_dim]
    if slice_len != len(slice_times):
        raise HeaderDataError('Number of slice times does not '
                              'match number of slices')
    # Extract Nones at beginning and end.  Check for others
    # (leading/trailing Nones become slice_start/slice_end; Nones in
    # the middle cannot be represented in a NIfTI header)
    for ind, time in enumerate(slice_times):
        if time is not None:
            slice_start = ind
            break
    else:
        raise HeaderDataError('Not all slice times can be None')
    for ind, time in enumerate(slice_times[::-1]):
        if time is not None:
            slice_end = slice_len-ind-1
            break
    timed = slice_times[slice_start:slice_end+1]
    for time in timed:
        if time is None:
            raise HeaderDataError('Cannot have None in middle '
                                  'of slice time vector')
    # Find slice duration, check times are compatible with single
    # duration (all gaps between sorted times must be equal)
    tdiffs = np.diff(np.sort(timed))
    if not np.allclose(np.diff(tdiffs), 0):
        raise HeaderDataError('Slice times not compatible with '
                              'single slice duration')
    duration = np.mean(tdiffs)
    # To slice time order (integer temporal position of each slice)
    st_order = np.round(np.array(timed) / duration)
    # Check if slice times fit known schemes; 'label' is left bound to
    # the matching scheme by the for/else
    n_timed = len(timed)
    labels = self._slice_order_codes.value_set('label')
    labels.remove('unknown')
    for label in labels:
        if np.all(st_order == self._slice_time_order(
                label,
                n_timed)):
            break
    else:
        raise HeaderDataError('slice ordering of %s fits '
                              'with no known scheme' % st_order)
    # Set values into header
    hdr['slice_start'] = slice_start
    hdr['slice_end'] = slice_end
    hdr['slice_duration'] = duration
    hdr['slice_code'] = slice_order_codes.code[label]
def for_file_pair(self, is_pair=True):
    ''' Adapt header to separate or same image and header file

    Parameters
    ----------
    is_pair : bool, optional
        True if adapting header to file pair state, False for single

    Returns
    -------
    hdr : Nifti1Header
        copied and possibly modified header

    Examples
    --------
    The header starts off as being for a single file

    >>> hdr = Nifti1Header()
    >>> str(hdr['magic'])
    'n+1'
    >>> hdr.get_data_offset()
    352

    But we can switch it to be for two files (a pair)

    >>> pair_hdr = hdr.for_file_pair()
    >>> str(pair_hdr['magic'])
    'ni1'
    >>> pair_hdr.get_data_offset()
    0

    The original header is not affected (a copy is returned)

    >>> hdr.get_data_offset()
    352

    Back to single again

    >>> unpair_hdr = pair_hdr.for_file_pair(False)
    >>> str(unpair_hdr['magic'])
    'n+1'
    >>> unpair_hdr.get_data_offset()
    352
    '''
    # always work on (and return) a copy; the original stays untouched
    hdr = self.copy()
    if is_pair:
        # two file version ('ni1'): image data lives in a separate file,
        # so the offset into the (nonexistent) single file is 0
        if hdr['magic'] != 'ni1':
            hdr['magic'] = 'ni1'
            hdr['vox_offset'] = 0
        return hdr
    # one file version ('n+1'): data follows the 348-byte header plus
    # extension flag, so the minimum offset is 352
    if hdr['magic'] != 'n+1':
        hdr['magic'] = 'n+1'
        hdr['vox_offset'] = 352
    elif hdr['vox_offset'] < 352:
        hdr['vox_offset'] = 352
    return hdr
def _slice_time_order(self, slabel, n_slices):
''' Supporting function to give time order of slices from label '''
if slabel == 'sequential increasing':
sp_ind_time_order = range(n_slices)
elif slabel == 'sequential decreasing':
sp_ind_time_order = range(n_slices)[::-1]
elif slabel == 'alternating increasing':
sp_ind_time_order = range(0,n_slices,2) + range(1, n_slices, 2)
elif slabel == 'alternating decreasing':
sp_ind_time_order = range(n_slices-1,-1,-2) + range(n_slices-2,-1,-2)
elif slabel == 'alternating increasing 2':
sp_ind_time_order = range(1,n_slices,2) + range(0, n_slices, 2)
elif slabel == 'alternating decreasing 2':
sp_ind_time_order = range(n_slices-2,-1,-2) + range(n_slices-1,-1,-2)
else:
raise HeaderDataError('We do not handle slice ordering "%s"'
% slabel)
return np.argsort(sp_ind_time_order)
''' Checks only below here '''
@classmethod
def _get_checks(klass):
    # Return the tuple of header check functions for this class.
    # We need to return our own versions of - e.g. chk_datatype, to
    # pick up the Nifti datatypes from our class.
    # NOTE(review): presumably applied in this order by the base
    # class's checking machinery - confirm against base class.
    return (klass._chk_sizeof_hdr,
            klass._chk_datatype,
            klass._chk_bitpix,
            klass._chk_pixdims,
            klass._chk_scale_slope,
            klass._chk_scale_inter,
            klass._chk_qfac,
            klass._chk_magic_offset,
            klass._chk_qform_code,
            klass._chk_sform_code)
@staticmethod
def _chk_scale_slope(hdr, fix=True):
    ''' Check 'scl_slope' is finite and non-zero; optionally fix to 1 '''
    rep = Report(hdr, HeaderDataError)
    slope = hdr['scl_slope']
    if np.isfinite(slope) and slope:
        # valid slope - nothing to report
        return rep
    rep.problem_msg = '"scl_slope" is %s; should !=0 and be finite' % slope
    if not fix:
        rep.level = 30
        return rep
    hdr['scl_slope'] = 1
    rep.fix_msg = 'setting "scl_slope" to 1'
    return rep
@staticmethod
def _chk_scale_inter(hdr, fix=True):
    ''' Check 'scl_inter' is finite; optionally fix to 0 '''
    rep = Report(hdr, HeaderDataError)
    inter = hdr['scl_inter']
    if not np.isfinite(inter):
        rep.problem_msg = '"scl_inter" is %s; should be finite' % inter
        if fix:
            hdr['scl_inter'] = 0
            rep.fix_msg = 'setting "scl_inter" to 0'
        else:
            rep.level = 30
    return rep
@staticmethod
def _chk_qfac(hdr, fix=True):
    ''' Check qfac (pixdim[0]) is +1 or -1; optionally fix to 1 '''
    rep = Report(hdr, HeaderDataError)
    qfac = hdr['pixdim'][0]
    if qfac == 1 or qfac == -1:
        return rep
    rep.problem_msg = 'pixdim[0] (qfac) should be 1 (default) or -1'
    if not fix:
        rep.level = 20
        return rep
    hdr['pixdim'][0] = 1
    rep.fix_msg = 'setting qfac to 1'
    return rep
@staticmethod
def _chk_magic_offset(hdr, fix=True):
    ''' Check consistency of the magic string and vox_offset

    'ni1' (separate header / image files) requires vox_offset == 0;
    'n+1' (single file) requires vox_offset >= 352, and SPM
    additionally expects a multiple of 16.
    '''
    ret = Report(hdr, HeaderDataError)
    magic = hdr['magic']
    offset = hdr['vox_offset']
    if magic == 'ni1': # two files
        if offset == 0:
            return ret
        # bug fix: the two adjacent string literals previously lacked a
        # separating space, producing "(is 0)with two-file ..."
        ret.problem_msg = ('vox offset should be 0 (is %s) '
                           'with two-file nifti images' % offset)
        ret.level = 40
        if fix:
            ret.fix_msg = 'leaving at current value'
    elif magic == 'n+1': # one file
        if offset >= 352:
            if not offset % 16:
                return ret
            else:
                # XXX Michael wonders, if this warning really valid? NIfTI
                # says that each extension's length has to be a multiple of
                # 16, therefore the test should be (offset-352) % 16 and
                # not offset % 16, or does SPM have additional artificial
                # limitations?
                ret.problem_msg = ('vox offset (=%s) not divisible '
                                   'by 16, not SPM compatible' % offset)
                ret.level = 30
                if fix:
                    ret.fix_msg = 'leaving at current value'
                return ret
        ret.problem_msg = ('vox offset %d too low for '
                           'single file nifti1' % offset)
        if fix:
            hdr['vox_offset'] = 352
            ret.fix_msg = 'setting to minimum value of 352'
        else:
            ret.level = 50
    else: # unrecognized nii magic string, oh dear
        ret.problem_msg = 'magic string %s is not valid' % magic
        ret.level = 50
        if fix:
            ret.fix_msg = 'leaving as is, but future errors are likely'
    return ret
@classmethod
def _chk_qform_code(klass, hdr, fix=True):
    ''' Check qform_code is a known transform code; optionally fix to 0 '''
    rep = Report(hdr, HeaderDataError)
    code = int(hdr['qform_code'])
    # reuse the already-converted code instead of re-reading the field
    if code not in klass._xform_codes.value_set():
        rep.problem_msg = 'qform code %d not valid' % code
        if fix:
            hdr['qform_code'] = 0
            rep.fix_msg = 'setting to 0'
        else:
            rep.level = 30
    return rep
@classmethod
def _chk_sform_code(klass, hdr, fix=True):
    ''' Check sform_code is a known transform code; optionally fix to 0 '''
    rep = Report(hdr, HeaderDataError)
    code = int(hdr['sform_code'])
    # reuse the already-converted code instead of re-reading the field
    if code not in klass._xform_codes.value_set():
        rep.problem_msg = 'sform code %d not valid' % code
        if fix:
            hdr['sform_code'] = 0
            rep.fix_msg = 'setting to 0'
        else:
            rep.level = 30
    return rep
class Nifti1Image(analyze.AnalyzeImage):
    # Analyze image specialized for NIfTI-1: adds header extension
    # handling and single-file ('n+1') vs header/image pair ('ni1')
    # file layouts on top of AnalyzeImage.
    _header_maker = Nifti1Header

    def _set_header(self, header=None):
        # Deliberately bypass analyze.AnalyzeImage._set_header and use
        # the SpatialImage implementation directly.
        SpatialImage._set_header(self, header)

    @staticmethod
    def filespec_to_files(filespec):
        # Map a file specification to {'header': ..., 'image': ...}.
        # Try single-file (.nii/.nii) naming first, then the
        # header/image pair (.hdr/.img); .gz/.bz2 suffixes are ignored
        # when matching.
        ft1 = filetuples.FileTuples(
            (('header', '.nii'), ('image', '.nii')),
            ignored_suffixes=('.gz', '.bz2')
            )
        ft2 = filetuples.FileTuples(
            (('header', '.hdr'), ('image', '.img')),
            ignored_suffixes=('.gz', '.bz2')
            )
        for ftups in (ft1, ft2):
            try:
                ftups.set_filenames(filespec)
            # try the next naming scheme on failure
            except filetuples.FileTuplesError:
                continue
            break
        else:
            raise ValueError('Filespec "%s" does not '
                             'look like Nifti1' % filespec)
        files = dict(zip(('header', 'image'), ftups.get_filenames()))
        return files

    @classmethod
    def from_files(klass, files):
        # Build an image from a {'header': ..., 'image': ...} dict;
        # data is not loaded here (first arg to klass() is None).
        fname = files['header']
        fileobj = allopen(fname)
        header = klass._header_maker.from_fileobj(fileobj)
        extra = None

        # handle extensions
        # assume the fileptr is just after header (magic field)
        # determine how much to read when parsing the extensions
        if header['vox_offset'] == 0:
            # read till the end of the header
            extsize = -1
        else:
            # in a single file the extensions end where the data starts
            extsize = header['vox_offset'] - fileobj.tell()
        extensions = Nifti1Extensions.from_fileobj(fileobj, extsize)
        # XXX maybe always do that?
        if len(extensions):
            extra = {'extensions': extensions}

        affine = header.get_best_affine()
        ret = klass(None, affine, header=header, extra=extra)
        ret._files = files
        return ret

    def to_files(self, files=None):
        ''' Write image to files passed, or self._files
        '''
        # XXX the whole method is candidate for refactoring, since it started as
        # verbatim copy of AnalyzeImage.to_files()
        if files is None:
            files = self._files
        if files is None:
            raise ValueError('Need files to write data')
        data = self.get_data()
        # Adapt header to possible two<->one file difference
        is_pair = files['header'] != files['image']
        hdr = self.get_header().for_file_pair(is_pair)

        # if any extensions, figure out necessary vox_offset for extensions to
        # fit
        if self.extra.has_key('extensions') and len(self.extra['extensions']):
            hdr['vox_offset'] = len(hdr.binaryblock) \
                                + self.extra['extensions'].get_sizeondisk()

        slope, inter, mn, mx = adapt_header(hdr, data)
        hdrf = allopen(files['header'], 'wb')
        hdr.write_to(hdrf)

        # write all extensions to file
        # assumes that the file ptr is right after the magic string
        if not self.extra.has_key('extensions'):
            # no extensions: be nice and write appropriate flag
            hdrf.write(np.array((0,0,0,0), dtype=np.int8).tostring())
        else:
            self.extra['extensions'].write_to(hdrf)

        if is_pair:
            imgf = allopen(files['image'], 'wb')
        else: # single file for header and image
            imgf = hdrf
            # streams like bz2 do not allow seeks, even forward.  We
            # check where to go, and write zeros up until the data part
            # of the file
            offset = hdr.get_data_offset()
            diff = offset-hdrf.tell()
            if diff > 0:
                hdrf.write('\x00' * diff)

        write_data(hdr, data, imgf, inter, slope, mn, mx)
        self._header = hdr
        self._files = files

    def _update_header(self):
        ''' Harmonize header with image data and affine

        See AnalyzeImage._update_header for more examples

        Examples
        --------
        >>> data = np.zeros((2,3,4))
        >>> affine = np.diag([1.0,2.0,3.0,1.0])
        >>> img = Nifti1Image(data, affine)
        >>> hdr = img.get_header()
        >>> np.all(hdr.get_qform() == affine)
        True
        >>> np.all(hdr.get_sform() == affine)
        True
        '''
        super(Nifti1Image, self)._update_header()
        hdr = self._header
        # keep both coordinate transforms in sync with the affine
        if not self._affine is None:
            hdr.set_sform(self._affine)
            hdr.set_qform(self._affine)
# Module-level convenience aliases for loading / saving NIfTI-1 images
load = Nifti1Image.load
save = Nifti1Image.save
| satra/NiPypeold | nipype/externals/pynifti/nifti1.py | Python | bsd-3-clause | 53,052 | [
"Gaussian"
] | 73ebef063bc96bc4c1fac9c1979cb8a8762e22961929d9bbe73830d3a2597c42 |
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import moose
import pylab
import numpy
def main():
    """
    Hello-MOOSE example: load a kinetic model from an external model
    definition file, run it, and plot the recorded results.

    moose.loadModel() is the core call here: it accepts a range of
    file and model types, including kkit, cspace and GENESIS .p
    files, autodetects the file type and builds the simulation.
    moose.wildcardFind() then locates all objects matching the given
    path - here the Table objects holding the simulation results,
    which were all defined in the model file itself.
    """
    mfile = '../genesis/kkit_objects_example.g'
    modelId = moose.loadModel( mfile, 'model', 'gsl' )  # load the model
    moose.reinit()        # set initial conditions
    moose.start( 20.0 )   # run for 20 seconds of simulated time
    # plot every concentration table defined under the model's graphs
    for table in moose.wildcardFind( '/model/#graphs/conc#/#' ):
        pylab.plot( table.vector, label=table.name )
    pylab.legend()
    pylab.show()
# Run the demo only when executed as a script, not on import
if __name__ == '__main__':
    main()
| BhallaLab/moose-examples | snippets/helloMoose.py | Python | gpl-2.0 | 1,500 | [
"MOOSE"
] | 951216ac76366a615ac22e913b5b99eb8df6b804e065dcb77f5a1207d861fc86 |
# fMBT, free Model Based Testing tool
# Copyright (c) 2012, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
"""
eyenfinger - GUI testing library based on OCR and X event generation
Configuring low-level key presses
---------------------------------
printEventsFromFile() prints input events from Linux chosen
/dev/input/eventXX file. Example:
python -c '
import eyenfinger
eyenfinger.printEventsFromFile("/dev/input/event0")
'
Alternatively, you can use device names in /proc/bus/input/devices and
printEventsFromDevice("device name").
Configuring OCR
---------------
autoconfigure() evaluates number of preprocessing filters to give the
best result on finding given words from given image. Example:
python -c '
from eyenfinger import *
autoconfigure("screenshot.png", ["Try", "to", "find", "these", "words"])
'
evaluatePreprocessFilter() highlights words detected on given image. Example:
python -c '
from eyenfinger import *
evaluatePreprocessFilter("screenshot.png", "-sharpen 5 -resize 1600x", ["File", "View"])
'
setPreprocessFilter() sets given filter to be used when reading text from images.
Debugging
---------
iClickWord() capture parameter visualises coordinates to be clicked. Example:
python -c '
from eyenfinger import *
setPreprocessFilter("-sharpen 5 -filter Mitchell -resize 1600x -level 40%,50%,3.0")
iRead(source="screenshot.png")
iClickWord("[initial", clickPos=(-2,3), capture="highlight.png", dryRun=True)
'
"""
import distutils.sysconfig
import time
import subprocess
import re
import math
import htmlentitydefs
import sys
import os
import tempfile
import atexit
import shutil
import ctypes
import platform
import struct
import warnings
def _DEPRECATED():
    # Emit a DeprecationWarning pointing users of the old eyenfinger
    # API to fmbtx11; stacklevel=2 attributes the warning to the
    # caller rather than to this helper.
    warnings.warn("eyenfinger.py API is deprecated, use fmbtx11 instead.",
                  DeprecationWarning, stacklevel=2)
_g_preprocess = "-sharpen 5 -filter Mitchell -resize 1920x1600 -level 40%%,70%%,5.0 -sharpen 5"
_g_readImage = None
_g_origImage = None
_g_hocr = ""
_g_words = None
_g_lastWindow = None
_g_defaultClickDryRun = False
_g_defaultDelayedDrawing = False
_g_defaultIconMatch = 1.0
_g_defaultIconColorMatch = 1.0
_g_defaultIconOpacityLimit = 0.0
_g_defaultInputKeyDevice = None
_g_defaultReadWithOCR = True
# windowsOffsets maps window-id to (x, y) pair.
_g_windowOffsets = {None: (0,0)}
# windowsSizes maps window-id to (width, height) pair.
_g_windowSizes = {None: (0,0)}
# screenSize is a (width, height) pair.
_g_screenSize = (0, 0)
_g_tempdir = tempfile.mkdtemp(prefix="eyenfinger.%s." % (os.getpid(),))
SCREENSHOT_FILENAME = _g_tempdir + "/screenshot.png"
LOG_FILENAME = _g_tempdir + "/eyenfinger.log"
MOUSEEVENT_MOVE, MOUSEEVENT_CLICK, MOUSEEVENT_DOWN, MOUSEEVENT_UP = range(4)
# Xkeys contains key names known to X11, see keysymdef.h.
Xkeys = [
"BackSpace", "Tab", "Linefeed", "Clear", "Return", "Pause",
"Scroll_Lock", "Sys_Req", "Escape", "Delete", "Home", "Left",
"Up", "Right", "Down", "Prior", "Page_Up", "Next", "Page_Down",
"End", "Begin", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8",
"F9", "F10", "F11", "F12", "Shift_L", "Shift_R", "Control_L",
"Control_R", "Caps_Lock", "Shift_Lock", "Meta_L", "Meta_R",
"Alt_L", "Alt_R", "space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "apostrophe", "quoteright",
"parenleft", "parenright", "asterisk", "plus", "comma", "minus",
"period", "slash", "0", "1", "2", "3", "4", "5", "6", "7", "8",
"9", "colon", "semicolon", "less", "equal", "greater", "question",
"at", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L",
"M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y",
"Z", "bracketleft", "backslash", "bracketright", "asciicircum",
"underscore", "grave", "quoteleft", "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
"t", "u", "v", "w", "x", "y", "z", "braceleft", "bar",
"braceright"]
# InputKeys contains key names known to input devices, see
# linux/input.h or http://www.usb.org/developers/hidpage. The order is
# significant, because keyCode = InputKeys.index(keyName).
InputKeys = [
"RESERVED", "ESC","1", "2", "3", "4", "5", "6", "7", "8", "9", "0",
"MINUS", "EQUAL", "BACKSPACE", "TAB",
"Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P",
"LEFTBRACE", "RIGHTBRACE", "ENTER", "LEFTCTRL",
"A", "S", "D", "F", "G", "H", "J", "K", "L",
"SEMICOLON", "APOSTROPHE", "GRAVE", "LEFTSHIFT", "BACKSLASH",
"Z", "X", "C", "V", "B", "N", "M",
"COMMA", "DOT", "SLASH", "RIGHTSHIFT", "KPASTERISK", "LEFTALT",
"SPACE", "CAPSLOCK",
"F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10",
"NUMLOCK", "SCROLLLOCK",
"KP7", "KP8", "KP9", "KPMINUS",
"KP4", "KP5", "KP6", "KPPLUS",
"KP1", "KP2", "KP3", "KP0", "KPDOT",
"undefined0",
"ZENKAKUHANKAKU", "102ND", "F11", "F12", "RO",
"KATAKANA", "HIRAGANA", "HENKAN", "KATAKANAHIRAGANA", "MUHENKAN",
"KPJPCOMMA", "KPENTER", "RIGHTCTRL", "KPSLASH", "SYSRQ", "RIGHTALT",
"LINEFEED", "HOME", "UP", "PAGEUP", "LEFT", "RIGHT", "END", "DOWN",
"PAGEDOWN", "INSERT", "DELETE", "MACRO",
"MUTE", "VOLUMEDOWN", "VOLUMEUP",
"POWER",
"KPEQUAL", "KPPLUSMINUS", "PAUSE", "SCALE", "KPCOMMA", "HANGEUL",
"HANGUEL", "HANJA", "YEN", "LEFTMETA", "RIGHTMETA", "COMPOSE"]
_inputKeyShorthands = {
"-": "MINUS", "=": "EQUAL",
"[": "LEFTBRACE", "]": "RIGHTBRACE", "\n": "ENTER",
";": "SEMICOLON",
",": "COMMA", ".": "DOT", "/": "SLASH",
" ": "SPACE" }
class EyenfingerError(Exception):
    # Base class for all errors raised by eyenfinger
    pass

class BadMatch (EyenfingerError):
    # A searched word/text/icon was not found with a good enough score
    pass

class BadWindowName (EyenfingerError):
    # No window chosen yet, or a window id/title could not be resolved
    pass

class BadSourceImage(EyenfingerError):
    # Problem with the source image being read/searched (see call sites)
    pass

class BadIconImage(EyenfingerError):
    # Problem with the icon image being searched for (see call sites)
    pass

class NoOCRResults(EyenfingerError):
    # OCR results required but not available (e.g. iRead not called
    # with ocr=True, or OCR produced no output)
    pass
try:
    # When running inside fMBT, route messages to the adapter log
    import fmbt
    def _log(msg):
        fmbt.adapterlog("eyenfinger: %s" % (msg,))
except ImportError:
    # Stand-alone use: append timestamped messages to LOG_FILENAME
    def _log(msg):
        file(LOG_FILENAME, "a").write("%13.2f %s\n" %
                                      (time.time(), msg))
try:
    import ctypes
    # Candidate directories for eye4graphics.so: current dir, local
    # build dirs, the module's own dir, and platform site-packages.
    _libpath = ["",
                ".",
                os.path.join(".", ".libs"),
                os.path.dirname(__file__),
                os.path.join(os.path.dirname(__file__), ".libs"),
                distutils.sysconfig.get_python_lib(plat_specific=1)]
    for _dirname in _libpath:
        try:
            eye4graphics = ctypes.CDLL(os.path.join(_dirname, "eye4graphics.so"))
            break
        except: pass
    else:
        raise ImportError("%s cannot load eye4graphics.so" % (__file__,))

    class Bbox(ctypes.Structure):
        # C-compatible bounding box struct passed to eye4graphics calls
        _fields_ = [("left", ctypes.c_int32),
                    ("top", ctypes.c_int32),
                    ("right", ctypes.c_int32),
                    ("bottom", ctypes.c_int32),
                    ("error", ctypes.c_int32)]
except Exception, e:
    # Icon recognition becomes unavailable; OCR-based functions still
    # work without the native library.
    Bbox = None
    eye4graphics = None
    _log('Loading icon recognition library failed: "%s".' % (e,))
# See struct input_event in /usr/include/linux/input.h
if platform.architecture()[0] == "32bit":
_InputEventStructSpec = 'IIHHi'
else:
_InputEventStructSpec = 'QQHHi'
# Event and keycodes are in input.h, too.
_EV_KEY = 0x01
# _inputKeyNameCodeMap is a dictionary keyName -> keyCode
_inputKeyNameCodeMap = {}
for code, name in enumerate(InputKeys):
_inputKeyNameCodeMap[name] = code
def _inputKeyNameToCode(keyName):
    # Translate a key name (or a shorthand such as "-" or " ") into
    # its Linux input event key code.
    try:
        return _inputKeyNameCodeMap[keyName]
    except KeyError:
        pass
    try:
        return _inputKeyNameCodeMap[_inputKeyShorthands[keyName]]
    except KeyError:
        raise ValueError('Invalid key name "%s"' % (keyName,))
def error(msg, exitstatus=1):
    """Write an eyenfinger error message to stderr and exit.

    Bug fix: the function previously always exited with status 1,
    silently ignoring its exitstatus parameter; it now exits with the
    given exitstatus.
    """
    sys.stderr.write("eyenfinger: %s\n" % (msg,))
    sys.exit(exitstatus)
def printEventsFromFile(filename):
    # Dump raw Linux input events (struct input_event records) from a
    # /dev/input/eventXX file until EOF, one line per event.
    fd = os.open(filename, os.O_RDONLY)

    try:
        while 1:
            # one fixed-size struct input_event record per read
            evString = os.read(fd, struct.calcsize(_InputEventStructSpec))
            if not evString: break

            tim, tus, typ, cod, val = struct.unpack(_InputEventStructSpec, evString)

            # resolve the key code to a readable name when possible
            if cod < len(InputKeys):
                nam = InputKeys[cod]
            else:
                nam = "N/A"

            print "time: %8s, susc: %8s, type: %8s, keyCode: %5s name: %10s value: %8s" % \
                (tim, tus, typ, cod, nam, val)
    finally:
        os.close(fd)
def printEventsFromDevice(deviceName):
    # Resolve an input device name (as listed in
    # /proc/bus/input/devices) to its /dev/input/eventXX file and dump
    # its events.  _listInputDevices is defined elsewhere in this
    # module.
    devices = dict(_listInputDevices())
    try:
        eventFile = devices[deviceName]
    except KeyError:
        error('Unknown device "%s". Available devices: %s' %
              (deviceName, sorted(devices.keys())))
    else:
        printEventsFromFile(eventFile)
def _exitHandler():
    # Remove the per-process temporary directory (screenshots, OCR
    # output) when the interpreter exits.
    shutil.rmtree(_g_tempdir, ignore_errors=True)
atexit.register(_exitHandler)
def _runcmd(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.stdout.read()
exit_status = p.wait()
if p.wait() != 0:
_log("runcmd: " + cmd)
_log("exit status: " + str(exit_status))
_log("stdout: " + output)
_log("stderr: " + p.stderr.read())
else:
p.stderr.read()
return exit_status, output
def _runDrawCmd(inputfilename, cmd, outputfilename):
    # Apply an ImageMagick 'convert' drawing command to an image,
    # either immediately, or - when delayed drawing is enabled, see
    # iSetDefaultDelayedDrawing - by appending the command to
    # <outputfile>.delayeddraw for offline execution after the test.
    if not _g_defaultDelayedDrawing:
        return _runcmd("convert %s %s %s" % (inputfilename, cmd, outputfilename))

    # Do delayed drawing to save test execution time. If the output
    # file does not exist, just copy inputfile to outputfile and start
    # logging delayed draw commands to
    # outputfile.delayeddraw. Otherwise append latest command to
    # outputfile.delayeddraw.
    delayedCmd = "convert %s %s %s\n" % (outputfilename, cmd, outputfilename)
    delayedDrawFilename = outputfilename + ".delayeddraw"
    try:
        if os.access(outputfilename, os.R_OK) == False:
            shutil.copy(inputfilename, outputfilename)
            file(delayedDrawFilename, "w").write(delayedCmd)
        else:
            file(delayedDrawFilename, "a").write(delayedCmd)
    except:
        _log("error on delayed drawing: %s" % (delayedCmd,))
        raise
    _log("delayed drawing: %s" % (delayedCmd,))
    # mimic _runcmd's (exit_status, output) return shape
    return (0, "")
def _safeForShell(s):
    # convert all non-ascii and bad chars to _
    # (prevents quote/backslash/backtick injection when s is embedded
    # in a shell command line)
    try: s = unicode(s, "utf-8")
    except: pass
    return ''.join([(c, "_")[ord(c)>128 or c in "'\"\\`"] for c in s])
def _coordsToInt((x,y), (width, height)=(None, None)):
"""
Convert percentages to screen coordinates
"""
if (width == None or height == None):
width, height = screenSize()
if 0.0 <= x <= 1.0 and type(x) == float:
x = int(round(x * width))
else:
x = int(x)
if 0.0 <= y <= 1.0 and type(y) == float:
y = int(round(y * height))
else:
y = int(y)
return (x, y)
def setPreprocessFilter(preprocess):
    # Set the ImageMagick 'convert' options that are applied to
    # screenshots before they are passed to OCR (see iRead).
    global _g_preprocess
    _g_preprocess = preprocess
def iSetDefaultClickDryRun(dryRun):
    """
    Set the default value for optional dryRun parameter for iClick*
    functions.
    """
    # module-wide default; individual iClick* calls can still override
    global _g_defaultClickDryRun
    _g_defaultClickDryRun = dryRun
def iSetDefaultDelayedDrawing(delayedDrawing):
    """
    Set the default for delaying drawing operations on captured
    screenshots.

    If delayedDrawing == False, drawing actions on screenshots (like
    highlighting icon and clicked coordinates) takes place during the
    function execution (like iClickIcon).

    If delayedDrawing == True, the screenshot is saved without
    highlighted areas, and <screenshot filename>.delayeddraw file
    contains all draw commands that can be executed after the test
    run. This may save a lot test execution time and CPU on the device
    that runs eyenfinger.

    The default is False.
    """
    # consumed by _runDrawCmd
    global _g_defaultDelayedDrawing
    _g_defaultDelayedDrawing = delayedDrawing
def iSetDefaultIconMatch(match):
    """
    Set the default icon matching value, ranging from 0 to 1. The
    value will be used in iClickIcon and iVerifyIcon, if the optional
    match parameter is omitted. Value 1.0 will use pixel-perfect
    matching (the default), values below 1.0 will use fuzzy matching.

    Fuzzy matching is EXPERIMENTAL.
    """
    global _g_defaultIconMatch
    _g_defaultIconMatch = match
def iSetDefaultIconColorMatch(colorMatch):
    """
    Set the default color matching value, ranging from 0 to 1. When
    using pixel-perfect matching this will allow given error in pixel
    colors.

    For instance, when comparing 24 bit RGB images, value 0.97 will
    allow 256 - int(256 * .97) = 8 difference on each color channel.
    """
    global _g_defaultIconColorMatch
    _g_defaultIconColorMatch = colorMatch
def iSetDefaultIconOpacityLimit(opacityLimit):
    """
    Set the default minimum opacity for pixels to be matched. Defaults
    to 0.0, all pixels are matched independently of their opacity.
    """
    global _g_defaultIconOpacityLimit
    _g_defaultIconOpacityLimit = opacityLimit
def iSetDefaultInputKeyDevice(deviceName):
    """
    Use deviceName as a default input device for iInputKey.

    Example:
        iSetDefaultInputKeyDevice("/dev/input/event0")
        iInputKey(["enter"])
    """
    global _g_defaultInputKeyDevice
    _g_defaultInputKeyDevice = deviceName
def iSetDefaultReadWithOCR(ocr):
    """
    Set the default for using OCR when reading images or windows
    (the ocr parameter of iRead).
    """
    global _g_defaultReadWithOCR
    _g_defaultReadWithOCR = ocr
def screenSize():
    """
    Returns the size of the screen as a pair (width, height).
    """
    # lazily query the screen dimensions on first use;
    # NOTE(review): _getScreenSize (defined elsewhere in this module)
    # is expected to update _g_screenSize - confirm.
    if _g_screenSize == (0, 0):
        _getScreenSize()
    return _g_screenSize
def windowSize():
    """
    Return the size of the current window as a pair (width, height).

    Choose a window first, for instance with iRead() or iUseWindow().
    """
    if _g_lastWindow is None:
        raise BadWindowName("undefined window")
    return _g_windowSizes[_g_lastWindow]
def windowXY():
    """
    Return screen coordinates of the top-left corner of the current
    window as a pair (x, y).

    Choose a window first, for instance with iRead() or iUseWindow().
    """
    if _g_lastWindow is None:
        raise BadWindowName("undefined window")
    return _g_windowOffsets[_g_lastWindow]
def imageSize(imageFilename):
    """
    Returns image size as pair (width, height), or (None, None) if
    the image dimensions cannot be read.
    """
    # eye4graphics fills the bounding box in-place; right/bottom of a
    # zero-origin box give the image dimensions
    struct_bbox = Bbox(0,0,0,0,0)
    err = eye4graphics.imageDimensions(ctypes.byref(struct_bbox),
                                       imageFilename)
    if err != 0:
        return None, None
    return struct_bbox.right, struct_bbox.bottom
def iRead(windowId = None, source = None, preprocess = None, ocr=None, capture=None, ocrArea=(0, 0, 1.0, 1.0), ocrPageSegModes=(3,)):
    """
    DEPRECATED - use fmbtx11.Screen.refreshScreenshot instead.

    Read the contents of the given window or other source. If neither
    of windowId or source is given, reads the contents of active
    window. iClickWord and iVerifyWord can be used after reading with
    OCR.

    Parameters:
        windowId     id (0x....) or the title of the window to be read.
                     Defaults to None.

        source       name of the file to be read, for instance a screen
                     capture. Defaults to None.

        preprocess   preprocess specification to override the default
                     that is set using setPreprocessFilter. Defaults
                     to None. Set to "" to disable preprocessing before
                     OCR.

        ocr          words will be read using OCR if True
                     (the default). Read object can be used with
                     iClickIcon and iVerifyIcon without OCR, too.

        capture      save image with read words highlighted to this
                     file. Default: None (nothing is saved).

        ocrArea      (top, left, right, bottom) coordinates -
                     area of the image to be read with OCR.

        ocrPageSegModes
                     tuple of integers, see tesseract -pagesegmodes

    Returns list of words detected by OCR from the read object.
    """
    global _g_hocr
    global _g_lastWindow
    global _g_words
    global _g_readImage
    global _g_origImage

    # reset module state from any previous read
    _g_words = None
    _g_readImage = None
    _g_origImage = None

    if ocr == None:
        ocr = _g_defaultReadWithOCR

    if not source:
        iUseWindow(windowId)

        # take a screenshot of the whole screen and crop the window area
        _runcmd("xwd -root -screen -out %s.xwd && convert %s.xwd -crop %sx%s+%s+%s +repage '%s'" %
                (SCREENSHOT_FILENAME, SCREENSHOT_FILENAME,
                 _g_windowSizes[_g_lastWindow][0], _g_windowSizes[_g_lastWindow][1],
                 _g_windowOffsets[_g_lastWindow][0], _g_windowOffsets[_g_lastWindow][1],
                 SCREENSHOT_FILENAME))
        source = SCREENSHOT_FILENAME
    else:
        iUseImageAsWindow(source)
    _g_origImage = source

    # resolve the OCR area (possibly fractional) to pixel coordinates
    orig_width, orig_height = _g_windowSizes[_g_lastWindow][0], _g_windowSizes[_g_lastWindow][1]
    x1, y1 = _coordsToInt(ocrArea[:2], (orig_width, orig_height))
    x2, y2 = _coordsToInt(ocrArea[2:], (orig_width, orig_height))
    if x2 <= x1 or y2 <= y1:
        raise EyenfingerError("Invalid area size: %s => %s" % (ocrArea, (x1, y1, x2, y2)))
    if orig_width <= 0 or orig_height <= 0:
        raise EyenfingerError("Invalid image size: %sx%s" % (orig_width, orig_height))

    if not ocr:
        # no OCR requested: image is usable for icon search only
        if capture:
            drawWords(_g_origImage, capture, [], [])
        return []

    if preprocess == None:
        preprocess = _g_preprocess

    # convert to text
    _g_readImage = _g_origImage + "-pp.png"
    if ocrArea == (0, 0, 1.0, 1.0):
        croparea = ""
        wordXOffset = 0
        wordYOffset = 0
    else:
        croparea = "-crop %sx%s+%s+%s +repage" % (x2-x1, y2-y1, x1, y1)
        wordXOffset = x1
        wordYOffset = y1
        # rescale possible resize preprocessing parameter so the
        # resize stays proportional to the cropped area
        resize_m = re.search('-resize ([0-9]+)x([0-9]*)', preprocess)
        if resize_m:
            origXResize = int(resize_m.group(1))
            newXResize = int(origXResize/float(orig_width) * (x2-x1))
            preprocess = (preprocess[:resize_m.start()] +
                          ("-resize %sx" % (newXResize,)) +
                          preprocess[resize_m.end():])

    _g_words = {}
    for psm in ocrPageSegModes:
        # preprocess with ImageMagick, then run tesseract in hocr mode
        cmd = "convert %s %s %s %s && tesseract %s %s -l eng -psm %s hocr" % (
            _g_origImage, croparea, preprocess, _g_readImage,
            _g_readImage, SCREENSHOT_FILENAME, psm)
        _, _g_hocr = _runcmd(cmd)
        hocr_filename = SCREENSHOT_FILENAME + ".html"
        if not os.access(hocr_filename, os.R_OK):
            raise NoOCRResults("HOCR output missing. Tesseract OCR 3.02 or greater required.")

        # store every word and its coordinates
        _g_words.update(_hocr2words(file(hocr_filename).read()))

    # convert word coordinates to the unscaled pixmap
    try:
        ocr_page_line = [line for line in file(hocr_filename).readlines() if "class='ocr_page'" in line][0]
    except IndexError:
        raise NoOCRResults("Could not read ocr_page class information from %s" % (hocr_filename,))
    scaled_width, scaled_height = re.findall('bbox 0 0 ([0-9]+)\s*([0-9]+)', ocr_page_line)[0]
    scaled_width, scaled_height = float(scaled_width) / (float(x2-x1)/orig_width), float(scaled_height) / (float(y2-y1)/orig_height)

    # map each word's middle point and bounding box from the scaled
    # OCR image back to original image coordinates (plus crop offset)
    for word in sorted(_g_words.keys()):
        for appearance, (wordid, middle, bbox) in enumerate(_g_words[word]):
            _g_words[word][appearance] = \
                (wordid,
                 (int(middle[0]/scaled_width * orig_width) + wordXOffset,
                  int(middle[1]/scaled_height * orig_height) + wordYOffset),
                 (int(bbox[0]/scaled_width * orig_width) + wordXOffset,
                  int(bbox[1]/scaled_height * orig_height) + wordYOffset,
                  int(bbox[2]/scaled_width * orig_width) + wordXOffset,
                  int(bbox[3]/scaled_height * orig_height) + wordYOffset))
            _log('found "' + word + '": (' + str(bbox[0]) + ', ' + str(bbox[1]) + ')')
    if capture:
        drawWords(_g_origImage, capture, _g_words, _g_words)
    return sorted(_g_words.keys())
def iVerifyWord(word, match=0.33, appearance=1, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.verifyOcrText instead.

    Verify that word can be found from previously iRead() image.

    Parameters:
        word         word that should be checked

        appearance   if word appears many times, appearance to
                     be clicked. Defaults to the first one.

        match        minimum matching score

        capture      save image with verified word highlighted
                     to this file. Default: None (nothing is saved).

    Returns pair: ((score, matchingWord), (left, top, right, bottom)), where

        score        score of found match (1.0 for perfect match)

        matchingWord corresponding word detected by OCR

        (left, top, right, bottom)
                     bounding box of the word in read image

    Throws BadMatch error if word is not found.

    Throws NoOCRResults error if there are no OCR results available
    on the current screen.
    """
    if _g_words == None:
        raise NoOCRResults('iRead has not been called with ocr=True')
    # fuzzy-match the requested word against the OCR'd words
    score, matching_word = findWord(word)

    if capture:
        drawWords(_g_origImage, capture, [word], _g_words)

    if score < match:
        raise BadMatch('No matching word for "%s". The best candidate "%s" with score %.2f, required %.2f' %
                       (word, matching_word, score, match))
    # return bounding box of the requested appearance (1-based index)
    return ((score, matching_word), _g_words[matching_word][appearance-1][2])
def iVerifyText(text, match=0.33, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.verifyOcrText instead.
    Verify that text can be found from previously iRead() image.
    Parameters:
      text         multiple words that should be checked
      match        minimum matching score
      capture      save image with verified text highlighted
                   to this file. Default: None (nothing is saved).
    Returns pair:
        ((score, matchingText), (left, top, right, bottom)), where
      score        score of found match (1.0 for perfect match)
      matchingText corresponding text detected by OCR
      (left, top, right, bottom)
                   bounding box of the text in read image
    Throws BadMatch error if text is not found.
    Throws NoOCRResults error if there are no OCR results available
    on the current screen.
    """
    if _g_words == None:
        raise NoOCRResults('iRead has not been called with ocr=True')
    # Fix: match must be passed by keyword; findText's signature is
    # (text, detected_words=None, match=-1), so a positional second
    # argument would have been taken as detected_words.
    score_text_bbox_list = findText(text, match=match)
    if len(score_text_bbox_list) == 0:
        # Fix: the original message referenced an unassigned "score";
        # the required threshold is "match".
        raise BadMatch('No match >= %s for text "%s"' % (match, text))
    # findText returns results sorted in ascending score order; take the
    # best candidate (consistent with findWord, which returns the best).
    # Fix: the original indexed a misspelled name (score_text_box_list)
    # and returned an undefined "matching_text", so it always raised
    # NameError when matches existed.
    score, matching_text, bbox = score_text_bbox_list[-1]
    if capture:
        drawBbox(_g_origImage, capture, bbox, "%.2f %s" % (score, matching_text))
    return ((score, matching_text), bbox)
def iVerifyIcon(iconFilename, match=None, colorMatch=None, opacityLimit=None, capture=None, area=(0.0, 0.0, 1.0, 1.0), _origin="iVerifyIcon"):
    """
    DEPRECATED - use fmbtx11.Screen.verifyBitmap instead.
    Verify that icon can be found from previously iRead() image.
    Parameters:
      iconFilename name of the icon file to be searched for
      match        minimum matching score between 0 and 1.0,
                   1.0 is perfect match (default)
      colorMatch   1.0 (default) requires exact color match. Value
                   below 1.0 defines maximum allowed color
                   difference. See iSetDefaultIconColorMatch.
      opacityLimit 0.0 (default) requires exact color values
                   independently of opacity. If lower than 1.0,
                   pixel less opaque than given value are skipped
                   in pixel perfect comparisons.
      capture      save image with verified icon highlighted
                   to this file. Default: None (nothing is saved).
      area         rectangle (left, top, right, bottom). Search
                   icon inside this rectangle only. Values can be
                   absolute coordinates, or floats in range [0.0,
                   1.0] that will be scaled to image dimensions.
                   The default is (0.0, 0.0, 1.0, 1.0), that is
                   full rectangle.
    Returns pair: (score, (left, top, right, bottom)), where
      score        score of found match (1.0 for perfect match)
      (left, top, right, bottom)
                   bounding box of found icon
    Throws BadMatch error if icon is not found.
    """
    # Validate environment and arguments before calling into the C library.
    if not eye4graphics:
        _log('ERROR: %s("%s") called, but eye4graphics not loaded.' % (_origin, iconFilename))
        raise EyenfingerError("eye4graphics not available")
    if not _g_origImage:
        _log('ERROR %s("%s") called, but source not defined (iRead not called).' % (_origin, iconFilename))
        raise BadSourceImage("Source image not defined, cannot search for an icon.")
    if not (os.path.isfile(iconFilename) and os.access(iconFilename, os.R_OK)):
        _log('ERROR %s("%s") called, but the icon file is not readable.' % (_origin, iconFilename))
        raise BadIconImage('Icon "%s" is not readable.' % (iconFilename,))
    if match == None:
        match = _g_defaultIconMatch
    if match > 1.0:
        _log('ERROR %s("%s"): invalid match value, must be below 1.0. ' % (_origin, iconFilename,))
        raise ValueError("invalid match value: %s, should be 0 <= match <= 1.0" % (match,))
    if colorMatch == None:
        colorMatch = _g_defaultIconColorMatch
    if not 0.0 <= colorMatch <= 1.0:
        _log('ERROR %s("%s"): invalid colorMatch value, must be between 0 and 1. ' % (_origin, iconFilename,))
        raise ValueError("invalid colorMatch value: %s, should be 0 <= colorMatch <= 1.0" % (colorMatch,))
    if opacityLimit == None:
        opacityLimit = _g_defaultIconOpacityLimit
    if not 0.0 <= opacityLimit <= 1.0:
        _log('ERROR %s("%s"): invalid opacityLimit value, must be between 0 and 1. ' % (_origin, iconFilename,))
        raise ValueError("invalid opacityLimit value: %s, should be 0 <= opacityLimit <= 1.0" % (opacityLimit,))
    # NOTE(review): the width check uses ">" but the height check uses ">=",
    # so a zero-width area passes while a zero-height one is rejected —
    # looks unintentional; confirm before changing.
    if area[0] > area[2] or area[1] >= area[3]:
        raise ValueError("invalid area: %s, should be rectangle (left, top, right, bottom)" % (area,))
    # Scale possibly-relative area coordinates to pixels; the trailing 0
    # fills the Bbox struct's extra (error) field.
    leftTopRightBottomZero = (_coordsToInt((area[0], area[1]), windowSize()) +
                              _coordsToInt((area[2], area[3]), windowSize()) +
                              (0,))
    struct_area_bbox = Bbox(*leftTopRightBottomZero)
    # Output parameter filled in by findSingleIcon.
    struct_bbox = Bbox(0,0,0,0,0)
    # Map match score [0..1] to the integer threshold [20..0] expected by
    # eye4graphics (perfect match 1.0 -> threshold 0).
    threshold = int((1.0-match)*20)
    err = eye4graphics.findSingleIcon(ctypes.byref(struct_bbox),
                                      _g_origImage, iconFilename, threshold,
                                      ctypes.c_double(colorMatch),
                                      ctypes.c_double(opacityLimit),
                                      ctypes.byref(struct_area_bbox))
    bbox = (int(struct_bbox.left), int(struct_bbox.top),
            int(struct_bbox.right), int(struct_bbox.bottom))
    # -1/-2 mean "not found"; other non-zero values are unexpected errors.
    if err == -1 or err == -2:
        msg = '%s: "%s" not found, match=%.2f, threshold=%s, closest threshold %s.' % (
            _origin, iconFilename, match, threshold, int(struct_bbox.error))
        if capture:
            drawIcon(_g_origImage, capture, iconFilename, bbox, 'red')
        _log(msg)
        raise BadMatch(msg)
    elif err != 0:
        _log("%s: findSingleIcon returned %s" % (_origin, err,))
        raise BadMatch("%s not found, findSingleIcon returned %s." % (iconFilename, err))
    # Convert the reported error back into a [0..1] score.
    if threshold > 0:
        score = (threshold - int(struct_bbox.error)) / float(threshold)
    else:
        score = 1.0
    if capture:
        drawIcon(_g_origImage, capture, iconFilename, bbox, area=leftTopRightBottomZero[:4])
    return (score, bbox)
def iClickIcon(iconFilename, clickPos=(0.5,0.5), match=None,
               colorMatch=None, opacityLimit=None,
               mouseButton=1, mouseEvent=MOUSEEVENT_CLICK, dryRun=None, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.tapBitmap instead.
    Locate an icon in the previously iRead() image and click a position
    relative to its bounding box.
    Parameters:
      iconFilename read icon from this file
      clickPos     position to be clicked, relative to the icon's
                   bounding box: (0,0) is the top-left corner,
                   (1,1) the bottom-right corner, and (0.5, 0.5)
                   the middle point (default). Values outside
                   [0, 1] click outside the box.
      match        1.0 (default) requires exact match. Value below 1.0
                   defines minimum required score for fuzzy matching
                   (EXPERIMENTAL). See iSetDefaultIconMatch.
      colorMatch   1.0 (default) requires exact color match. Value
                   below 1.0 defines maximum allowed color
                   difference. See iSetDefaultIconColorMatch.
      opacityLimit 0.0 (default) requires exact color values
                   independently of opacity. If lower than 1.0,
                   pixels less opaque than the given value are
                   skipped in pixel perfect comparisons.
      mouseButton  mouse button to be synthesized on the event, default is 1.
      mouseEvent   event to be synthesized, the default is MOUSEEVENT_CLICK,
                   others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.
      dryRun       if True, does not synthesize events. Still returns
                   coordinates of the clicked position and illustrates
                   the clicked position on the capture image if given.
      capture      name of file where image of highlighted icon and
                   clicked point are saved.
    Returns pair (score, (clickedX, clickedY)), where
      score        score of found match (1.0 for perfect match)
      (clickedX, clickedY)
                   X and Y coordinates of clicked position on the screen.
    Throws BadMatch error if no matching icon was found.
    """
    _DEPRECATED()
    # Locate the icon first; iVerifyIcon raises BadMatch on failure.
    foundScore, foundBbox = iVerifyIcon(iconFilename, match=match,
                                        colorMatch=colorMatch,
                                        opacityLimit=opacityLimit,
                                        capture=capture, _origin="iClickIcon")
    # Then click relative to the found bounding box.
    clickedCoords = iClickBox(foundBbox, clickPos, mouseButton, mouseEvent,
                              dryRun, capture, _captureText = iconFilename)
    return (foundScore, clickedCoords)
def iClickWord(word, appearance=1, clickPos=(0.5,0.5), match=0.33,
               mouseButton=1, mouseEvent=1, dryRun=None, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.tapOcrText instead.
    Click coordinates relative to the given word in previously iRead() image.
    Parameters:
      word         word that should be clicked
      appearance   if word appears many times, appearance to
                   be clicked. Defaults to the first one.
      clickPos     position to be clicked,
                   relative to word top-left corner of the bounding
                   box around the word. X and Y units are relative
                   to width and height of the box. (0,0) is the
                   top-left corner, (1,1) is bottom-right corner,
                   (0.5, 0.5) is the middle point (default).
                   Values below 0 or greater than 1 click outside
                   the bounding box.
      match        minimum matching score for the OCR word
      mouseButton  mouse button to be synthesized on the event, default is 1.
      mouseEvent   event to be synthesized, the default is MOUSEEVENT_CLICK.
      dryRun       if True, does not synthesize events.
      capture      name of file where image of highlighted word and
                   clicked point are saved.
    Returns pair: ((score, matchingWord), (clickedX, clickedY)), where
      score        score of found match (1.0 for perfect match)
      matchingWord corresponding word detected by OCR
      (clickedX, clickedY)
                   X and Y coordinates of clicked position on the
                   screen.
    Throws BadMatch error if could not find a matching word.
    Throws NoOCRResults error if there are no OCR results available
    on the current screen.
    """
    _DEPRECATED()
    # capture=False here: the combined illustration (word box + clicked
    # point) is drawn below once the click coordinates are known.
    (score, matching_word), bbox = iVerifyWord(word, appearance=appearance, match=match, capture=False)
    clickedX, clickedY = iClickBox(bbox, clickPos, mouseButton, mouseEvent, dryRun, capture=False)
    windowId = _g_lastWindow
    _log('iClickWord("%s"): word "%s", match %.2f, bbox %s, window offset %s, click %s' %
         (word, matching_word, score,
          bbox, _g_windowOffsets[windowId],
          (clickedX, clickedY)))
    if capture:
        # Draw the word highlight first, then the clicked point on top of it
        # (the second call reads the capture file as its input).
        drawWords(_g_origImage, capture, [word], _g_words)
        drawClickedPoint(capture, capture, (clickedX, clickedY))
    return ((score, matching_word), (clickedX, clickedY))
def iClickBox((left, top, right, bottom), clickPos=(0.5, 0.5),
              mouseButton=1, mouseEvent=1, dryRun=None,
              capture=None, _captureText=None):
    """
    DEPRECATED - use fmbtx11.Screen.tapItem instead.
    Click coordinates relative to the given bounding box, default is
    in the middle of the box.
    Parameters:
      (left, top, right, bottom)
                   coordinates of the box inside the window.
                   (0, 0) is the top-left corner of the window.
      clickPos     (offsetX, offsetY) position to be clicked,
                   relative to the given box. (0, 0) is the
                   top-left, and (1.0, 1.0) is the lower-right
                   corner of the box. The default is (0.5, 0.5),
                   that is, the middle point of the box. Values
                   smaller than 0 and bigger than 1 are allowed,
                   too.
      mouseButton  mouse button to be synthesized on the event, default is 1.
      mouseEvent   event to be synthesized, the default is MOUSEEVENT_CLICK,
                   others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.
      dryRun       if True, does not synthesize events. Still returns
                   coordinates of the clicked position and illustrates
                   the clicked position on the capture image if
                   given.
      capture      name of file where the last screenshot with
                   clicked point highlighted is saved. The default
                   is None (nothing is saved).
      _captureText internal: caption drawn next to the box in the
                   capture image (defaults to the box coordinates).
    Returns pair (clickedX, clickedY)
                   X and Y coordinates of clicked position on the
                   screen.
    """
    # Interpolate the click point inside the box (window coordinates).
    clickWinX = int(left + clickPos[0]*(right-left))
    clickWinY = int(top + clickPos[1]*(bottom-top))
    # capture=False: the illustration is drawn below with the box included.
    (clickedX, clickedY) = iClickWindow((clickWinX, clickWinY),
                                        mouseButton, mouseEvent,
                                        dryRun, capture=False)
    if capture:
        if _captureText == None:
            _captureText = "Box: %s, %s, %s, %s" % (left, top, right, bottom)
        drawIcon(_g_origImage, capture, _captureText, (left, top, right, bottom))
        # Second call reads the capture file produced by the first one.
        drawClickedPoint(capture, capture, (clickedX, clickedY))
    return (clickedX, clickedY)
def iClickWindow((clickX, clickY), mouseButton=1, mouseEvent=1, dryRun=None, capture=None):
"""
DEPRECATED - use fmbtx11.Screen.tap instead.
Click given coordinates in the window.
Parameters:
(clickX, clickY)
coordinates to be clicked inside the window.
(0, 0) is the top-left corner of the window.
Integer values are window coordinates. Floating
point values from 0.0 to 1.0 are scaled to window
coordinates: (0.5, 0.5) is the middle of the
window, and (1.0, 1.0) the bottom-right corner of
the window.
mouseButton mouse button to be synthesized on the event, default is 1.
mouseEvent event to be synthesized, the default is MOUSEEVENT_CLICK,
others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.
dryRun if True, does not synthesize events. Still
illustrates the clicked position on the capture
image if given.
capture name of file where the last screenshot with
clicked point highlighted is saved. The default
is None (nothing is saved).
Returns pair (clickedX, clickedY)
X and Y coordinates of clicked position on the
screen.
"""
# Get the size of the window
wndSize = windowSize()
(clickX, clickY) = _coordsToInt((clickX, clickY), wndSize)
# Get the position of the window
wndPos = windowXY()
# If coordinates are given as percentages, convert to window coordinates
clickScrX = clickX + wndPos[0]
clickScrY = clickY + wndPos[1]
iClickScreen((clickScrX, clickScrY), mouseButton, mouseEvent, dryRun, capture)
return (clickScrX, clickScrY)
def iClickScreen((clickX, clickY), mouseButton=1, mouseEvent=1, dryRun=None, capture=None):
"""
DEPRECATED - use fmbtx11.Screen.tap instead.
Click given absolute coordinates on the screen.
Parameters:
(clickX, clickY)
coordinates to be clicked on the screen. (0, 0)
is the top-left corner of the screen. Integer
values are screen coordinates. Floating point
values from 0.0 to 1.0 are scaled to screen
coordinates: (0.5, 0.5) is the middle of the
screen, and (1.0, 1.0) the bottom-right corner of
the screen.
mouseButton mouse button to be synthesized on the event, default is 1.
mouseEvent event to be synthesized, the default is MOUSEEVENT_CLICK,
others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.
dryRun if True, does not synthesize events. Still
illustrates the clicked position on the capture
image if given.
capture name of file where the last screenshot with
clicked point highlighted is saved. The default
is None (nothing is saved).
"""
_DEPRECATED()
if mouseEvent == MOUSEEVENT_CLICK:
params = "'mouseclick %s'" % (mouseButton,)
elif mouseEvent == MOUSEEVENT_DOWN:
params = "'mousedown %s'" % (mouseButton,)
elif mouseEvent == MOUSEEVENT_UP:
params = "'mouseup %s'" % (mouseButton,)
else:
params = ""
clickX, clickY = _coordsToInt((clickX, clickY))
if capture:
drawClickedPoint(_g_origImage, capture, (clickX, clickY))
if dryRun == None:
dryRun = _g_defaultClickDryRun
if not dryRun:
# use xte from the xautomation package
_runcmd("xte 'mousemove %s %s' %s" % (clickX, clickY, params))
def iGestureScreen(listOfCoordinates, duration=0.5, holdBeforeGesture=0.0, holdAfterGesture=0.0, intermediatePoints=0, capture=None, dryRun=None):
    """
    DEPRECATED - use fmbtx11.Screen.drag instead.
    Synthesizes a gesture on the screen.
    Parameters:
      listOfCoordinates
                   The coordinates through which the cursor moves.
                   Integer values are screen coordinates. Floating
                   point values from 0.0 to 1.0 are scaled to screen
                   coordinates: (0.5, 0.5) is the middle of the
                   screen, and (1.0, 1.0) the bottom-right corner of
                   the screen.
      duration     gesture time in seconds, excluding
                   holdBeforeGesture and holdAfterGesture times.
      holdBeforeGesture
                   time in seconds to keep mouse down before the
                   gesture.
      holdAfterGesture
                   time in seconds to keep mouse down after the
                   gesture.
      intermediatePoints
                   the number of intermediate points to be added
                   between each of the coordinates. Intermediate
                   points are added to straight lines between start
                   and end points.
      capture      name of file where the last screenshot with
                   the points through which the cursor passes is
                   saved. The default is None (nothing is saved).
      dryRun       if True, does not synthesize events. Still
                   illustrates the coordinates through which the cursor
                   goes.
    Returns the full list of (x, y) screen coordinates the cursor
    passes through, including generated intermediate points.
    """
    _DEPRECATED()
    # The params list to be fed to xte
    params = []
    # The list of coordinates through which the cursor has to go,
    # including generated intermediate points.
    goThroughCoordinates = []
    for pos in xrange(len(listOfCoordinates)):
        x, y = _coordsToInt(listOfCoordinates[pos])
        goThroughCoordinates.append((x,y))
        if pos == len(listOfCoordinates) - 1:
            break # last coordinate added
        # Interpolate intermediatePoints evenly spaced points on the
        # straight line towards the next user-given coordinate.
        # (Fix: removed a no-op self-assignment of the coordinate pairs.)
        nextX, nextY = _coordsToInt(listOfCoordinates[pos+1])
        for ip in range(intermediatePoints):
            goThroughCoordinates.append(
                (int(round(x + (nextX-x)*(ip+1)/float(intermediatePoints+1))),
                 int(round(y + (nextY-y)*(ip+1)/float(intermediatePoints+1)))))
    # Calculate the time (in micro seconds) to sleep between moves.
    if len(goThroughCoordinates) > 1:
        moveDelay = 1000000 * float(duration) / (len(goThroughCoordinates)-1)
    else:
        moveDelay = 0
    if not dryRun:
        # Build the params list: move to start, press button 1, walk
        # through all points with delays, release button 1.
        params.append("'mousemove %d %d'" % goThroughCoordinates[0])
        params.append("'mousedown 1 '")
        if holdBeforeGesture > 0:
            params.append("'usleep %d'" % (holdBeforeGesture * 1000000,))
        for i in xrange(1, len(goThroughCoordinates)):
            params.append("'usleep %d'" % (moveDelay,))
            params.append("'mousemove %d %d'" % goThroughCoordinates[i])
        if holdAfterGesture > 0:
            params.append("'usleep %d'" % (holdAfterGesture * 1000000,))
        params.append("'mouseup 1'")
        # Perform the gesture
        _runcmd("xte %s" % (" ".join(params),))
    if capture:
        # Illustrate both the user-given and the generated points.
        intCoordinates = [ _coordsToInt(point) for point in listOfCoordinates ]
        drawLines(_g_origImage, capture, intCoordinates, goThroughCoordinates)
    return goThroughCoordinates
def iGestureWindow(listOfCoordinates, duration=0.5, holdBeforeGesture=0.0, holdAfterGesture=0.0, intermediatePoints=0, capture=None, dryRun=None):
    """
    DEPRECATED - use fmbtx11.Screen.drag instead.
    Synthesizes a gesture on the window.
    Parameters:
      listOfCoordinates
                   The coordinates through which the cursor moves.
                   Integer values are window coordinates. Floating
                   point values from 0.0 to 1.0 are scaled to window
                   coordinates: (0.5, 0.5) is the middle of the
                   window, and (1.0, 1.0) the bottom-right corner of
                   the window.
      duration     gesture time in seconds, excluding
                   holdBeforeGesture and holdAfterGesture times.
      holdBeforeGesture
                   time in seconds to keep mouse down before the
                   gesture.
      holdAfterGesture
                   time in seconds to keep mouse down after the
                   gesture.
      intermediatePoints
                   the number of intermediate points to be added
                   between each of the coordinates. Intermediate
                   points are added to straight lines between start
                   and end points.
      capture      name of file where the last screenshot with
                   the points through which the cursor passes is
                   saved. The default is None (nothing is saved).
      dryRun       if True, does not synthesize events. Still
                   illustrates the coordinates through which the cursor
                   goes.
    """
    # Convert each (possibly relative) window coordinate pair into
    # absolute screen coordinates, then delegate to iGestureScreen.
    screenPoints = []
    for winX, winY in listOfCoordinates:
        pixelX, pixelY = _coordsToInt((winX, winY), windowSize())
        screenPoints.append(_windowToScreen(pixelX, pixelY))
    return iGestureScreen(screenPoints, duration, holdBeforeGesture,
                          holdAfterGesture, intermediatePoints, capture, dryRun)
def iType(word, delay=0.0):
    """
    DEPRECATED - use fmbtx11.Screen.type instead.
    Send keypress events.
    Parameters:
      word is either
           - a string containing letters and numbers.
             Each letter/number is using press and release events.
           - a list that contains
             - keys: each key is sent using press and release events.
             - (key, event)-pairs: the event (either "press" or "release")
               is sent.
             - (key1, key2, ..., keyn)-tuples. 2n events is sent:
               key1 press, key2 press, ..., keyn press,
               keyn release, ..., key2 release, key1 release.
             Keys are defined in eyenfinger.Xkeys, for complete list
             see keysymdef.h.
      delay is given as seconds between sent events
    Examples:
      iType('hello')
      iType([('Shift_L', 'press'), 'h', 'e', ('Shift_L', 'release'), 'l', 'l', 'o'])
      iType([('Control_L', 'Alt_L', 'Delete')])
    """
    _DEPRECATED()
    # Build a list of xte actions, then join them with usleep pauses.
    args = []
    for char in word:
        if type(char) == tuple:
            # (key, "press"/"release") pair, or an n-key chord tuple.
            if char[1].lower() == 'press':
                args.append("'keydown %s'" % (char[0],))
            elif char[1].lower() == 'release':
                args.append("'keyup %s'" % (char[0],))
            else:
                # Chord: press keys in order, release in reverse order.
                rest = []
                for key in char:
                    args.append("'keydown %s'" % (key,))
                    rest.insert(0, "'keyup %s'" % (key,))
                args = args + rest
        else:
            # char is keyname or single letter/number
            args.append("'key %s'" % (char,))
    usdelay = " 'usleep %s' " % (int(delay*1000000),)
    _runcmd("xte %s" % (usdelay.join(args),))
def iInputKey(*args, **kwargs):
    """
    DEPRECATED - use fmbtx11.Screen.pressKey instead.
    Send keypresses using Linux evdev interface
    (/dev/input/eventXX).
    iInputKey(keySpec[, keySpec...], hold=<float>, delay=<float>, device=<str>)
    Parameters:
      keySpec      is one of the following:
                   - a string of one-character-long key names:
                     "aesc" will send four keypresses: A, E, S and C.
                   - a list of key names:
                     ["a", "esc"] will send two keypresses: A and ESC.
                     Key names are listed in eyenfinger.InputKeys.
                   - an integer:
                     116 will press the POWER key.
                   - "_" or "^":
                     only press or release event will be generated
                     for the next key, respectively.
                   If a key name inside keySpec is prefixed by "_"
                   or "^", only press or release event is generated
                   for that key.
      hold         time (in seconds) to hold the key before
                   releasing. The default is 0.1.
      delay        delay (in seconds) after key release. The default
                   is 0.1.
      device       name of the input device or input event file to
                   which all key presses are sent. The default can
                   be set with iSetDefaultInputKeyDevice(). For
                   instance, "/dev/input/event0" or a name of a
                   device in /proc/bus/input/devices.
    """
    _DEPRECATED()
    hold = kwargs.get("hold", 0.1)
    delay = kwargs.get("delay", 0.1)
    device = kwargs.get("device", _g_defaultInputKeyDevice)
    # Accumulates (press-flag, release-flag, keycode) triples.
    inputKeySeq = []
    # press/release are 1/0 flags; "_" selects press-only and "^"
    # release-only for the NEXT key, after which both reset to 1.
    press, release = 1, 1
    for a in args:
        if a == "_": press, release = 1, 0
        elif a == "^": press, release = 0, 1
        elif type(a) == str:
            # A string is a sequence of one-character key names,
            # possibly containing "_"/"^" modifiers.
            for char in a:
                if char == "_": press, release = 1, 0
                elif char == "^": press, release = 0, 1
                else:
                    inputKeySeq.append((press, release, _inputKeyNameToCode(char.upper())))
                    press, release = 1, 1
        elif type(a) in (tuple, list):
            for keySpec in a:
                if type(keySpec) == int:
                    # Raw keycode.
                    inputKeySeq.append((press, release, keySpec))
                    press, release = 1, 1
                else:
                    # Key name, optionally prefixed with "_" or "^".
                    if keySpec.startswith("_"):
                        press, release = 1, 0
                        keySpec = keySpec[1:]
                    elif keySpec.startswith("^"):
                        press, release = 0, 1
                        keySpec = keySpec[1:]
                    if keySpec:
                        inputKeySeq.append((press, release, _inputKeyNameToCode(keySpec.upper())))
                        press, release = 1, 1
        elif type(a) == int:
            # Raw keycode.
            inputKeySeq.append((press, release, a))
            press, release = 1, 1
        else:
            raise ValueError('Invalid keySpec "%s"' % (a,))
    if inputKeySeq:
        _writeInputKeySeq(_deviceFilename(device), inputKeySeq, hold=hold, delay=delay)
def _deviceFilename(deviceName):
if not _deviceFilename.deviceCache:
_deviceFilename.deviceCache = dict(_listInputDevices())
if not deviceName in _deviceFilename.deviceCache:
return deviceName
else:
return _deviceFilename.deviceCache[deviceName]
_deviceFilename.deviceCache = {}
def _listInputDevices():
    """
    Parse /proc/bus/input/devices and return a list of
    [device-name, event-filename] pairs (event-filename missing for
    devices whose handler line could not be parsed).
    """
    nameAndFile = []
    # Fix: use open() with a context manager instead of file() so the
    # handle is always closed; narrow the bare except to IndexError
    # (raised when the regexp finds no event name or no N: line
    # preceded the H: line).
    with open("/proc/bus/input/devices") as f:
        for l in f:
            if l.startswith("N: Name="):
                nameAndFile.append([l.split('"')[1]])
            elif l.startswith("H: Handlers=") and "event" in l:
                try:
                    eventFilename = re.findall("(event[0-9]+)", l)[0]
                    nameAndFile[-1].append("/dev/input/%s" % (eventFilename,))
                except IndexError:
                    _log('WARNING: Could not recognise event[0-9] filename from row "%s".' % (l.strip(),))
    return nameAndFile
def _writeInputKeySeq(filename, keyCodeSeq, hold=0.1, delay=0.1):
    """
    Write a sequence of key events to an evdev device file.
    Parameters:
      filename   evdev device file, e.g. "/dev/input/event0"
      keyCodeSeq list of (press, release, keyCode) triples; press and
                 release are truthy flags selecting which events to emit.
      hold       seconds to keep the key down before releasing.
      delay      seconds to wait after a release event.
    Raises ValueError on an invalid filename; OSError from os.open/os.write
    propagates to the caller.
    """
    if type(filename) != str or len(filename) == 0:
        raise ValueError('Invalid input device "%s"' % (filename,))
    fd = os.open(filename, os.O_WRONLY | os.O_NONBLOCK)
    try:
        for press, release, keyCode in keyCodeSeq:
            # Fix: initialize the byte counter per event so a
            # release-only event (press flag 0) no longer raises
            # NameError; also renamed to avoid shadowing builtin bytes.
            written = 0
            if press:
                written = os.write(fd, struct.pack(_InputEventStructSpec,
                                                   int(time.time()), 0, _EV_KEY, keyCode, 1))
                if written > 0:
                    # EV_SYN report terminates the key-down event.
                    written += os.write(fd, struct.pack(_InputEventStructSpec,
                                                        0, 0, 0, 0, 0))
                time.sleep(hold)
            if release:
                written += os.write(fd, struct.pack(_InputEventStructSpec,
                                                    int(time.time()), 0, _EV_KEY, keyCode, 0))
                if written > 0:
                    # EV_SYN report terminates the key-up event.
                    written += os.write(fd, struct.pack(_InputEventStructSpec,
                                                        0, 0, 0, 0, 0))
                time.sleep(delay)
    finally:
        # Fix: close the descriptor even if a write fails.
        os.close(fd)
def findWord(word, detected_words = None, appearance=1):
    """
    Returns pair (score, corresponding-detected-word): the OCR-detected
    word most similar to the given word together with its similarity
    score (1.0 is a perfect match).
    Parameters:
      word           word to look for
      detected_words OCR result dictionary; the default None means
                     results of the latest iRead() call (_g_words).
      appearance     kept for API compatibility; not used here.
    Throws NoOCRResults if no OCR results are available, BadMatch if
    there are no detected words at all.
    """
    if detected_words == None:
        # Fix: raise only when falling back to _g_words and they are
        # missing; an explicitly given detected_words dict is usable
        # regardless of the state of _g_words.
        if _g_words == None:
            raise NoOCRResults()
        detected_words = _g_words
    # Score every detected word against the requested one.
    scored_words = []
    for w in detected_words:
        scored_words.append((_score(w, word), w))
    if len(scored_words) == 0:
        raise BadMatch("No words found.")
    # Ascending sort: the best match is the last element.
    scored_words.sort()
    return scored_words[-1]
def findText(text, detected_words = None, match=-1):
    """
    Find the given (possibly multi-word) text among OCR-detected words.
    Parameters:
      text           words to search for, separated by whitespace
      detected_words OCR result dictionary; the default None means
                     results of the latest iRead() call (_g_words).
      match          minimum score; candidates scoring below this are
                     dropped. The default -1 keeps all candidates.
    Returns a list of (score, detected_text, bbox) tuples sorted in
    ascending score order, containing only items with score >= match.
    Throws NoOCRResults if no OCR results are available.
    """
    def biggerBox(bbox_list):
        # Smallest rectangle covering all given bounding boxes.
        left, top, right, bottom = bbox_list[0]
        for l, t, r, b in bbox_list[1:]:
            left = min(left, l)
            top = min(top, t)
            right = max(right, r)
            bottom = max(bottom, b)
        return (left, top, right, bottom)
    words = text.split()
    word_count = len(words)
    detected_texts = [] # strings of <word_count> words
    if detected_words == None:
        # Fix: raise only when falling back to _g_words and they are
        # missing; an explicitly given detected_words dict is usable
        # regardless of the state of _g_words.
        if _g_words == None:
            raise NoOCRResults()
        detected_words = _g_words
    # sort by numeric word id (format: <prefix>_<number>...)
    words_by_id = []
    for word in detected_words:
        for wid, middle, bbox in detected_words[word]:
            words_by_id.append(
                (int(wid.split("_")[1]), word, bbox))
    words_by_id.sort()
    # Build every run of word_count consecutive detected words together
    # with the combined bounding box of the run.
    for i in xrange(len(words_by_id)-word_count+1):
        detected_texts.append(
            (" ".join([w[1] for w in words_by_id[i:i+word_count]]),
             biggerBox([w[2] for w in words_by_id[i:i+word_count]])))
    norm_text = " ".join(words) # normalize whitespace
    scored_texts = []
    for t in detected_texts:
        scored_texts.append((_score(t[0], norm_text), t[0], t[1]))
    scored_texts.sort()
    return [st for st in scored_texts if st[0] >= match]
def _score(w1, w2):
closeMatch = {
'1l': 0.1,
'1I': 0.2,
'Il': 0.2
}
def levenshteinDistance(w1, w2):
m = [range(len(w1)+1)]
for j in xrange(len(w2)+1):
m.append([])
m[-1].append(j+1)
i, j = 0, 0
for j in xrange(1, len(w2)+1):
for i in xrange(1, len(w1)+1):
if w1[i-1] == w2[j-1]:
m[j].append(m[j-1][i-1])
else:
# This is not part of Levenshtein:
# if characters often look similar,
# don't add full edit distance (1.0),
# use the value in closeMatch instead.
chars = ''.join(sorted(w1[i-1] + w2[j-1]))
if chars in closeMatch:
m[j].append(m[j-1][i-1]+closeMatch[chars])
else:
# Standard Levenshtein continues...
m[j].append(min(
m[j-1][i] + 1, # delete
m[j][i-1] + 1, # insert
m[j-1][i-1] + 1 # substitute
))
return m[j][i]
return 1 - (levenshteinDistance(w1, w2) / float(max(len(w1),len(w2))))
def _hocr2words(hocr):
    """
    Parse tesseract hOCR output into a dictionary:
      word -> list of (word_id, (middle_x, middle_y),
                       (left, top, right, bottom)) tuples,
    one tuple per appearance of the word.
    """
    rv = {}
    # Strip inline emphasis tags so word spans contain plain text only.
    hocr = hocr.replace("<strong>","").replace("</strong>","").replace("<em>","").replace("</em>","")
    # Fix: the result of this replace() was previously discarded
    # (statement had no assignment); decode the numeric apostrophe
    # entity commonly emitted by tesseract.
    hocr = hocr.replace("&#39;", "'")
    # Decode named HTML entities that map to 7-bit ASCII.
    for name, code in htmlentitydefs.name2codepoint.iteritems():
        if code < 128:
            hocr = hocr.replace('&' + name + ';', chr(code))
    ocr_word = re.compile('''<span class=['"]ocrx?_word["'] id=['"]([^']*)["'] title=['"]bbox ([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)["'][^>]*>([^<]*)</span>''')
    for word_id, bbox_left, bbox_top, bbox_right, bbox_bottom, word in ocr_word.findall(hocr):
        bbox_left, bbox_top, bbox_right, bbox_bottom = \
            int(bbox_left), int(bbox_top), int(bbox_right), int(bbox_bottom)
        if not word in rv:
            rv[word] = []
        middle_x = (bbox_right + bbox_left) / 2.0
        middle_y = (bbox_top + bbox_bottom) / 2.0
        rv[word].append((word_id, (middle_x, middle_y),
                         (bbox_left, bbox_top, bbox_right, bbox_bottom)))
    return rv
def _getScreenSize():
    # Query the root window dimensions with xwininfo and cache them in
    # the module-level _g_screenSize as an (int width, int height) pair.
    global _g_screenSize
    _, output = _runcmd("xwininfo -root | awk '/Width:/{w=$NF}/Height:/{h=$NF}END{print w\" \"h}'")
    s_width, s_height = output.split(" ")
    _g_screenSize = (int(s_width), int(s_height))
def iUseWindow(windowIdOrName = None):
    """
    Select the window on which subsequent operations act.
    Parameters:
      windowIdOrName window id as a "0x..." string, a window name, or
                     None (default): keep the previously used window,
                     falling back to the currently active window.
    Returns the id of the selected window.
    Raises BadWindowName if a window name cannot be resolved to an id.
    Side effects: updates _g_windowOffsets and _g_windowSizes for the
    window and refreshes the cached screen size.
    """
    global _g_lastWindow
    if windowIdOrName == None:
        if _g_lastWindow == None:
            _g_lastWindow = iActiveWindow()
    elif windowIdOrName.startswith("0x"):
        # Argument already looks like a window id.
        _g_lastWindow = windowIdOrName
    else:
        # Resolve a window name to its id with xwininfo.
        _g_lastWindow = _runcmd("xwininfo -name '%s' | awk '/Window id: 0x/{print $4}'" %
                                (windowIdOrName,))[1].strip()
        if not _g_lastWindow.startswith("0x"):
            raise BadWindowName('Cannot find window id for "%s" (got: "%s")' %
                                (windowIdOrName, _g_lastWindow))
    # Read the window's absolute position and size in one xwininfo call.
    _, output = _runcmd("xwininfo -id %s | awk '/Width:/{w=$NF}/Height:/{h=$NF}/Absolute upper-left X/{x=$NF}/Absolute upper-left Y/{y=$NF}END{print x\" \"y\" \"w\" \"h}'" %
                        (_g_lastWindow,))
    offset_x, offset_y, width, height = output.split(" ")
    _g_windowOffsets[_g_lastWindow] = (int(offset_x), int(offset_y))
    _g_windowSizes[_g_lastWindow] = (int(width), int(height))
    _getScreenSize()
    return _g_lastWindow
def iUseImageAsWindow(imageFilename):
    """
    Use an image file as the "window": its dimensions become the window
    and screen size, and its filename becomes the window id. Useful for
    operating on screenshots instead of live windows.
    Returns the (filename) window id.
    Raises EyenfingerError if eye4graphics is not loaded,
    BadSourceImage if the file is unreadable or its size cannot be read.
    """
    global _g_lastWindow
    global _g_screenSize
    if not eye4graphics:
        _log('ERROR: iUseImageAsWindow("%s") called, but eye4graphics not loaded.' % (imageFilename,))
        raise EyenfingerError("eye4graphics not available")
    if not os.access(imageFilename, os.R_OK):
        raise BadSourceImage("The input file could not be read or not present.")
    _g_lastWindow = imageFilename
    imageWidth, imageHeight = imageSize(imageFilename)
    if imageWidth == None:
        _log('iUseImageAsWindow: Failed reading dimensions of image "%s".' % (imageFilename,))
        raise BadSourceImage('Failed to read dimensions of "%s".' % (imageFilename,))
    # The image fills the whole "screen": no offset, size == image size.
    _g_windowOffsets[_g_lastWindow] = (0, 0)
    _g_windowSizes[_g_lastWindow] = (imageWidth, imageHeight)
    _g_screenSize = _g_windowSizes[_g_lastWindow]
    return _g_lastWindow
def iActiveWindow(windowId = None):
    """ return id of active window, in '0x1d0f14' format """
    # A non-None argument is passed through unchanged; otherwise ask
    # the X server for the currently active window.
    if windowId is not None:
        return windowId
    _, output = _runcmd("xprop -root | awk '/_NET_ACTIVE_WINDOW\(WINDOW\)/{print $NF}'")
    return output.strip()
def drawBbox(inputfilename, outputfilename, bbox, caption):
    """
    Highlight the given bounding box with a caption on inputfilename
    and save the result to outputfilename. No-op when inputfilename
    is None.
    """
    if inputfilename == None:
        return
    left, top, right, bottom = bbox
    color = "green"
    rectCmd = """ -stroke %s -fill blue -draw "fill-opacity 0.2 rectangle %s,%s %s,%s" """ % (
        color, left, top, right, bottom)
    textCmd = """ -stroke none -fill %s -draw "text %s,%s '%s'" """ % (
        color, left, top, _safeForShell(caption))
    _runDrawCmd(inputfilename, rectCmd + textCmd, outputfilename)
def drawWords(inputfilename, outputfilename, words, detected_words):
    """
    Draw boxes around words detected in inputfilename that match to
    given words. Result is saved to outputfilename.
    The box color reflects the match score: red below 0.33, brown
    below 0.5, green otherwise. No-op when inputfilename is None.
    """
    if inputfilename == None:
        return
    draw_commands = ""
    for w in words:
        # Best OCR match for the requested word and its bounding box
        # (first appearance).
        score, dw = findWord(w, detected_words)
        left, top, right, bottom = detected_words[dw][0][2]
        if score < 0.33:
            color = "red"
        elif score < 0.5:
            color = "brown"
        else:
            color = "green"
        draw_commands += """ -stroke %s -fill blue -draw "fill-opacity 0.2 rectangle %s,%s %s,%s" """ % (
            color, left, top, right, bottom)
        # Caption with the requested word at the top of the box, and
        # the score just below the box.
        draw_commands += """ -stroke none -fill %s -draw "text %s,%s '%s'" """ % (
            color, left, top, _safeForShell(w))
        draw_commands += """ -stroke none -fill %s -draw "text %s,%s '%.2f'" """ % (
            color, left, bottom+10, score)
    _runDrawCmd(inputfilename, draw_commands, outputfilename)
def drawIcon(inputfilename, outputfilename, iconFilename, bboxes, color='green', area=None):
    """
    Draw one or more icon bounding boxes with captions on inputfilename
    and save the result to outputfilename. A single bbox may be given as
    a tuple; a list of bboxes gets 1-based index prefixes in the captions.
    Optionally outlines the search area in yellow. No-op when
    inputfilename is None.
    """
    if inputfilename == None:
        return
    numbered = type(bboxes) != tuple
    if not numbered:
        # Single bounding box: wrap into a list, no index prefix.
        bboxes = [bboxes]
    commands = []
    for index, bbox in enumerate(bboxes):
        left, top, right, bottom = bbox[0], bbox[1], bbox[2], bbox[3]
        commands.append(""" -stroke %s -fill blue -draw "fill-opacity 0.2 rectangle %s,%s %s,%s" """ % (color, left, top, right, bottom))
        if numbered:
            caption = "%s %s" % (index+1, iconFilename)
        else:
            caption = iconFilename
        commands.append(""" -stroke none -fill %s -draw "text %s,%s '%s'" """ % (
            color, left, top, caption))
    if area != None:
        commands.append(""" -stroke yellow -draw "fill-opacity 0.0 rectangle %s,%s %s,%s" """ % (area[0]-1, area[1]-1, area[2], area[3]))
    _runDrawCmd(inputfilename, "".join(commands), outputfilename)
def drawClickedPoint(inputfilename, outputfilename, clickedXY):
    """
    Mark the clicked point on inputfilename and save the result to
    outputfilename. clickedXY contains absolute screen coordinates;
    they are translated to window coordinates before drawing. No-op
    when inputfilename is None.
    """
    if inputfilename == None:
        return
    winX = clickedXY[0] - _g_windowOffsets[_g_lastWindow][0]
    winY = clickedXY[1] - _g_windowOffsets[_g_lastWindow][1]
    cmd = """ -stroke red -fill blue -draw "fill-opacity 0.2 circle %s,%s %s,%s" """ % (
        winX, winY, winX + 20, winY)
    cmd += """ -stroke none -fill red -draw "point %s,%s" """ % (winX, winY)
    _runDrawCmd(inputfilename, cmd, outputfilename)
def _screenToWindow(x,y):
    """
    Convert absolute screen coordinates to coordinates of the
    currently used window.
    """
    winX, winY = _g_windowOffsets[_g_lastWindow]
    return (x - winX, y - winY)
def _windowToScreen(x,y):
    """
    Convert coordinates of the currently used window to absolute
    screen coordinates.
    """
    winX, winY = _g_windowOffsets[_g_lastWindow]
    return (x + winX, y + winY)
def drawLines(inputfilename, outputfilename, orig_coordinates, final_coordinates):
    """
    Illustrate a gesture path: draw the points in final_coordinates
    connected by lines on inputfilename and save to outputfilename.
    Points also present in orig_coordinates (user-given) are drawn in
    blue, generated intermediate points in white. Coordinates are
    absolute screen coordinates. No-op when inputfilename is None.
    """
    if inputfilename == None:
        return
    # The command which will be run
    drawCommand = ''
    for pos in xrange(len(final_coordinates)-1):
        # Get the pair coordinates
        (x, y) = (final_coordinates[pos][0], final_coordinates[pos][1])
        (nextX, nextY) = (final_coordinates[pos+1][0], final_coordinates[pos+1][1])
        # Convert to window coordinates
        (drawX, drawY) = _screenToWindow(x,y)
        (drawnextX, drawnextY) = _screenToWindow(nextX, nextY)
        # Draw a pair of circles. User-given points are blue
        if (x, y) in orig_coordinates:
            drawCommand += "-fill blue -stroke red -draw 'fill-opacity 0.2 circle %d, %d %d, %d' " % (drawX, drawY, drawX-5, drawY-5)
        # Computer-generated points are white
        else:
            drawCommand += "-fill white -stroke red -draw 'fill-opacity 0.2 circle %d, %d %d, %d' " % (drawX, drawY, drawX-5, drawY-5)
        # Draw the line between the points
        drawCommand += "-stroke black -draw 'line %d, %d, %d, %d' " % (drawX, drawY, drawnextX, drawnextY)
    # The loop above stops before the last point: draw it separately.
    if len(final_coordinates) > 0:
        lastIndex = len(final_coordinates)-1
        (finalX, finalY) = _screenToWindow(final_coordinates[lastIndex][0], final_coordinates[lastIndex][1])
        drawCommand += "-fill blue -stroke red -draw 'fill-opacity 0.2 circle %d, %d %d, %d' " % (finalX, finalY, finalX-5, finalY-5)
    _runDrawCmd(inputfilename, drawCommand, outputfilename)
def evaluatePreprocessFilter(imageFilename, ppfilter, words):
    """
    Visualise how given words are detected from given image file when
    using given preprocessing filter.

    imageFilename -- image to preprocess and OCR
    ppfilter      -- ImageMagick option string applied before OCR
    words         -- words expected to be found in the image

    Side effects: appends a score entry to evaluatePreprocessFilter.scores
    and updates the module-level _g_preprocess with the best filter so far.
    Returns early (scoring nothing) if any word cannot be matched at all.
    """
    global _g_preprocess
    # Function attributes (initialised just below the def) hold static state.
    evaluatePreprocessFilter.count += 1
    preprocessed_filename = '%s-pre%s.png' % (imageFilename, evaluatePreprocessFilter.count)
    # Preprocess with ImageMagick, then OCR the result with tesseract (hOCR output).
    _runcmd("convert '%s' %s '%s' && tesseract %s eyenfinger.autoconfigure hocr" %
            (imageFilename, ppfilter, preprocessed_filename,
             preprocessed_filename))
    # NOTE: file() is the Python 2 builtin; this module is Python 2 code.
    detected_words = _hocr2words(file("eyenfinger.autoconfigure.html").read())
    scored_words = []
    for w in words:
        try:
            score, word = findWord(w, detected_words)
        except BadMatch:
            # A word could not be matched at all: this filter is unusable.
            return
        scored_words.append((score, word, w))
    scored_words.sort()
    # Combined metric: worst (lowest) score plus the average score.
    avg_score = sum([s[0] for s in scored_words])/float(len(scored_words))
    evaluatePreprocessFilter.scores.append( (scored_words[0][0] + avg_score, scored_words[0][0], avg_score, ppfilter) )
    evaluatePreprocessFilter.scores.sort()
    # set the best preprocess filter so far as a default
    _g_preprocess = evaluatePreprocessFilter.scores[-1][-1]
    drawWords(preprocessed_filename, preprocessed_filename, words, detected_words)
    sys.stdout.write("%.2f %s %s %s\n" % (sum([s[0] for s in scored_words])/float(len(scored_words)), scored_words[0], preprocessed_filename, ppfilter))
    sys.stdout.flush()
# Function attributes act as static state shared across calls: a counter for
# unique intermediate-file names, and the sorted history of
# (combined score, min score, avg score, filter) tuples.
evaluatePreprocessFilter.count = 0
evaluatePreprocessFilter.scores = []
def autoconfigure(imageFilename, words):
    """
    Search for image preprocessing configuration that will maximise
    the score of finding given words in the image.

    NOTE(review): despite the original docstring claiming a return value,
    this function returns None; the best configuration found is stored in
    the module-level _g_preprocess by evaluatePreprocessFilter().
    """
    # check image width
    iUseImageAsWindow(imageFilename)
    image_width = _g_windowSizes[_g_lastWindow][0]
    # Candidate ImageMagick resize filters to sweep over.
    resize_filters = ['Mitchell', 'Catrom', 'Hermite', 'Gaussian']
    # (black level %, white level %) pairs for "-level" contrast stretching.
    # NOTE(review): (60, 60) appears twice, so that setting is evaluated twice.
    levels = [(20, 20), (50, 50), (80, 80), (5, 5), (95, 95),
              (30, 30), (40, 40), (60, 60), (70, 70), (60, 60),
              (10, 30), (30, 50), (50, 70), (70, 90), (80, 100)]
    zoom = [1, 2]
    # Exhaustive sweep: each call scores one filter string and, as a side
    # effect, records it in evaluatePreprocessFilter.scores.
    for f in resize_filters:
        for z in zoom:
            for blevel, wlevel in levels:
                # Level stretch only, no resize.
                evaluatePreprocessFilter(
                    imageFilename,
                    "-sharpen 5 -level %s%%,%s%%,3.0 -sharpen 5" % (blevel, wlevel),
                    words)
                # Sharpen before and after both the resize and the level stretch.
                evaluatePreprocessFilter(
                    imageFilename,
                    "-sharpen 5 -filter %s -resize %sx -sharpen 5 -level %s%%,%s%%,3.0 -sharpen 5" % (f, z * image_width, blevel, wlevel),
                    words)
                # Resize then level stretch, trailing sharpen.
                evaluatePreprocessFilter(
                    imageFilename,
                    "-sharpen 5 -filter %s -resize %sx -level %s%%,%s%%,3.0 -sharpen 5" % (
                        f, z * image_width, blevel, wlevel),
                    words)
                # Resize then level stretch, no trailing sharpen.
                evaluatePreprocessFilter(
                    imageFilename,
                    "-sharpen 5 -filter %s -resize %sx -level %s%%,%s%%,3.0" % (
                        f, z * image_width, blevel, wlevel),
                    words)
                # Level stretch before resize.
                evaluatePreprocessFilter(
                    imageFilename,
                    "-sharpen 5 -level %s%%,%s%%,3.0 -filter %s -resize %sx -sharpen 5" % (
                        blevel, wlevel, f, z * image_width),
                    words)
                # Same, with different gamma values (1.0 and 10.0).
                evaluatePreprocessFilter(
                    imageFilename,
                    "-sharpen 5 -level %s%%,%s%%,1.0 -filter %s -resize %sx" % (
                        blevel, wlevel, f, z * image_width),
                    words)
                evaluatePreprocessFilter(
                    imageFilename,
                    "-sharpen 5 -level %s%%,%s%%,10.0 -filter %s -resize %sx" % (
                        blevel, wlevel, f, z * image_width),
                    words)
| madhavpai2000/fMBT | utils/eyenfinger.py | Python | lgpl-2.1 | 68,009 | [
"Gaussian"
] | 5e9337bc90eb30b2d0a0a2685b46687c4a3d0187ee5906dd016c5bfa5448de37 |
#!/usr/bin/env python
# Copyright (c) 2012, Vasilis Pappas <vpappas@cs.columbia.edu>
# This file is part of Orp http://nsl.cs.columbia.edu/projects/orp
import pydasm
import pickle
import inp
import util
class SimpleGadget(object):
    """Plain-data snapshot of a Gadget, suitable for pickling.

    Unlike Gadget it holds no pydasm instruction objects, only primitive
    values, which is why it is the form dumped to / loaded from disk.
    """

    def __init__(self, start, end, overlap, red, ins_num, func_ea):
        self.start = start
        self.end = end  # real end!
        self.overlap = overlap  # True if any instruction starts off a known code head
        self.red = red          # True if neither end lies inside a known function
        self.ins_num = ins_num  # number of instructions in the gadget
        self.func_ea = func_ea  # function containing the start address (falsy if none)

    def set_extra(self, addrs, string, end_func_ea):
        # Optional extra data: per-instruction addresses, '; '-joined
        # disassembly text, and the function containing the final instruction.
        self.addrs = addrs
        self.string = string
        self.end_func_ea = end_func_ea

    def __str__(self):
        # NOTE(review): the printed segment bounds come from self.start while
        # the segment size is computed from self.end -- confirm this mix of
        # start/end is intentional.
        output_header = "Gadget@[%.08X:%.08X] (Size: %dB) \nSegment@[%.08X:%.08X] (Size: %dB)%s\n" % (
            self.start, self.end, self.end - self.start,
            inp.seg_start(self.start), inp.seg_end(self.start), inp.seg_end(self.end) - inp.seg_start(self.end),
            "(overlapping)" if self.overlap else "")
        output_lines = []
        # Requires set_extra() to have been called (addrs/string).
        for addr, string in zip(self.addrs, self.string.split('; ')):
            output_lines.append("\t%.08X %s" % (addr, string))
        return (output_header + '\n'.join(output_lines))

    def getGadgetInfo(self):
        # Tuple form of all fields; requires set_extra() to have been called.
        return (self.start, self.end, self.overlap, self.red, self.ins_num, self.addrs, self.string)
class Gadget():
    """A gadget: an instruction sequence ending in a ret/jmp/call.

    _instrs is a list of (address, pydasm instruction) pairs; _end_ea is the
    address of the *first byte* of the final instruction. Gadgets hash and
    compare on their (start, end) address pair only.
    """

    def __init__(self, start_ea, end_ea, instrs):
        self._start_ea = start_ea
        self._end_ea = end_ea  # addr of the first byte of the final instruction
        self._instrs = instrs
        # A gadget "overlaps" when some instruction does not start on a known
        # code head, i.e. it reuses bytes outside the intended instruction stream.
        self.overlap = not all((a in inp.get_code_heads() for a, i in instrs))

    def get_start_ea(self):
        return self._start_ea

    def get_end_ea(self):
        return self._end_ea

    def get_real_end_ea(self):
        # Address just past the gadget: final instruction address + its length.
        return self._end_ea + self._instrs[-1][1].length

    def dump_simple(self, extra=False):
        """Convert to a pickle-friendly SimpleGadget (disassembly text if extra)."""
        func_ea = inp.get_func_of(self._start_ea)
        # "red" gadgets lie entirely outside any known function.
        red = (not func_ea and not inp.get_func_of(self.get_real_end_ea() - 1))
        sg = SimpleGadget(self._start_ea, self.get_real_end_ea(), self.overlap,
                          red, len(self._instrs), func_ea)
        if extra:
            sg.set_extra([a for a, i in self._instrs], '; '.join([
                pydasm.get_instruction_string(i,
                                              pydasm.FORMAT_INTEL,
                                              0)
                for a, i in self._instrs]),
                inp.get_func_of(self.get_real_end_ea() - 1))
        return sg

    def __eq__(self, other):
        return ((self.get_start_ea() == other.get_start_ea()) and
                (self.get_end_ea() == other.get_end_ea()))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Consistent with __eq__: identity is the (start, end) address pair.
        return hash("%x-%x" % (self.get_start_ea(), self.get_end_ea()))

    def __cmp__(self, other):
        # Python 2 ordering protocol: sort by end address, then start address.
        if self.get_end_ea() == other.get_end_ea():
            return self.get_start_ea() - other.get_start_ea()
        else:
            return self.get_end_ea() - other.get_end_ea()

    def __str__(self):
        output_header = "Gadget@[%.08X:%.08X] (Size: %dB)" % (
            self._start_ea, self._end_ea, self._end_ea - self._start_ea)
        return output_header
# Opcode bytes (hex strings) that may terminate a gadget; scanned for by
# find_gadget_ends() below.
gadget_ends = [
    'C3',  # ret
    # 'CB', # ret far - ignore for now
    'C2',  # ret imm16
    # 'CA', # ret imm16 far - ignore for now
    'FF',  # indirect jmp/call - not always (!) ..could be inc/dec r/m16/32
]
# based on http://hexblog.com/2009/09/assembling_and_finding_instruc.html
def find_gadget_ends(start_ea, end_ea):
    """Return addresses in [start_ea, end_ea] of instructions that can end a
    gadget: ret (C3), ret imm16 (C2) or a genuine indirect jmp/call (FF /2, /4...)."""
    gadget_end_addresses = []
    for opcode_byte in gadget_ends:
        ea = start_ea
        while True:
            ea = inp.code_search(ea, opcode_byte)
            if ea > end_ea or ea == None:
                break
            if inp.byte_at(ea) != 0xFF:
                # C3/C2 are unambiguous returns: accept unconditionally.
                gadget_end_addresses.append(ea)
            else:
                # An opcode starting with 0xFF is not necessarily an indirect jmp/call
                bytes_ahead = 10  # TODO should be smaller, probably 3, should check
                # Do not read past the end of the current segment.
                headroom = inp.seg_end(ea) - ea
                if 0 < headroom < 10:
                    bytes_ahead = headroom
                ibuf = inp.bytes_at(ea, bytes_ahead)
                if not ibuf:
                    print "WARNING: GetManyBytes(%.08X, %d) failed " % (ea, bytes_ahead)
                # NOTE(review): when ibuf is empty we still fall through and
                # disassemble it; presumably pydasm returns None then -- verify.
                instr = pydasm.get_instruction(ibuf, pydasm.MODE_32)
                # Accept only a decoded jmp/call whose operand registers are
                # not all 8 (pydasm's marker for invalid/absent register,
                # presumably -- confirm against pydasm docs).
                if (instr and
                        pydasm.get_mnemonic_string(instr, pydasm.FORMAT_INTEL) in ("call", "jmp") and
                        (instr.op1.reg != 8 or instr.op1.basereg != 8 or instr.op1.indexreg != 8)):
                    gadget_end_addresses.append(ea)
            ea += 1
    return gadget_end_addresses
def extract_gadget(end_ea, depth_bytes=64):
    """Extract all gadgets that end at the instruction starting at ea

    end_ea: the first byte of the final instruction of the gadgets
    depth_bytes: look back for gadget instructions at most 'depth_bytes'
                 from the address of its last instruction

    Returns a list of Gadget objects; the single final instruction always
    counts as one gadget, plus one gadget per valid start offset found.
    """
    bytes_ahead = 6  # TODO should be smaller, probably 3, should check
    bytes_back = depth_bytes
    # Confine search within end_ea's segment
    headroom = inp.seg_end(end_ea) - end_ea
    if 0 < headroom < bytes_ahead:
        bytes_ahead = headroom
    headroom = end_ea - inp.seg_start(end_ea)
    if 0 < headroom < bytes_back:
        bytes_back = headroom
    ibuf_start = max(end_ea - bytes_back, inp.seg_start(end_ea))
    ibuf_len = bytes_back + bytes_ahead
    ibuf = inp.bytes_at(ibuf_start, ibuf_len)
    if not ibuf:
        print "WARNING: GetManyBytes(%.08X, %d) failed " % (ibuf_start, ibuf_len)
    # ibuf:
    #
    #  end_ea-bytes_back              end_ea         end_ea+bytes_ahead
    #  |                                |                 |
    #  +---------------------------------+-----------------+
    #   <-----------bytes_back---------->  <--bytes_ahead-->
    # Always corresponds to end_ea and is our basic reference in the ibuf
    idx_end_instr = bytes_back
    # This is the final instruction of the gadget (ret/jmp/call)
    instr = pydasm.get_instruction(ibuf[idx_end_instr:], pydasm.MODE_32)
    # Holds the disassembled instruction at each position in the ibuf
    all_instrs = [None] * (idx_end_instr + 1)  # +1 for the final instruction
    all_instrs[idx_end_instr] = instr
    # Going backwards from its final instruction, a gadget cannot include
    # any instruction of the following types
    bad_instrs = [
        pydasm.INSTRUCTION_TYPE_JMP,
        pydasm.INSTRUCTION_TYPE_JMPC,
        pydasm.INSTRUCTION_TYPE_LOOP,
        pydasm.INSTRUCTION_TYPE_CALL,
        pydasm.INSTRUCTION_TYPE_RET,
        pydasm.INSTRUCTION_TYPE_PRIV]
    # Seed with the trivial gadget: just the final instruction.
    gadgets = [[idx_end_instr]]
    # Find all gadgets starting from each and every position in the buffer
    for pos in range(idx_end_instr):
        # If we have already visited this instruction as part of a previous
        # instruction sequence, we can skip exploring the same sub-sequence
        if all_instrs[pos]:
            continue
        tmp_gadget = []
        has_bad = False
        # Linear disassembly until we reach the end of the gadget
        while pos < idx_end_instr:
            instr = all_instrs[pos]  # Acts as a cache of decoded instructions
            if not instr:
                # First time we visit this instruction: disassemble
                instr = pydasm.get_instruction(ibuf[pos:], pydasm.MODE_32)
            # Stop exploring this path if we hit an illegal or "bad" instruction
            if not instr or instr.type in bad_instrs:
                has_bad = True
                break
            # Save the decoded instruction to avoid disassembling it again
            all_instrs[pos] = instr
            # Non-bad instruction: append it to the current gadget
            tmp_gadget.append(pos)
            # Move to the next instruction
            pos += instr.length
        # If we "touched" the gadget's final instruction, this is a valid gadget
        if not has_bad and pos == idx_end_instr:
            # For completeness, also include the final instruction
            tmp_gadget.append(idx_end_instr)
            gadgets.append(tmp_gadget)
    # Materialise each index list as a Gadget with absolute addresses.
    gadgets_ret = []
    for gdgt in gadgets:
        idx_first_instr = gdgt[0]
        start_ea = end_ea - (idx_end_instr - idx_first_instr)
        instrs = [all_instrs[p] for p in gdgt]
        addrs = map(lambda idx: (end_ea - (idx_end_instr - idx)), gdgt)
        gadgets_ret.append(Gadget(start_ea, end_ea, zip(addrs, instrs)))
    return gadgets_ret
def find_gadgets(ea_start, ea_end):
    """Finds and returns a set of gadgets that are at most 64 bytes long.

    ea_start/ea_end delimit the address range scanned for gadget-terminating
    instructions; every gadget ending at one of those addresses is collected
    into the returned set (deduplicated via Gadget.__hash__/__eq__).
    """
    gadgets = set()
    for g_ea in sorted(find_gadget_ends(ea_start, ea_end)):
        g = extract_gadget(g_ea)
        if g is not None:  # fix: identity check instead of "!= None"
            gadgets.update(g)
    return gadgets
def find_gadgets5(ea_start, ea_end):
    """Finds and returns a set of gadgets that are at most 5 instructions long.

    Every gadget found in the range contributes one sub-gadget per suffix
    length, from min(5, len) down to 2 instructions.
    """
    suffixes = set()
    for gadget in find_gadgets(ea_start, ea_end):
        longest = min(5, len(gadget._instrs))
        for size in range(longest, 1, -1):
            tail = gadget._instrs[-size:]
            suffixes.add(Gadget(tail[0][0], tail[-1][0], tail))
    return suffixes
def find_payload_gadgets():
    """Finds and returns a set of gadgets that is used by the given exploit.

    Gadget start addresses come from the payload stored for the current
    input file; the result is a set of SimpleGadget objects with extra
    (disassembly) data attached.
    """
    # load the exploits gadgets
    payload = util.get_payload(inp.get_input_file_path())
    exp_gadgets = set()
    for addr in payload:
        # Search a 64-byte window starting at each payload address.
        gadgets = find_gadgets(addr, addr + 64)
        for gad in gadgets:
            for iaddr, ins in gad._instrs:
                if iaddr == addr:
                    # Keep only the suffix of the gadget that starts at `addr`.
                    i = gad._instrs.index((iaddr, ins))
                    exp_gadgets.add(Gadget(addr, gad._instrs[-1][0], gad._instrs[i:]))
    print "found", len(exp_gadgets), "gadgets for", len(payload), "addresses"
    if len(exp_gadgets) != len(payload):
        # Report payload addresses for which no gadget could be reconstructed.
        print "missing:", list(set(payload) - set(g.get_start_ea() for g in exp_gadgets))
    return set((g.dump_simple(extra=True) for g in exp_gadgets))
def get_all_gadgets():
    """Returns a set of all the gadges found in all the code segments of
    the file that is currently processed. (It uses find_gadgets5 which limits
    the length of each gadget to 5 instructions)."""
    gadgets = set()
    for seg_begin, seg_end, _name in inp.code_segments_iter():
        gadgets.update(find_gadgets5(seg_begin, seg_end))
    return gadgets
def get_simple_gadgets(input_file):
    """Checks if a dump of the gadgets already exists and loads them. Otherwise,
    it finds all the gadgets in the current input file, dumps them and also
    returns them (simple form)."""
    try:
        gad_in = util.open_gadgets(input_file, "rb")
        try:
            simple_gadgets = pickle.load(gad_in)
        finally:
            gad_in.close()  # fix: the cached-dump handle was never closed
    except IOError:
        # No cached dump yet: compute all gadgets, cache them, return them.
        all_gadgets = get_all_gadgets()
        simple_gadgets = set((g.dump_simple(extra=True) for g in all_gadgets))
        gad_out = util.open_gadgets(input_file, "wb")
        pickle.dump(simple_gadgets, gad_out)
        gad_out.close()
    return simple_gadgets
def get_payload_gadgets(input_file):
    """Checks if a dump of the payload gadgets already exists and loads them.
    Otherwise, it finds all the gadgets corresponding to the addresses in the
    payload, dumps them and also returns them (simple form)."""
    try:
        pay_gad_in = util.open_payload_gadgets(input_file, "rb")
        try:
            payload_gadgets = pickle.load(pay_gad_in)
        finally:
            pay_gad_in.close()  # fix: the cached-dump handle was never closed
    except IOError:
        # No cached dump yet: compute, cache and return the payload gadgets.
        payload_gadgets = find_payload_gadgets()  # already simple
        pay_gad_out = util.open_payload_gadgets(input_file, "wb")
        pickle.dump(payload_gadgets, pay_gad_out)
        pay_gad_out.close()
    return payload_gadgets
# executes as an IDA python script
if __name__ == "__main__":
    # Find gadgets between cursor position and end of function.
    # ScreenEA/idc are provided by IDA's scripting environment.
    start_ea = ScreenEA()
    end_ea = idc.FindFuncEnd(start_ea)
    print "\nSearching for gadgets in %.8X:%.8X" % (start_ea, end_ea)
    gadgets = find_gadgets(start_ea, end_ea)
    print "Found %d gadgets:" % len(gadgets)
    for g in sorted(gadgets):
        print g
    # Same range, restricted to 2-5 instruction sub-sequences.
    gadgets5 = find_gadgets5(start_ea, idc.FindFuncEnd(start_ea))
    print "Found %d (sub)sequences 2-5 instructions long" % len(gadgets5)
    for g in sorted(gadgets5):
        print g
| kevinkoo001/ropf | gadget.py | Python | bsd-3-clause | 12,832 | [
"VisIt"
] | 0518a7bf2cf5cfcfe751a76f03724d9d247408e215cc07129bc8ad0ad3cd7c98 |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Utility functions and for loading neurons'''
import glob
import logging
import os
import shutil
import tempfile
import uuid
from functools import partial
from io import IOBase, open
from neurom._compat import StringType, filter
from neurom.core.population import Population
from neurom.exceptions import NeuroMError, RawDataError
from neurom.fst._core import FstNeuron
from neurom.io import neurolucida, swc
from neurom.io.datawrapper import DataWrapper
L = logging.getLogger(__name__)
def _is_morphology_file(filepath):
""" Check if `filepath` is a file with one of morphology file extensions. """
return (
os.path.isfile(filepath) and
os.path.splitext(filepath)[1].lower() in ('.swc', '.h5', '.asc')
)
class NeuronLoader(object):
    """
    Caching morphology loader.

    Arguments:
        directory: path to directory with morphology files
        file_ext: file extension to look for (if not set, will pick any of .swc|.h5|.asc)
        cache_size: size of LRU cache (if not set, no caching done)
    """

    def __init__(self, directory, file_ext=None, cache_size=None):
        self.directory = directory
        self.file_ext = file_ext
        if cache_size is not None:
            # Lazy import: pylru is only required when caching is requested.
            from pylru import FunctionCacheManager
            # Replace the bound method with a memoising wrapper.
            self.get = FunctionCacheManager(self.get, size=cache_size)

    def _filepath(self, name):
        """ File path to `name` morphology file. """
        if self.file_ext is None:
            # No extension configured: take the first candidate in the
            # directory that looks like a morphology file (.swc/.h5/.asc).
            candidates = glob.glob(os.path.join(self.directory, name + ".*"))
            try:
                return next(filter(_is_morphology_file, candidates))
            except StopIteration:
                raise NeuroMError("Can not find morphology file for '%s' " % name)
        else:
            return os.path.join(self.directory, name + self.file_ext)

    # pylint:disable=method-hidden
    def get(self, name):
        """ Get `name` morphology data. """
        return load_neuron(self._filepath(name))
def get_morph_files(directory):
    '''Get a list of all morphology files in a directory

    Returns:
        list with all files with extensions '.swc' , 'h5' or '.asc' (case insensitive)
    '''
    candidates = (os.path.join(directory, entry) for entry in os.listdir(directory))
    return [path for path in candidates if _is_morphology_file(path)]
def get_files_by_path(path):
    '''Get a file or set of files from a file path

    Return list of files with path
    '''
    if os.path.isdir(path):
        return get_morph_files(path)
    if os.path.isfile(path):
        return [path]
    raise IOError('Invalid data path %s' % path)
def load_neuron(handle, reader=None):
    '''Build section trees from an h5 or swc file

    handle may be a file path or an open stream; reader optionally forces
    the file format ('swc', 'h5' or 'asc').
    '''
    rdw = load_data(handle, reader)
    if isinstance(handle, StringType):
        # Use the file's basename (without extension) as the neuron name.
        name = os.path.splitext(os.path.basename(handle))[0]
    else:
        # Streams carry no usable name.
        name = None
    return FstNeuron(rdw, name)
def load_neurons(neurons,
                 neuron_loader=load_neuron,
                 name=None,
                 population_class=Population,
                 ignored_exceptions=()):
    '''Create a population object from all morphologies in a directory\
    of from morphologies in a list of file names

    Parameters:
        neurons: directory path or list of neuron file paths
        neuron_loader: function taking a filename and returning a neuron
        population_class: class representing populations
        name (str): optional name of population. By default 'Population' or\
            filepath basename depending on whether neurons is list or\
            directory path respectively.

    Returns:
        neuron population object

    Raises:
        TypeError: if `neurons` is neither a list/tuple nor a path string.
    '''
    if isinstance(neurons, (list, tuple)):
        files = neurons
        name = name if name is not None else 'Population'
    elif isinstance(neurons, StringType):
        files = get_files_by_path(neurons)
        name = name if name is not None else os.path.basename(neurons)
    else:
        # Fix: an unsupported type previously fell through and crashed below
        # with a confusing NameError on `files`; fail fast instead.
        raise TypeError('neurons must be a directory path or a list/tuple of '
                        'file paths, got %r' % (type(neurons),))
    ignored_exceptions = tuple(ignored_exceptions)
    pop = []
    for f in files:
        try:
            pop.append(neuron_loader(f))
        except NeuroMError as e:
            if isinstance(e, ignored_exceptions):
                # Caller opted to skip files raising these error types.
                L.info('Ignoring exception "%s" for file %s',
                       e, os.path.basename(f))
                continue
            raise
    return population_class(pop, name=name)
def _get_file(handle):
'''Returns the filename of the file to read
If handle is a stream, a temp file is written on disk first
and its filename is returned'''
if not isinstance(handle, IOBase):
return handle
fd, temp_file = tempfile.mkstemp(str(uuid.uuid4()), prefix='neurom-')
os.close(fd)
with open(temp_file, 'w') as fd:
handle.seek(0)
shutil.copyfileobj(handle, fd)
return temp_file
def load_data(handle, reader=None):
    '''Unpack data into a raw data wrapper

    handle may be a file path or an open stream; reader optionally names the
    format ('swc', 'h5' or 'asc'), otherwise it is derived from the extension.
    Raises NeuroMError for unknown formats and RawDataError on read failure.
    '''
    if not reader:
        # NOTE(review): this assumes `handle` is a path string; a stream
        # passed without an explicit `reader` would fail here -- verify callers.
        reader = os.path.splitext(handle)[1][1:].lower()
    if reader not in _READERS:
        raise NeuroMError('Do not have a loader for "%s" extension' % reader)
    # Streams are spilled to a temp file so every reader sees a filename.
    filename = _get_file(handle)
    try:
        return _READERS[reader](filename)
    except Exception as e:
        # Log the full traceback, then surface a NeuroM-specific error.
        L.exception('Error reading file %s, using "%s" loader', filename, reader)
        raise RawDataError('Error reading file %s:\n%s' % (filename, str(e)))
def _load_h5(filename):
    '''Delay loading of h5py until it is needed'''
    # Local import so h5py is only required when an .h5 file is actually read.
    from neurom.io import hdf5
    return hdf5.read(filename,
                     remove_duplicates=False,
                     data_wrapper=DataWrapper)
# Dispatch table: lower-case file extension -> reader callable producing a
# DataWrapper; consulted by load_data().
_READERS = {
    'swc': partial(swc.read,
                   data_wrapper=DataWrapper),
    'h5': _load_h5,
    'asc': partial(neurolucida.read,
                   data_wrapper=DataWrapper)
}
| juanchopanza/NeuroM | neurom/io/utils.py | Python | bsd-3-clause | 7,484 | [
"NEURON"
] | b7c620f41948ed7a9bb99ee0b707cfd5bab97be521955878c50d5b3f5f41e6a5 |
"""
Tools for the instructor dashboard
"""
import dateutil
import json
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponseBadRequest
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from courseware.models import StudentFieldOverride
from courseware.field_overrides import disable_overrides
from courseware.student_field_overrides import (
clear_override_for_user,
get_override_for_user,
override_field_for_user,
)
from xmodule.fields import Date
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import UsageKey
from bulk_email.models import CourseAuthorization
DATE_FIELD = Date()
class DashboardError(Exception):
    """
    Errors arising from use of the instructor dashboard.
    """
    def response(self):
        """
        Generate an instance of HttpResponseBadRequest for this error.

        The body is JSON of the form {"error": <message>}, where the message
        is the exception's own text.
        """
        error = unicode(self)
        return HttpResponseBadRequest(json.dumps({'error': error}))
def handle_dashboard_error(view):
    """
    Decorator which adds seamless DashboardError handling to a view. If a
    DashboardError is raised during view processing, an HttpResponseBadRequest
    is sent back to the client with JSON data about the error.
    """
    from functools import wraps

    @wraps(view)  # fix: preserve the wrapped view's name/docstring for introspection
    def wrapper(request, course_id):
        """
        Wrap the view.
        """
        try:
            return view(request, course_id=course_id)
        except DashboardError as error:
            return error.response()
    return wrapper
def bulk_email_is_enabled_for_course(course_id):
    """
    Staff can only send bulk email for a course if all the following conditions are true:
    1. Bulk email feature flag is on.
    2. It is a studio course.
    3. Bulk email is enabled for the course.

    EDCAST: ENABLE_INSTRUCTOR_EMAIL is changed to False
    """
    # NOTE(review): per the EDCAST note above this deliberately requires the
    # feature flag to be False (the check is inverted on purpose) -- confirm.
    bulk_email_enabled_globally = (settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] is False)
    # XML (non-studio) courses never get bulk email.
    is_studio_course = (modulestore().get_modulestore_type(course_id) != ModuleStoreEnum.Type.xml)
    bulk_email_enabled_for_course = CourseAuthorization.instructor_email_enabled(course_id)
    if bulk_email_enabled_globally and is_studio_course and bulk_email_enabled_for_course:
        return True
    return False
def strip_if_string(value):
    """Return `value` stripped of surrounding whitespace when it is a string;
    non-string values are returned unchanged."""
    if isinstance(value, basestring):
        value = value.strip()
    return value
def get_student_from_identifier(unique_student_identifier):
    """
    Gets a student object using either an email address or username.

    Returns the student object associated with `unique_student_identifier`

    Raises User.DoesNotExist if no user object can be found.
    """
    identifier = strip_if_string(unique_student_identifier)
    # An '@' marks the identifier as an email address, otherwise a username.
    if "@" in identifier:
        return User.objects.get(email=identifier)
    return User.objects.get(username=identifier)
def require_student_from_identifier(unique_student_identifier):
    """
    Same as get_student_from_identifier() but will raise a DashboardError if
    the student does not exist.
    """
    try:
        return get_student_from_identifier(unique_student_identifier)
    except User.DoesNotExist:
        message = _("Could not find student matching identifier: {student_identifier}").format(
            student_identifier=unique_student_identifier
        )
        raise DashboardError(message)
def parse_datetime(datestr):
    """
    Convert user input date string into an instance of `datetime.datetime` in
    UTC.

    Raises DashboardError if the string cannot be parsed.
    """
    # Fix: `import dateutil` alone does not import the `parser` submodule, so
    # `dateutil.parser` could raise AttributeError unless something else had
    # already imported it; import it explicitly here.
    from dateutil import parser
    try:
        return parser.parse(datestr).replace(tzinfo=utc)
    except ValueError:
        raise DashboardError(_("Unable to parse date: ") + datestr)
def find_unit(course, url):
    """
    Finds the unit (block, module, whatever the terminology is) with the given
    url in the course tree and returns the unit. Raises DashboardError if no
    unit is found.
    """
    def _search(node):
        """Depth-first search for the node whose url matches."""
        if node.location.to_deprecated_string() == url:
            return node
        for child in node.get_children():
            match = _search(child)
            if match:
                return match
        return None

    unit = _search(course)
    if unit is None:
        raise DashboardError(_("Couldn't find module for url: {0}").format(url))
    return unit
def get_units_with_due_date(course):
    """
    Returns all top level units which have due dates. Does not return
    descendents of those nodes.
    """
    units = []

    def collect(node):
        """Append `node` if it has a due date, otherwise recurse into its
        children looking for nodes that do."""
        if getattr(node, 'due', None):
            units.append(node)
        else:
            for child in node.get_children():
                collect(child)

    collect(course)
    return units
def title_or_url(node):
    """
    Returns the `display_name` attribute of the passed in node of the course
    tree, if it has one. Otherwise returns the node's url.
    """
    return getattr(node, 'display_name', None) or node.location.to_deprecated_string()
def set_due_date_extension(course, unit, student, due_date):
    """
    Sets a due date extension. Raises DashboardError if the unit or extended
    due date is invalid.

    Passing a falsy `due_date` deletes any existing extension instead.
    """
    if due_date:
        # Check that the new due date is valid:
        with disable_overrides():
            # Read the unit's original due date, ignoring per-student overrides.
            original_due_date = getattr(unit, 'due', None)
        if not original_due_date:
            raise DashboardError(_("Unit {0} has no due date to extend.").format(unit.location))
        if due_date < original_due_date:
            raise DashboardError(_("An extended due date must be later than the original due date."))
        override_field_for_user(student, unit, 'due', due_date)
    else:
        # We are deleting a due date extension. Check that it exists:
        if not get_override_for_user(student, unit, 'due'):
            raise DashboardError(_("No due date extension is set for that student and unit."))
        clear_override_for_user(student, unit, 'due')
def dump_module_extensions(course, unit):
    """
    Dumps data about students with due date extensions for a particular module,
    specified by 'url', in a particular course.

    Returns a dict with 'header', 'title' and 'data' (a list of row dicts
    keyed by the header labels), ready for tabular display.
    """
    data = []
    header = [_("Username"), _("Full Name"), _("Extended Due Date")]
    query = StudentFieldOverride.objects.filter(
        course_id=course.id,
        location=unit.location,
        field='due')
    for override in query:
        # Override values are stored as JSON-encoded dates.
        due = DATE_FIELD.from_json(json.loads(override.value))
        due = due.strftime("%Y-%m-%d %H:%M")
        fullname = override.student.profile.name
        data.append(dict(zip(
            header,
            (override.student.username, fullname, due))))
    # Sort rows by username (the first header column).
    data.sort(key=lambda x: x[header[0]])
    return {
        "header": header,
        "title": _("Users with due date extensions for {0}").format(
            title_or_url(unit)),
        "data": data
    }
def dump_student_extensions(course, student):
    """
    Dumps data about the due date extensions granted for a particular student
    in a particular course.

    Returns a dict with 'header', 'title' and 'data' keys, in the same shape
    as dump_module_extensions().
    """
    data = []
    header = [_("Unit"), _("Extended Due Date")]
    units = get_units_with_due_date(course)
    units = {u.location: u for u in units}
    query = StudentFieldOverride.objects.filter(
        course_id=course.id,
        student=student,
        field='due')
    for override in query:
        # Map the stored location into this course's key space before lookup.
        location = override.location.replace(course_key=course.id)
        if location not in units:
            # Skip overrides for units that no longer carry a due date.
            continue
        due = DATE_FIELD.from_json(json.loads(override.value))
        due = due.strftime("%Y-%m-%d %H:%M")
        title = title_or_url(units[location])
        data.append(dict(zip(header, (title, due))))
    return {
        "header": header,
        "title": _("Due date extensions for {0} {1} ({2})").format(
            student.first_name, student.last_name, student.username),
        "data": data}
def add_block_ids(payload):
    """
    rather than manually parsing block_ids from module_ids on the client, pass the block_ids explicitly in the payload
    """
    if 'data' not in payload:
        return
    for entry in payload['data']:
        if 'module_id' in entry:
            entry['block_id'] = UsageKey.from_string(entry['module_id']).block_id
| edcast-inc/edx-platform-edcast | lms/djangoapps/instructor/views/tools.py | Python | agpl-3.0 | 8,692 | [
"VisIt"
] | de6e14f12c406162da0f8f6341a7fd40ca7e937f26d7a83fd0e50d3f30ad5ca3 |
import unittest
from nn.models.neurons.neuron import SigmoidNeuron
class NeuronsTests(unittest.TestCase):
    """Unit tests for the SigmoidNeuron model."""

    __sigmoid_neuron = None  # fresh instance created per test in setUp()

    def setUp(self):
        self.__sigmoid_neuron = SigmoidNeuron()

    def tearDown(self):
        pass

    def test_should_compute_sigmoid(self):
        # sigmoid(5) = 1 / (1 + e**-5) ~= 0.993307149075
        self.assertAlmostEqual(self.__sigmoid_neuron.compute(5), 0.993307149075)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| ADozois/ML_Challenge | tests/nn/models/neurons/neurons_tests.py | Python | mit | 496 | [
"NEURON"
] | 57174e68e2be904ed5c075d547f73e6c50959e550c59ae24300d23d8782916fb |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`powersongimport` module provides the functionality for importing
PowerSong songs into the OpenLP database.
"""
import logging
import fnmatch
import os
from openlp.core.lib import translate
from openlp.plugins.songs.lib.songimport import SongImport
log = logging.getLogger(__name__)
class PowerSongImport(SongImport):
    """
    The :class:`PowerSongImport` class provides the ability to import song files
    from PowerSong.

    **PowerSong 1.0 Song File Format:**

    The file has a number of label-field (think key-value) pairs.

    Label and Field strings:

    * Every label and field is a variable length string preceded by an
      integer specifying it's byte length.
    * Integer is 32-bit but is encoded in 7-bit format to save space. Thus
      if length will fit in 7 bits (ie <= 127) it takes up only one byte.

    Metadata fields:

    * Every PowerSong file has a TITLE field.
    * There is zero or more AUTHOR fields.
    * There is always a COPYRIGHTLINE label, but its field may be empty.
      This field may also contain a CCLI number: e.g. "CCLI 176263".

    Lyrics fields:

    * Each verse is contained in a PART field.
    * Lines have Windows line endings ``CRLF`` (0x0d, 0x0a).
    * There is no concept of verse types.

    Valid extensions for a PowerSong song file are:

    * .song
    """
    @staticmethod
    def isValidSource(import_source):
        """
        Checks if source is a PowerSong 1.0 folder:
            * is a directory
            * contains at least one \\*.song file
        """
        if os.path.isdir(import_source):
            for file_name in os.listdir(import_source):
                if fnmatch.fnmatch(file_name, u'*.song'):
                    return True
        return False

    def doImport(self):
        """
        Receive either a list of files or a folder (unicode) to import.
        """
        from importer import SongFormat
        PS_string = SongFormat.get(SongFormat.PowerSong, u'name')
        if isinstance(self.importSource, unicode):
            # A folder was given: expand it to the list of *.song files inside.
            if os.path.isdir(self.importSource):
                folder = self.importSource
                self.importSource = []
                for file_name in os.listdir(folder):
                    if fnmatch.fnmatch(file_name, u'*.song'):
                        self.importSource.append(os.path.join(folder, file_name))
            else:
                self.importSource = u''
        if not self.importSource or not isinstance(self.importSource, list):
            self.logError(translate('SongsPlugin.PowerSongImport', 'No songs to import.'),
                translate('SongsPlugin.PowerSongImport', 'No %s files found.') % PS_string)
            return
        self.importWizard.progressBar.setMaximum(len(self.importSource))
        for file_path in self.importSource:
            if self.stopImportFlag:
                return
            self.setDefaults()
            parse_error = False
            # Must be reset per file: previously this flag was only assigned
            # inside the COPYRIGHTLINE branch, causing an UnboundLocalError
            # for the first file without that label and a stale True value to
            # leak from one file to the next.
            found_copyright = False
            with open(file_path, 'rb') as song_data:
                while True:
                    try:
                        label = self._readString(song_data)
                        if not label:
                            # Empty read means end of file
                            break
                        field = self._readString(song_data)
                    except ValueError:
                        parse_error = True
                        self.logError(os.path.basename(file_path), unicode(
                            translate('SongsPlugin.PowerSongImport', 'Invalid %s file. Unexpected byte value.')) %
                            PS_string)
                        break
                    else:
                        if label == u'TITLE':
                            self.title = field.replace(u'\n', u' ')
                        elif label == u'AUTHOR':
                            self.parseAuthor(field)
                        elif label == u'COPYRIGHTLINE':
                            found_copyright = True
                            self._parseCopyrightCCLI(field)
                        elif label == u'PART':
                            self.addVerse(field)
            if parse_error:
                continue
            # Check that file had TITLE field
            if not self.title:
                self.logError(os.path.basename(file_path), unicode(
                    translate('SongsPlugin.PowerSongImport', 'Invalid %s file. Missing "TITLE" header.')) % PS_string)
                continue
            # Check that file had COPYRIGHTLINE label
            if not found_copyright:
                self.logError(self.title, unicode(
                    translate('SongsPlugin.PowerSongImport', 'Invalid %s file. Missing "COPYRIGHTLINE" header.')) %
                    PS_string)
                continue
            # Check that file had at least one verse
            if not self.verses:
                self.logError(self.title, unicode(
                    translate('SongsPlugin.PowerSongImport', 'Verses not found. Missing "PART" header.')))
                continue
            if not self.finish():
                self.logError(self.title)

    def _readString(self, file_object):
        """
        Reads in next variable-length string.
        """
        string_len = self._read7BitEncodedInteger(file_object)
        return unicode(file_object.read(string_len), u'utf-8', u'ignore')

    def _read7BitEncodedInteger(self, file_object):
        """
        Reads in a 32-bit integer in compressed 7-bit format.

        Accomplished by reading the integer 7 bits at a time. The high bit
        of the byte when set means to continue reading more bytes.
        If the integer will fit in 7 bits (ie <= 127), it only takes up one
        byte. Otherwise, it may take up to 5 bytes.

        Reference: .NET method System.IO.BinaryReader.Read7BitEncodedInt

        :raises ValueError: if more than 5 continuation bytes are seen
            (corrupted stream).
        """
        val = 0
        shift = 0
        i = 0
        while True:
            # Check for corrupted stream (since max 5 bytes per 32-bit integer)
            if i == 5:
                raise ValueError
            byte = self._readByte(file_object)
            # Strip high bit and shift left
            val += (byte & 0x7f) << shift
            shift += 7
            high_bit_set = byte & 0x80
            if not high_bit_set:
                break
            i += 1
        return val

    def _readByte(self, file_object):
        """
        Reads in next byte as an unsigned integer

        Note: returns 0 at end of file.
        """
        byte_str = file_object.read(1)
        # If read result is empty, then reached end of file
        if not byte_str:
            return 0
        else:
            return ord(byte_str)

    def _parseCopyrightCCLI(self, field):
        """
        Look for CCLI song number, and get copyright
        """
        copyright_text, sep, ccli_no = field.rpartition(u'CCLI')
        if not sep:
            # No 'CCLI' marker: rpartition put the whole field in ccli_no
            copyright_text = ccli_no
            ccli_no = u''
        if copyright_text:
            self.addCopyright(copyright_text.rstrip(u'\n').replace(u'\n', u' '))
        if ccli_no:
            ccli_no = ccli_no.strip(u' :')
            if ccli_no.isdigit():
                self.ccliNumber = ccli_no
| marmyshev/transitions | openlp/plugins/songs/lib/powersongimport.py | Python | gpl-2.0 | 9,259 | [
"Brian"
] | f0f818054169b52a512d1e66579485e1184945e20dee0f8f632cf35db69a46c0 |
from copy import deepcopy
import logging
import os
import time
import nibabel as nib
from nibabel.streamlines import detect_format
from nibabel.streamlines.tractogram import Tractogram
import numpy as np
from dipy.io.stateful_tractogram import Origin, Space, StatefulTractogram
from dipy.io.vtk import save_vtk_streamlines, load_vtk_streamlines
from dipy.io.dpy import Dpy
from dipy.io.utils import (create_tractogram_header,
is_header_compatible)
def save_tractogram(sft, filename, bbox_valid_check=True):
    """ Save the stateful tractogram in any format (trk, tck, vtk, fib, dpy)

    Parameters
    ----------
    sft : StatefulTractogram
        The stateful tractogram to save
    filename : string
        Filename with valid extension
    bbox_valid_check : bool
        Verification for negative voxel coordinates or values above the
        volume dimensions. Default is True, to enforce valid file.

    Returns
    -------
    output : bool
        True if the saving operation was successful

    Raises
    ------
    TypeError
        If the filename extension is not one of trk/tck/vtk/fib/dpy.
    ValueError
        If ``bbox_valid_check`` is True and the bounding box is invalid.
    """
    _, extension = os.path.splitext(filename)
    if extension not in ['.trk', '.tck', '.vtk', '.fib', '.dpy']:
        raise TypeError('Output filename is not one of the supported format')
    if bbox_valid_check and not sft.is_bbox_in_vox_valid():
        raise ValueError('Bounding box is not valid in voxel space, cannot '
                         'load a valid file if some coordinates are '
                         'invalid. Please use the function '
                         'remove_invalid_streamlines to discard invalid '
                         'streamlines or set bbox_valid_check to False')
    # Remember the caller's space/origin: files are always written in
    # RASMM with center origin, and the sft is restored afterwards.
    old_space = deepcopy(sft.space)
    old_origin = deepcopy(sft.origin)
    sft.to_rasmm()
    sft.to_center()
    timer = time.time()
    if extension in ['.trk', '.tck']:
        # nibabel handles the on-disk representation for trk/tck
        tractogram_type = detect_format(filename)
        header = create_tractogram_header(tractogram_type,
                                          *sft.space_attributes)
        new_tractogram = Tractogram(sft.streamlines,
                                    affine_to_rasmm=np.eye(4))
        if extension == '.trk':
            # Only the trk format can store per-point/per-streamline data
            new_tractogram.data_per_point = sft.data_per_point
            new_tractogram.data_per_streamline = sft.data_per_streamline
        fileobj = tractogram_type(new_tractogram, header=header)
        nib.streamlines.save(fileobj, filename)
    elif extension in ['.vtk', '.fib']:
        save_vtk_streamlines(sft.streamlines, filename, binary=True)
    elif extension in ['.dpy']:
        dpy_obj = Dpy(filename, mode='w')
        dpy_obj.write_tracks(sft.streamlines)
        dpy_obj.close()
    logging.debug('Save %s with %s streamlines in %s seconds',
                  filename, len(sft), round(time.time() - timer, 3))
    # Restore the tractogram to the space/origin the caller had
    sft.to_space(old_space)
    sft.to_origin(old_origin)
    return True
def load_tractogram(filename, reference, to_space=Space.RASMM,
                    to_origin=Origin.NIFTI, bbox_valid_check=True,
                    trk_header_check=True):
    """ Load the stateful tractogram from any format (trk, tck, vtk, fib, dpy)

    Parameters
    ----------
    filename : string
        Filename with valid extension
    reference : Nifti or Trk filename, Nifti1Image or TrkFile, Nifti1Header or
        trk.header (dict), or 'same' if the input is a trk file.
        Reference that provides the spatial attribute.
        Typically a nifti-related object from the native diffusion used for
        streamlines generation
    to_space : Enum (dipy.io.stateful_tractogram.Space)
        Space to which the streamlines will be transformed after loading
    to_origin : Enum (dipy.io.stateful_tractogram.Origin)
        Origin to which the streamlines will be transformed after loading
            NIFTI standard, default (center of the voxel)
            TRACKVIS standard (corner of the voxel)
    bbox_valid_check : bool
        Verification for negative voxel coordinates or values above the
        volume dimensions. Default is True, to enforce valid file.
    trk_header_check : bool
        Verification that the reference has the same header as the spatial
        attributes as the input tractogram when a Trk is loaded

    Returns
    -------
    output : StatefulTractogram
        The tractogram to load (must have been saved properly)
    """
    _, extension = os.path.splitext(filename)
    if extension not in ['.trk', '.tck', '.vtk', '.fib', '.dpy']:
        logging.error('Output filename is not one of the supported format')
        return False
    if to_space not in Space:
        logging.error('Space MUST be one of the 3 choices (Enum)')
        return False
    if reference == 'same':
        # Only trk embeds its own spatial attributes in the header
        if extension == '.trk':
            reference = filename
        else:
            logging.error('Reference must be provided, "same" is only '
                          'available for Trk file.')
            return False
    if trk_header_check and extension == '.trk':
        if not is_header_compatible(filename, reference):
            logging.error('Trk file header does not match the provided '
                          'reference')
            return False
    timer = time.time()
    data_per_point = None
    data_per_streamline = None
    if extension in ['.trk', '.tck']:
        tractogram_obj = nib.streamlines.load(filename).tractogram
        streamlines = tractogram_obj.streamlines
        if extension == '.trk':
            # Only trk carries per-point/per-streamline data
            data_per_point = tractogram_obj.data_per_point
            data_per_streamline = tractogram_obj.data_per_streamline
    elif extension in ['.vtk', '.fib']:
        streamlines = load_vtk_streamlines(filename)
    elif extension in ['.dpy']:
        dpy_obj = Dpy(filename, mode='r')
        streamlines = list(dpy_obj.read_tracks())
        dpy_obj.close()
    logging.debug('Load %s with %s streamlines in %s seconds',
                  filename, len(streamlines), round(time.time() - timer, 3))
    # On-disk streamlines are in RASMM/NIFTI; convert to the requested
    # space/origin only after constructing the stateful tractogram.
    sft = StatefulTractogram(streamlines, reference, Space.RASMM,
                             origin=Origin.NIFTI,
                             data_per_point=data_per_point,
                             data_per_streamline=data_per_streamline)
    sft.to_space(to_space)
    sft.to_origin(to_origin)
    if bbox_valid_check and not sft.is_bbox_in_vox_valid():
        # Message fixed: the original string concatenation was missing
        # separating spaces ("...usethe function ...streamlinesto discard...")
        raise ValueError('Bounding box is not valid in voxel space, cannot '
                         'load a valid file if some coordinates are invalid. '
                         'Please set bbox_valid_check to False and then use '
                         'the function remove_invalid_streamlines to discard '
                         'invalid streamlines.')
    return sft
def load_generator(ttype):
    """ Generate a loading function that performs a file extension
    check to restrict the user to a single file format.

    Parameters
    ----------
    ttype : string
        Extension of the file format that requires a loader

    Returns
    -------
    output : function
        Function (load_tractogram) that handle only one file format
    """
    def f_gen(filename, reference, to_space=Space.RASMM,
              to_origin=Origin.NIFTI, bbox_valid_check=True,
              trk_header_check=True):
        _, extension = os.path.splitext(filename)
        if not extension == ttype:
            raise ValueError('This function can only load {} files, for a more'
                             ' general purpose, use load_tractogram instead.'.format(ttype))

        # Forward ALL of the caller's arguments. The previous version
        # hard-coded to_space=Space.RASMM here, silently ignoring the
        # to_space the user asked for.
        sft = load_tractogram(filename, reference,
                              to_space=to_space,
                              to_origin=to_origin,
                              bbox_valid_check=bbox_valid_check,
                              trk_header_check=trk_header_check)
        return sft

    f_gen.__doc__ = load_tractogram.__doc__.replace(
        'from any format (trk, tck, vtk, fib, dpy)',
        'of the {} format'.format(ttype))
    return f_gen
def save_generator(ttype):
    """ Generate a saving function that performs a file extension
    check to restrict the user to a single file format.

    Parameters
    ----------
    ttype : string
        Extension of the file format that requires a saver

    Returns
    -------
    output : function
        Function (save_tractogram) that handle only one file format
    """
    def f_gen(sft, filename, bbox_valid_check=True):
        ext = os.path.splitext(filename)[1]
        if ext != ttype:
            raise ValueError('This function can only save {} file, for more'
                             ' generability use save_tractogram instead.'.format(ttype))
        save_tractogram(sft, filename, bbox_valid_check=bbox_valid_check)

    # Specialize the generic docstring for the bound extension
    f_gen.__doc__ = save_tractogram.__doc__.replace(
        'in any format (trk, tck, vtk, fib, dpy)',
        'of the {} format'.format(ttype))
    return f_gen
# Format-restricted convenience wrappers: each one accepts only the single
# extension baked in by load_generator/save_generator above.
load_trk = load_generator('.trk')
load_tck = load_generator('.tck')
load_vtk = load_generator('.vtk')
load_fib = load_generator('.fib')
load_dpy = load_generator('.dpy')
save_trk = save_generator('.trk')
save_tck = save_generator('.tck')
save_vtk = save_generator('.vtk')
save_fib = save_generator('.fib')
save_dpy = save_generator('.dpy')
| FrancoisRheaultUS/dipy | dipy/io/streamline.py | Python | bsd-3-clause | 9,234 | [
"VTK"
] | ab4f3e9db7591a49e86d17fc16b68841e1d131a503fdf56b9e54dd67694521e8 |
# -*- coding: utf8 -*-
#
# Copyright 2011 Kyrre Ness Sjøbæk
# This file is part of AcdOpti.
#
# AcdOpti is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AcdOpti is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AcdOpti. If not, see <http://www.gnu.org/licenses/>.
"""
The point of this file is to collect all global statics
settings in one file. Currently this is a hard-coded implementation,
but using a file like "~/.acdOpti" instead is a simple change of implementation.
"""
from AcdOptiExceptions import AcdOptiException_settings_notFound
class AcdOptiSettings:
    """
    Provider of global static settings for AcdOpti.

    Currently the settings are hard-coded in this class; swapping in a
    file-backed implementation (e.g. "~/.acdOpti") only requires changing
    how the lookup table below is populated.
    """

    # Hard-coded settings table.
    # "acdtool_preinit" is optional for callers: it tells acdOpti which
    # command (if any) must be run before starting acdtool.
    _HARDCODED_SETTINGS = {
        "hopperUser": "ksjobak",
        "acdtoolpath": "acdtool",
        "acdtool_preinit": "module load openmpi-x86_64",
        "paraviewpath": "paraview",
        "terminalcommand": "gnome-terminal",
    }

    def __init__(self):
        pass

    def getSetting(self, key):
        """
        Gets a setting by the name "key".

        Raises an AcdOptiException_settings_notFound if key is invalid.
        """
        if key not in self._HARDCODED_SETTINGS:
            raise AcdOptiException_settings_notFound
        return self._HARDCODED_SETTINGS[key]
| kyrsjo/AcdOpti | src/acdOpti/AcdOptiSettings.py | Python | gpl-3.0 | 1,972 | [
"ParaView"
] | 574ad7858a80e8364bc9ba63d6b51c52c2fca230076718a2aea271362607751b |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# statusexe - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
# CGI entry point: delegates request handling to the shared statusexe
# functionality through the generic MiG CGI wrapper.
import cgi
import cgitb
# Show detailed tracebacks in the browser for easier CGI debugging.
cgitb.enable()
from shared.functionality.statusexe import main
from shared.cgiscriptstub import run_cgi_script
# NOTE(review): 'cgi' looks unused in this file -- presumably kept for
# consistency with the other cgi-bin scripts; confirm before removing.
run_cgi_script(main)
| heromod/migrid | mig/cgi-bin/statusexe.py | Python | gpl-2.0 | 1,106 | [
"Brian"
] | 891a94b84a47036f1fd4110342a70a9e8caab256d78f4be57f8d4ebb6ab02887 |
"""
dashi documentations goes here
"""
# Re-export the histogram factory under the shorter name "factory".
from . import histfactory as factory
# Fitting helpers and model functions re-exported at package level.
from .fitting import model,gaussian,poly,leastsq #,poissonllh
from .visual import visual
from .storage import histsave,histload
from .objbundle import bundle, emptybundle, bundleize
from . import junkbox
import logging
# NOTE(review): calling basicConfig at import time configures the host
# application's root logger as a side effect -- confirm this is intended
# for a library package.
logging.basicConfig(level=logging.INFO)
from . import tests
| achim1/dashi | dashi/__init__.py | Python | lgpl-3.0 | 360 | [
"Gaussian"
] | 7db3304ee528633aeef732dec22e32572b74c8fd49267823d749d33057477c5a |
# https://docs.aws.amazon.com/AmazonS3/latest/API/API_Operations.html
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_objects
"""
Configuration of an S3 storage
Like others, but in protocol S3 add:
* SecureConnection: true if https, false otherwise
* Aws_access_key_id
* Aws_secret_access_key
if the Aws variables are not defined, it will try to go throught the S3Gateway
The key of the objects are the LFN without trailing path.
The Path should be the BucketName
"""
import copy
import errno
import functools
import os
import requests
import boto3
from botocore.exceptions import ClientError
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.DErrno import cmpError
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC.DataManagementSystem.Client.S3GatewayClient import S3GatewayClient
from DIRAC.Resources.Storage.StorageBase import StorageBase
LOG = gLogger.getSubLogger(__name__)
def _extractKeyFromS3Path(meth):
"""Decorator to split an s3 "external" url (s3://server:port/bucket/path)
and return only the path part.
"""
@functools.wraps(meth)
def extractKey(self, urls, *args, **kwargs):
# If set to False, we are already working with keys, so
# skip all the splitting
extractKeys = kwargs.pop("extractKeys", True)
keysToUrls = {}
keyArgs = {}
successful = {}
failed = {}
if extractKeys:
for url in urls:
res = self._getKeyFromURL(url) # pylint: disable=protected-access
if not res["OK"]:
failed[url] = res["Message"]
continue
key = res["Value"]
keysToUrls[key] = url
keyArgs[key] = urls[url]
else:
keyArgs = copy.copy(urls)
result = meth(self, keyArgs, *args, **kwargs)
if not result["OK"]:
return result
# Restore original paths
for key in result["Value"]["Failed"]:
failed[keysToUrls.get(key, key)] = result["Value"]["Failed"][key]
for key in result["Value"]["Successful"]:
successful[keysToUrls.get(key, key)] = result["Value"]["Successful"][key]
result["Value"].update({"Successful": successful, "Failed": failed})
return result
return extractKey
class S3Storage(StorageBase):
"""
.. class:: StorageBase
"""
pluginName = "S3"
_OUTPUT_PROTOCOLS = ["file", "s3", "http", "https"]
def __init__(self, storageName, parameters):
super(S3Storage, self).__init__(storageName, parameters)
self.isok = True
aws_access_key_id = parameters.get("Aws_access_key_id")
aws_secret_access_key = parameters.get("Aws_secret_access_key")
self.secureConnection = parameters.get("SecureConnection", "True") == "True"
proto = "https" if self.secureConnection else "http"
port = int(parameters.get("Port"))
if not port:
port = 443 if self.secureConnection else 80
endpoint_url = "%s://%s:%s" % (proto, parameters["Host"], port)
self.bucketName = parameters["Path"]
self.s3_client = boto3.client(
"s3",
endpoint_url=endpoint_url,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
self.srmSpecificParse = False
self.pluginName = "S3"
# if we have the credentials loaded, we can perform direct access
# otherwise we have to go through the S3Gateway
self.directAccess = aws_access_key_id and aws_secret_access_key
self.S3GatewayClient = S3GatewayClient()
    def _getKeyFromURL(self, url):
        """Extract the object Key from the URL.

        The key is basically the LFN without trailing slash.

        I despise such path mangling, especially after all the efforts to
        get rid of such methods. However, since in the case of S3 we need
        to go back and forth between URL and LFN (for finding keys, checking
        accesses in the gw, etc), there is no other option...

        :param url: s3 url (e.g. ``s3://server:port/bucket/some/lfn``)
        :returns: S_OK(key) / S_ERROR
        """
        # srmSpecific=False: plain URL parsing, no SRM WSUrl handling
        res = pfnparse(url, srmSpecific=False)
        if not res["OK"]:
            return res
        splitURL = res["Value"]
        # The path originally looks like '/bucket/lhcb/user/c/chaen
        # We remove the trailing slash, and get the relative path
        # of bucket/lhcb/user/c/chaen starting from bucket,
        # which gives you basically the LFN without trailing slash
        path = os.path.relpath(splitURL["Path"].lstrip("/"), start=self.bucketName)
        key = os.path.join(path, splitURL["FileName"])
        return S_OK(key)
# @_extractKeyFromS3Path
# def direct_exists(self, keys):
# """ Check if the keys exists on the storage
# :param self: self reference
# :param keys: list of keys
# :returns: Failed dictionary: {pfn : error message}
# Successful dictionary: {pfn : bool}
# S_ERROR in case of argument problems
# """
# successful = {}
# failed = {}
# # If we have a direct access, we can just do the request directly
# if self.directAccess:
# for key in keys:
# try:
# self.s3_client.head_object(Bucket=self.bucketName, Key=key)
# successful[key] = True
# except ClientError as exp:
# if exp.response['Error']['Code'] == '404':
# successful[key] = False
# else:
# failed[key] = repr(exp)
# except Exception as exp:
# failed[key] = repr(exp)
# else:
# # Otherwise, ask the gw for a presigned URL,
# # and perform it with requests
# for key in keys:
# try:
# res = self.S3GatewayClient.createPresignedUrl(self.name, 'head_object', key)
# if not res['OK']:
# failed[key] = res['Message']
# continue
# presignedURL = res['Value']
# response = requests.get(presignedURL)
# if response.status_code == 200:
# successful[key] = True
# elif response.status_code == 404: # not found
# successful[key] = False
# else:
# failed[key] = response.reason
# except Exception as e:
# failed[key] = repr(e)
# resDict = {'Failed': failed, 'Successful': successful}
# return S_OK(resDict)
def exists(self, urls):
"""Check if the urls exists on the storage
:param urls: list of URLs
:returns: Failed dictionary: {url : error message}
Successful dictionary: {url : bool}
S_ERROR in case of argument problems
"""
if self.directAccess:
return self._direct_exists(urls)
return self._presigned_exists(urls)
@_extractKeyFromS3Path
def _direct_exists(self, urls):
"""Check if the files exists on the storage
:param urls: list of urls
:returns: Failed dictionary: {pfn : error message}
Successful dictionary: {pfn : bool}
S_ERROR in case of argument problems
"""
successful = {}
failed = {}
# the @_extractKeyFromS3Path transformed URL into keys
keys = urls
for key in keys:
try:
self.s3_client.head_object(Bucket=self.bucketName, Key=key)
successful[key] = True
except ClientError as exp:
if exp.response["Error"]["Code"] == "404":
successful[key] = False
else:
failed[key] = repr(exp)
except Exception as exp:
failed[key] = repr(exp)
resDict = {"Failed": failed, "Successful": successful}
return S_OK(resDict)
def _presigned_exists(self, urls):
"""Check if the URLs exists on the storage
:param urls: list of urls
:returns: Failed dictionary: {pfn : error message}
Successful dictionary: {pfn : bool}
S_ERROR in case of argument problems
"""
successful = {}
failed = {}
res = self.S3GatewayClient.createPresignedUrl(self.name, "head_object", urls)
if not res["OK"]:
return res
failed.update(res["Value"]["Failed"])
presignedURLs = res["Value"]["Successful"]
# Otherwise, ask the gw for a presigned URL,
# and perform it with requests
for url, presignedURL in presignedURLs.items():
try:
response = requests.get(presignedURL)
if response.status_code == 200:
successful[url] = True
elif response.status_code == 404: # not found
successful[url] = False
else:
failed[url] = response.reason
except Exception as e:
failed[url] = repr(e)
resDict = {"Failed": failed, "Successful": successful}
return S_OK(resDict)
    def isFile(self, urls):
        """Check if the urls provided are a file or not

        In practice, if the object exists, it is necessarily a file:
        S3 has no directory concept, so this just delegates to
        :py:meth:`exists`.

        :param urls: list of urls to be checked
        :returns: * Failed dict: {path : error message}
                  * Successful dict: {path : bool}
                  * S_ERROR in case of argument problems
        """
        return self.exists(urls)
def getFile(self, urls, localPath=False):
"""Make a local copy of the urls.
:param urls: list of urls on the storage
:param localPath: destination folder. Default is from current directory
:returns: * Successful dict: {path : size}
* Failed dict: {path : errorMessage}
* S_ERROR in case of argument problems
"""
if self.directAccess:
return self._direct_getFile(urls, localPath=localPath)
return self._presigned_getFile(urls, localPath=localPath)
@_extractKeyFromS3Path
def _direct_getFile(self, urls, localPath=False):
"""Make a local copy of the keys.
:param urls: list of urls on storage
:param localPath: destination folder. Default is from current directory
:returns: * Successful dict: {path : size}
* Failed dict: {path : errorMessage}
* S_ERROR in case of argument problems
"""
log = LOG.getSubLogger("getFile")
# the @_extractKeyFromS3Path transformed URL into keys
keys = urls
failed = {}
successful = {}
for src_key in keys:
try:
fileName = os.path.basename(src_key)
dest_file = os.path.join(localPath if localPath else os.getcwd(), fileName)
log.debug("Trying to download %s to %s" % (src_key, dest_file))
self.s3_client.download_file(self.bucketName, src_key, dest_file)
successful[src_key] = os.path.getsize(dest_file)
except Exception as exp:
failed[src_key] = repr(exp)
return S_OK({"Failed": failed, "Successful": successful})
    def _presigned_getFile(self, urls, localPath=False):
        """Make a local copy of the files.

        The S3 gateway is asked for presigned GET urls, which are then
        fetched over HTTP and streamed to disk.

        :param urls: list of urls on storage
        :param localPath: destination folder. Default is from current directory
        :returns: * Successful dict: {path : size}
                  * Failed dict: {path : errorMessage}
                  * S_ERROR in case of argument problems
        """
        log = LOG.getSubLogger("getFile")
        failed = {}
        successful = {}
        res = self.S3GatewayClient.createPresignedUrl(self.name, "get_object", urls)
        if not res["OK"]:
            return res
        failed.update(res["Value"]["Failed"])
        presignedURLs = res["Value"]["Successful"]
        for src_url, presignedURL in presignedURLs.items():
            try:
                fileName = os.path.basename(src_url)
                dest_file = os.path.join(localPath if localPath else os.getcwd(), fileName)
                log.debug("Trying to download %s to %s" % (src_url, dest_file))
                # Stream download to save memory
                # https://requests.readthedocs.io/en/latest/user/advanced/#body-content-workflow
                with requests.get(presignedURL, stream=True) as r:
                    r.raise_for_status()
                    with open(dest_file, "wb") as f:
                        for chunk in r.iter_content():
                            if chunk:  # filter out keep-alive new chunks
                                f.write(chunk)
                # Report the size actually written on disk
                successful[src_url] = os.path.getsize(dest_file)
            except Exception as exp:
                failed[src_url] = repr(exp)
        return S_OK({"Failed": failed, "Successful": successful})
def putFile(self, urls, sourceSize=0):
"""Upload a local file.
..warning:: no 3rd party copy possible
:param urls: dictionary { urls : localFile }
:param sourceSize: size of the file in byte. Mandatory for third party copy (WHY ???)
Also, this parameter makes it essentially a non bulk operation for
third party copy, unless all files have the same size...
:returns: * Successful dict: { path : size }
* Failed dict: { path : error message }
* S_ERROR in case of argument problems
"""
if self.directAccess:
return self._direct_putFile(urls, sourceSize=sourceSize)
return self._presigned_putFile(urls, sourceSize=sourceSize)
    @_extractKeyFromS3Path
    def _direct_putFile(self, urls, sourceSize=0):
        """Upload local files directly to the S3 bucket.

        ..warning:: no 3rd party copy possible

        :param urls: dictionary { urls : localFile }
        :param sourceSize: size of the file in byte. Mandatory for third party copy (WHY ???)
                           Also, this parameter makes it essentially a non bulk operation for
                           third party copy, unless all files have the same size...
        :returns: * Successful dict: { path : size }
                  * Failed dict: { path : error message }
                  * S_ERROR in case of argument problems
        """
        log = LOG.getSubLogger("putFile")
        # the @_extractKeyFromS3Path transformed URL into keys
        keys = urls
        failed = {}
        successful = {}
        for dest_key, src_file in keys.items():
            try:
                # The Adler32 checksum is stored as object metadata so it can
                # later be returned by getFileMetadata
                cks = fileAdler(src_file)
                if not cks:
                    log.warn("Cannot get ADLER32 checksum for %s" % src_file)
                # NOTE(review): when fileAdler fails, a falsy (non-string)
                # value ends up in Metadata -- confirm boto3 tolerates this,
                # otherwise the upload will land in the failed dict.
                with open(src_file, "rb") as src_fd:
                    self.s3_client.put_object(
                        Body=src_fd, Bucket=self.bucketName, Key=dest_key, Metadata={"Checksum": cks}
                    )
                successful[dest_key] = os.path.getsize(src_file)
            except Exception as e:
                failed[dest_key] = repr(e)
        return S_OK({"Failed": failed, "Successful": successful})
def _presigned_putFile(self, urls, sourceSize=0):
"""Upload a local file.
..warning:: no 3rd party copy possible
:param urls: dictionary { urls : localFile }
:param sourceSize: size of the file in byte. Mandatory for third party copy (WHY ???)
Also, this parameter makes it essentially a non bulk operation for
third party copy, unless all files have the same size...
:returns: * Successful dict: { path : size }
* Failed dict: { path : error message }
* S_ERROR in case of argument problems
"""
log = LOG.getSubLogger("putFile")
failed = {}
successful = {}
# Construct a dict <url:{x-amz-meta-checksum: adler32}>
# it needs to be passed to createPresignedUrl
urlAdlers = {url: {"x-amz-meta-checksum": fileAdler(src_file)} for url, src_file in urls.items()}
res = self.S3GatewayClient.createPresignedUrl(self.name, "put_object", urlAdlers)
if not res["OK"]:
return res
failed.update(res["Value"]["Failed"])
# Contains <url: presignedResponse>
presignedResponses = res["Value"]["Successful"]
for dest_url, presignedResponse in presignedResponses.items():
src_file = urls[dest_url]
try:
cks = fileAdler(src_file)
if not cks:
log.warn("Cannot get ADLER32 checksum for %s" % src_file)
presignedURL = presignedResponse["url"]
presignedFields = presignedResponse["fields"]
with open(src_file, "rb") as src_fd:
# files = {'file': (dest_key, src_fd)}
files = {"file": src_fd}
response = requests.post(presignedURL, data=presignedFields, files=files)
if not response.ok:
raise Exception(response.reason)
successful[dest_url] = os.path.getsize(src_file)
except Exception as e:
failed[dest_url] = repr(e)
return S_OK({"Failed": failed, "Successful": successful})
def getFileMetadata(self, urls):
"""Get metadata associated to the file(s)
:param urls: list of urls on the storage
:returns: * successful dict { path : metadata }
* failed dict { path : error message }
* S_ERROR in case of argument problems
"""
if self.directAccess:
return self._direct_getFileMetadata(urls)
return self._presigned_getFileMetadata(urls)
@_extractKeyFromS3Path
def _direct_getFileMetadata(self, urls):
"""Get metadata associated to the file(s)
:param urls: list of urls on the storage
:returns: * successful dict { path : metadata }
* failed dict { path : error message }
* S_ERROR in case of argument problems
"""
# the @_extractKeyFromS3Path transformed URL into keys
keys = urls
failed = {}
successful = {}
for key in keys:
try:
response = self.s3_client.head_object(Bucket=self.bucketName, Key=key)
responseMetadata = response["ResponseMetadata"]["HTTPHeaders"]
metadataDict = self._addCommonMetadata(responseMetadata)
metadataDict["File"] = True
metadataDict["Size"] = int(metadataDict["content-length"])
metadataDict["Checksum"] = metadataDict.get("x-amz-meta-checksum", "")
successful[key] = metadataDict
except Exception as exp:
failed[key] = repr(exp)
return S_OK({"Failed": failed, "Successful": successful})
    def _presigned_getFileMetadata(self, urls):
        """Get metadata associated to the file(s), going through the S3 gateway
        with presigned HEAD urls.

        :param urls: list of urls on the storage
        :returns: * successful dict { path : metadata }
                  * failed dict { path : error message }
                  * S_ERROR in case of argument problems
        """
        failed = {}
        successful = {}
        res = self.S3GatewayClient.createPresignedUrl(self.name, "head_object", urls)
        if not res["OK"]:
            return res
        failed.update(res["Value"]["Failed"])
        presignedURLs = res["Value"]["Successful"]
        for url, presignedURL in presignedURLs.items():
            try:
                response = requests.head(presignedURL)
                if not response.ok:
                    raise Exception(response.reason)
                # Although the interesting fields are the same as when doing the query directly
                # the case is not quite the same, so make it lower everywhere
                responseMetadata = {headerKey.lower(): headerVal for headerKey, headerVal in response.headers.items()}
                metadataDict = self._addCommonMetadata(responseMetadata)
                # An S3 object is always a file; size comes from the HTTP
                # content-length header, checksum from the upload metadata
                metadataDict["File"] = True
                metadataDict["Size"] = int(metadataDict["content-length"])
                metadataDict["Checksum"] = metadataDict.get("x-amz-meta-checksum", "")
                successful[url] = metadataDict
            except Exception as exp:
                failed[url] = repr(exp)
        return S_OK({"Failed": failed, "Successful": successful})
def removeFile(self, urls):
"""Physically remove the file specified by keys
A non existing file will be considered as successfully removed
:param urls: list of urls on the storage
:returns: * Successful dict {path : True}
* Failed dict {path : error message}
* S_ERROR in case of argument problems
"""
if self.directAccess:
return self._direct_removeFile(urls)
return self._presigned_removeFile(urls)
@_extractKeyFromS3Path
def _direct_removeFile(self, urls):
"""Physically remove the file specified by keys
A non existing file will be considered as successfully removed
:param urls: list of urls on the storage
:returns: * Successful dict {path : True}
* Failed dict {path : error message}
* S_ERROR in case of argument problems
"""
failed = {}
successful = {}
# the @_extractKeyFromS3Path transformed URL into keys
keys = urls
for key in keys:
try:
self.s3_client.delete_object(Bucket=self.bucketName, Key=key)
successful[key] = True
except Exception as exp:
failed[key] = repr(exp)
return S_OK({"Failed": failed, "Successful": successful})
def _presigned_removeFile(self, urls):
    """Physically remove the file specified by keys, going through presigned URLs.

    A file that does not exist is considered successfully removed.

    :param urls: list of urls on the storage
    :returns: * Successful dict {path : True}
              * Failed dict {path : error message}
              * S_ERROR in case of argument problems
    """
    res = self.S3GatewayClient.createPresignedUrl(self.name, "delete_object", urls)
    if not res["OK"]:
        return res

    # Start from whatever the gateway already refused.
    failed = dict(res["Value"]["Failed"])
    successful = {}

    for url, presignedURL in res["Value"]["Successful"].items():
        try:
            deleteResponse = requests.delete(presignedURL)
            if not deleteResponse.ok:
                raise Exception(deleteResponse.reason)
        except Exception as exc:
            failed[url] = repr(exc)
        else:
            successful[url] = True

    return S_OK({"Failed": failed, "Successful": successful})
def getFileSize(self, urls):
    """Get the physical size of the given files.

    Delegates to :meth:`getFileMetadata` and extracts the ``Size`` field.

    :param urls: list of urls on the storage
    :returns: * Successful dict {path : size}
              * Failed dict {path : error message }
              * S_ERROR in case of argument problem
    """
    res = self.getFileMetadata(urls)
    if not res["OK"]:
        return res

    metadataValue = res["Value"]
    sizes = {}
    for url, metadata in metadataValue["Successful"].items():
        sizes[url] = metadata["Size"]

    return S_OK({"Successful": sizes, "Failed": metadataValue["Failed"]})
#############################################################
#
# These are the methods for directory manipulation
#
def createDirectory(self, urls):
    """Create directory on the storage.

    S3 has no real directory concept, so every request trivially succeeds.

    :param urls: list of urls to be created on the storage
    :returns: Always Successful dict {path : True }
    """
    successful = dict.fromkeys(urls, True)
    return S_OK({"Failed": {}, "Successful": successful})
@staticmethod
def notAvailable(*_args, **_kwargs):
    """Catch-all implementation for operations that S3 does not support.

    Accepts and ignores any arguments so it can stand in for any
    method signature.
    """
    return S_ERROR("Functionality not available on S3")
# S3 has no directory hierarchy, so every directory-oriented operation is
# mapped onto the generic "not available" stub above.
listDirectory = (
    isDirectory
) = getDirectory = removeDirectory = getDirectorySize = getDirectoryMetadata = putDirectory = notAvailable
def getTransportURL(self, urls, protocols):
    """Get a transport URL for given urls.

    If http/https is requested, the URLs will be valid for 24 hours.

    :param dict urls: s3 urls
    :param list protocols: a list of acceptable transport protocols in priority order.
        In practice, besides 's3', it can only be:

        * 'https' if secureConnection is True
        * 'http' otherwise

    :returns: succ/failed dict url with required protocol
    """
    parentRes = super(S3Storage, self).getTransportURL(urls, protocols)
    # Anything other than a "protocol not supported" error (including
    # success) is returned untouched.
    if not cmpError(parentRes, errno.EPROTONOSUPPORT):
        return parentRes

    # A secure endpoint only serves https, an insecure one only http.
    if self.secureConnection:
        if "https" not in protocols:
            return S_ERROR(errno.EPROTONOSUPPORT, "Only https protocol is supported")
    elif "http" not in protocols:
        return S_ERROR(errno.EPROTONOSUPPORT, "Only http protocol is supported")

    # Presigned URLs are made valid for 24 hours.
    oneDay = 60 * 60 * 24
    if self.directAccess:
        return self.createPresignedUrl(urls, "get_object", expiration=oneDay)
    return self.S3GatewayClient.createPresignedUrl(self.name, "get_object", urls, expiration=oneDay)
@_extractKeyFromS3Path
def createPresignedUrl(self, urls, s3_method, expiration=3600):
    """Generate a presigned URL to share an S3 object

    :param urls: urls for which to generate a presigned URL. If s3_method is put_object, it must be a dict <url:Fields>
                 where fields are the metadata of the file
                 (see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.generate_presigned_post) # pylint: disable=line-too-long # noqa
    :param s3_method: name of the method for which to generate a presigned URL
    :param expiration: Time in seconds for the presigned URL to remain valid
    :return: S_OK with Successful dict {key: presigned URL or POST fields}
             and Failed dict {key: error message}
    """
    # the decorator transformed the urls into keys
    keys = urls
    successful = {}
    failed = {}
    # Generate a presigned URL for the S3 object
    log = LOG.getSubLogger("createPresignedUrl")
    for key in keys:
        try:
            if s3_method != "put_object":
                # Plain presigned GET/HEAD/DELETE style URL.
                response = self.s3_client.generate_presigned_url(
                    ClientMethod=s3_method, Params={"Bucket": self.bucketName, "Key": key}, ExpiresIn=expiration
                )
            else:
                # NOTE(review): this assumes ``urls`` was passed as a dict
                # <url: Fields> for put_object — a plain list has no ``get``.
                # Confirm all callers honour that contract.
                fields = keys.get(key)
                if not isinstance(fields, dict):
                    fields = None
                response = self.s3_client.generate_presigned_post(
                    self.bucketName, key, Fields=fields, ExpiresIn=expiration
                )
            successful[key] = response
        except ClientError as e:
            log.debug(e)
            failed[key] = repr(e)

    # The response contains the presigned URL
    return S_OK({"Successful": successful, "Failed": failed})
| DIRACGrid/DIRAC | src/DIRAC/Resources/Storage/S3Storage.py | Python | gpl-3.0 | 28,163 | [
"DIRAC"
] | 4f39a4387e7d2ecc4747bfc7040c515211121d24c6085c0b3a95acddaaa0e884 |
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Parsplice(CMakePackage):
    """ParSplice code implements the Parallel Trajectory Splicing algorithm"""

    homepage = "https://gitlab.com/exaalt/parsplice"
    url = "https://gitlab.com/api/v4/projects/exaalt%2Fparsplice/repository/archive.tar.gz?sha=v1.1"
    git = "https://gitlab.com/exaalt/parsplice.git"

    tags = ['ecp', 'ecp-apps']

    version('develop', branch='master')
    # second argument is the md5 checksum of the v1.1 release tarball
    version('1.1', '3a72340d49d731a076e8942f2ae2f4e9')

    # CMake is only needed at build time.
    depends_on("cmake@3.1:", type='build')
    # Link/run-time libraries.
    depends_on("berkeley-db")
    depends_on("nauty")
    depends_on("boost")
    depends_on("mpi")
    depends_on("eigen@3:")
    # LAMMPS must be built with its library interface enabled.
    depends_on("lammps+lib@20170901:")

    def cmake_args(self):
        """Return extra CMake flags: always build shared libraries."""
        options = ['-DBUILD_SHARED_LIBS=ON']
        return options
| mfherbst/spack | var/spack/repos/builtin/packages/parsplice/package.py | Python | lgpl-2.1 | 1,997 | [
"LAMMPS"
] | eefa2b5a4068326d720f7be6c43b83a84b971aa0036ca966853198aa210710f3 |
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'phonopy'
tab.settings['Output file name'] = 'OUTCAR.born'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'program'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Effective medium method'] = 'Averaged permittivity'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Particle size(mu)'] = 0.0
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Legend'] = 'Average permittivity'
# Add new scenarios
methods = ['Maxwell-Garnett', 'Bruggeman']
shapes = ['Needle','Ellipsoid','Plate']
hkls = [[0,0,1], [0,0,1], [1,0,0]]
vfs = [0.1]
sizes = [0.0001]
sigmas = [0.0]
for method in methods:
for shape,hkl in zip(shapes,hkls):
for vf in vfs:
for size,sigma in zip(sizes,sigmas):
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Volume fraction'] = vf
tab.settings['Particle shape'] = shape
tab.settings['Particle size(mu)'] = size
tab.settings['Effective medium method'] = method
tab.settings['Particle size distribution sigma(mu)'] = sigma
tab.settings['Unique direction - h'] = hkl[0]
tab.settings['Unique direction - k'] = hkl[1]
tab.settings['Unique direction - l'] = hkl[2]
#tab.settings['Legend'] = method + ' ' + shape + ' vf='+str(vf)+' size='+str(size)+' sigma=',str(sigma)
tab.settings['Legend'] = method + ' ' + shape + ' hkl='+str(hkl)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0.0
tab.settings['Maximum frequency'] = 300.0
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Phonopy Na2(SO4)2'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 800
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
| JohnKendrick/PDielec | Examples/Phonopy/Na2SO42/script.py | Python | mit | 2,605 | [
"phonopy"
] | babd59538ef9f2b8f39629287d67d791dec371e257f8effa91a64f810e76c56b |
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Contains parsers for all supported programs"""
# These import statements are added for the convenience of users...
# Rather than having to type:
# from cclib.parser.gaussianparser import Gaussian
# they can use:
# from cclib.parser import Gaussian
from .adfparser import ADF
from .daltonparser import DALTON
from .gamessparser import GAMESS
from .gamessukparser import GAMESSUK
from .gaussianparser import Gaussian
from .jaguarparser import Jaguar
from .molproparser import Molpro
from .mopacparser import MOPAC
from .nwchemparser import NWChem
from .orcaparser import ORCA
from .psiparser import Psi
from .qchemparser import QChem
# This allows users to type:
# from cclib.parser import ccopen
# from cclib.parser import ccread
from .ccio import ccopen
from .ccio import ccread
from .data import ccData
| ben-albrecht/cclib | cclib/parser/__init__.py | Python | lgpl-2.1 | 1,333 | [
"ADF",
"Dalton",
"GAMESS",
"Gaussian",
"Jaguar",
"MOPAC",
"Molpro",
"NWChem",
"ORCA",
"cclib"
] | 199d2d1006f50b189f93b68e066a5c7fbbe25877bd3a57ad3349be638513fe6e |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from monty.serialization import loadfn
import warnings
import numpy as np
import multiprocessing
import logging
from pymatgen.analysis.pourbaix_diagram import PourbaixDiagram, PourbaixEntry, \
PourbaixPlotter, IonEntry, MultiEntry
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.core.ion import Ion
from pymatgen import SETTINGS
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
logger = logging.getLogger(__name__)
class PourbaixEntryTest(unittest.TestCase):
    # Flag read by the multiprocess test plugin — presumably marks fixtures
    # as shareable across worker processes; TODO confirm.
    _multiprocess_shared_ = True
    # NOTE(review): this string follows a class attribute, so Python does NOT
    # treat it as the class docstring (__doc__ stays None).
    """
    Test all functions using a fictitious entry
    """
    def setUp(self):
        # One solid entry (Mn2O3) and one ion entry (MnO4-), both wrapped
        # in PourbaixEntry objects.
        # comp = Composition("Mn2O3")
        self.solentry = ComputedEntry("Mn2O3", 49)
        ion = Ion.from_formula("MnO4-")
        self.ionentry = IonEntry(ion, 25)
        self.PxIon = PourbaixEntry(self.ionentry)
        self.PxSol = PourbaixEntry(self.solentry)
        self.PxIon.concentration = 1e-4

    def test_pourbaix_entry(self):
        # Energy, name and concentration must pass through unchanged from
        # the wrapped entries.
        self.assertEqual(self.PxIon.entry.energy, 25, "Wrong Energy!")
        self.assertEqual(self.PxIon.entry.name,
                         "MnO4[-]", "Wrong Entry!")
        self.assertEqual(self.PxSol.entry.energy, 49, "Wrong Energy!")
        self.assertEqual(self.PxSol.entry.name,
                         "Mn2O3", "Wrong Entry!")
        # self.assertEqual(self.PxIon.energy, 25, "Wrong Energy!")
        # self.assertEqual(self.PxSol.energy, 49, "Wrong Energy!")
        self.assertEqual(self.PxIon.concentration, 1e-4, "Wrong concentration!")

    def test_calc_coeff_terms(self):
        # Coefficients of the Pourbaix energy expression for each entry.
        self.assertEqual(self.PxIon.npH, -8, "Wrong npH!")
        self.assertEqual(self.PxIon.nPhi, -7, "Wrong nPhi!")
        self.assertEqual(self.PxIon.nH2O, 4, "Wrong nH2O!")

        self.assertEqual(self.PxSol.npH, -6, "Wrong npH!")
        self.assertEqual(self.PxSol.nPhi, -6, "Wrong nPhi!")
        self.assertEqual(self.PxSol.nH2O, 3, "Wrong nH2O!")

    def test_to_from_dict(self):
        # Round-trip both entry kinds through as_dict/from_dict.
        d = self.PxIon.as_dict()
        ion_entry = self.PxIon.from_dict(d)
        self.assertEqual(ion_entry.entry.name, "MnO4[-]", "Wrong Entry!")

        d = self.PxSol.as_dict()
        sol_entry = self.PxSol.from_dict(d)
        self.assertEqual(sol_entry.name, "Mn2O3(s)", "Wrong Entry!")
        self.assertEqual(sol_entry.energy, self.PxSol.energy,
                         "as_dict and from_dict energies unequal")

    def test_energy_functions(self):
        # Smoke test: scalar and array pH/V inputs must all be accepted.
        # TODO: test these for values
        self.PxSol.energy_at_conditions(10, 0)
        self.PxSol.energy_at_conditions(np.array([1, 2, 3]), 0)
        self.PxSol.energy_at_conditions(10, np.array([1, 2, 3]))
        self.PxSol.energy_at_conditions(np.array([1, 2, 3]),
                                        np.array([1, 2, 3]))

    def test_multi_entry(self):
        # A MultiEntry's attributes are the sums of its members' attributes.
        # TODO: More robust multientry test
        m_entry = MultiEntry([self.PxSol, self.PxIon])
        for attr in ['energy', 'composition', 'nPhi']:
            self.assertEqual(getattr(m_entry, attr),
                             getattr(self.PxSol, attr) + getattr(self.PxIon, attr))

        # As dict, from dict
        m_entry_dict = m_entry.as_dict()
        m_entry_new = MultiEntry.from_dict(m_entry_dict)
        self.assertEqual(m_entry_new.energy, m_entry.energy)

    def test_get_elt_fraction(self):
        # Element fractions are computed over non-O/H elements (3 Fe + 2 Mn).
        entry = ComputedEntry("Mn2Fe3O3", 49)
        pbentry = PourbaixEntry(entry)
        self.assertAlmostEqual(pbentry.get_element_fraction("Fe"), 0.6)
        self.assertAlmostEqual(pbentry.get_element_fraction("Mn"), 0.4)
class PourbaixDiagramTest(unittest.TestCase):
    # Flag read by the multiprocess test plugin — presumably marks fixtures
    # as shareable across worker processes; TODO confirm.
    _multiprocess_shared_ = True

    @classmethod
    def setUpClass(cls):
        # Shared fixtures: Zn entries from a JSON dump, once with unstable
        # solids filtered out and once without.
        cls.test_data = loadfn(os.path.join(test_dir, 'pourbaix_test_data.json'))
        cls.pbx = PourbaixDiagram(cls.test_data['Zn'], filter_solids=True)
        cls.pbx_nofilter = PourbaixDiagram(cls.test_data['Zn'],
                                           filter_solids=False)

    def test_pourbaix_diagram(self):
        # Filtered diagram keeps only the expected stable phases.
        self.assertEqual(set([e.name for e in self.pbx.stable_entries]),
                         {"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
                         "List of stable entries does not match")

        # Without solid filtering two extra solids appear.
        self.assertEqual(set([e.name for e in self.pbx_nofilter.stable_entries]),
                         {"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)",
                          "ZnO2(s)", "ZnH(s)"},
                         "List of stable entries for unfiltered pbx does not match")

        # Lowering the ion concentration changes the stable phase set.
        pbx_lowconc = PourbaixDiagram(self.test_data['Zn'], conc_dict={"Zn": 1e-8},
                                      filter_solids=True)
        self.assertEqual(set([e.name for e in pbx_lowconc.stable_entries]),
                         {"Zn(HO)2(aq)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"})

    def test_properties(self):
        self.assertEqual(len(self.pbx.unstable_entries), 2)

    def test_multicomponent(self):
        # Assure no ions get filtered at high concentration
        ag_n = [e for e in self.test_data['Ag-Te-N']
                if "Te" not in e.composition]
        highconc = PourbaixDiagram(ag_n, filter_solids=True,
                                   conc_dict={"Ag": 1e-5, "N": 1})
        entry_sets = [set(e.entry_id) for e in highconc.stable_entries]
        self.assertIn({"mp-124", "ion-17"}, entry_sets)

        # Binary system
        pd_binary = PourbaixDiagram(self.test_data['Ag-Te'], filter_solids=True,
                                    comp_dict={"Ag": 0.5, "Te": 0.5},
                                    conc_dict={"Ag": 1e-8, "Te": 1e-8})
        self.assertEqual(len(pd_binary.stable_entries), 30)
        test_entry = pd_binary.find_stable_entry(8, 2)
        self.assertTrue("mp-499" in test_entry.entry_id)

        # Find a specific multientry to test
        self.assertEqual(pd_binary.get_decomposition_energy(test_entry, 8, 2), 0)

        pd_ternary = PourbaixDiagram(self.test_data['Ag-Te-N'], filter_solids=True)
        self.assertEqual(len(pd_ternary.stable_entries), 49)

        # Fetch a solid entry and a ground state entry mixture
        ag_te_n = self.test_data['Ag-Te-N'][-1]
        ground_state_ag_with_ions = MultiEntry([self.test_data['Ag-Te-N'][i] for i in [4, 18, 30]],
                                               weights=[1 / 3, 1 / 3, 1 / 3])
        # Regression values for decomposition energies at fixed (pH, V).
        self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ag_te_n, 2, -1), 2.767822855765)
        self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ag_te_n, 10, -2), 3.756840056890625)
        self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ground_state_ag_with_ions, 2, -1), 0)

        # Test invocation of pourbaix diagram from ternary data
        new_ternary = PourbaixDiagram(pd_ternary.all_entries)
        self.assertEqual(len(new_ternary.stable_entries), 49)
        self.assertAlmostEqual(new_ternary.get_decomposition_energy(ag_te_n, 2, -1), 2.767822855765)
        self.assertAlmostEqual(new_ternary.get_decomposition_energy(ag_te_n, 10, -2), 3.756840056890625)
        self.assertAlmostEqual(new_ternary.get_decomposition_energy(ground_state_ag_with_ions, 2, -1), 0)

    def test_get_pourbaix_domains(self):
        domains = PourbaixDiagram.get_pourbaix_domains(self.test_data['Zn'])
        self.assertEqual(len(domains[0]), 7)

    def test_get_decomposition(self):
        # Test a stable entry to ensure that it's zero in the stable region
        entry = self.test_data['Zn'][12]  # Should correspond to mp-2133
        self.assertAlmostEqual(self.pbx.get_decomposition_energy(entry, 10, 1),
                               0.0, 5, "Decomposition energy of ZnO is not 0.")

        # Test an unstable entry to ensure that it's never zero
        entry = self.test_data['Zn'][11]
        ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-2, 4))
        result = self.pbx_nofilter.get_decomposition_energy(entry, ph, v)
        self.assertTrue((result >= 0).all(),
                        "Unstable energy has hull energy of 0 or less")

        # Test an unstable hydride to ensure HER correction works
        self.assertAlmostEqual(self.pbx.get_decomposition_energy(entry, -3, -2),
                               3.6979147983333)
        # Test a list of pHs
        self.pbx.get_decomposition_energy(entry, np.linspace(0, 2, 5), 2)

        # Test a list of Vs
        self.pbx.get_decomposition_energy(entry, 4, np.linspace(-3, 3, 10))

        # Test a set of matching arrays
        ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-3, 3))
        self.pbx.get_decomposition_energy(entry, ph, v)

    def test_get_stable_entry(self):
        entry = self.pbx.get_stable_entry(0, 0)
        self.assertEqual(entry.entry_id, "ion-0")

    def test_multielement_parallel(self):
        # Simple test to ensure that multiprocessing is working
        test_entries = self.test_data["Ag-Te-N"]
        nproc = multiprocessing.cpu_count()
        pbx = PourbaixDiagram(test_entries, filter_solids=True, nproc=nproc)
        self.assertEqual(len(pbx.stable_entries), 49)

    def test_solid_filter(self):
        # Filtering solids replaces the metastable ZnO2 by ZnO at (pH 10, 2 V).
        entries = self.test_data['Zn']
        pbx = PourbaixDiagram(entries, filter_solids=False)
        oxidized_phase = pbx.find_stable_entry(10, 2)
        self.assertEqual(oxidized_phase.name, "ZnO2(s)")
        entries = self.test_data['Zn']
        pbx = PourbaixDiagram(entries, filter_solids=True)
        oxidized_phase = pbx.find_stable_entry(10, 2)
        self.assertEqual(oxidized_phase.name, "ZnO(s)")

    def test_serialization(self):
        # Round-trip through as_dict/from_dict preserves the stable set.
        d = self.pbx.as_dict()
        new = PourbaixDiagram.from_dict(d)
        self.assertEqual(set([e.name for e in new.stable_entries]),
                         {"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
                         "List of stable entries does not match")

        # Test with unprocessed entries included, this should result in the
        # previously filtered entries being included
        d = self.pbx.as_dict(include_unprocessed_entries=True)
        new = PourbaixDiagram.from_dict(d)
        self.assertEqual(
            set([e.name for e in new.stable_entries]),
            {"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)", "ZnO2(s)", "ZnH(s)"},
            "List of stable entries for unfiltered pbx does not match")

        pd_binary = PourbaixDiagram(self.test_data['Ag-Te'], filter_solids=True,
                                    comp_dict={"Ag": 0.5, "Te": 0.5},
                                    conc_dict={"Ag": 1e-8, "Te": 1e-8})
        new_binary = PourbaixDiagram.from_dict(pd_binary.as_dict())
        self.assertEqual(len(pd_binary.stable_entries),
                         len(new_binary.stable_entries))

    # The two tests below rely on the MP Rest interface.
    @unittest.skipIf(not SETTINGS.get("PMG_MAPI_KEY"),
                     "PMG_MAPI_KEY environment variable not set.")
    def test_heavy(self):
        # Smoke test: large multicomponent diagrams built from live MP data.
        from pymatgen import MPRester
        mpr = MPRester()
        entries = mpr.get_pourbaix_entries(["Li", "Mg", "Sn", "Pd"])
        pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)
        entries = mpr.get_pourbaix_entries(["Ba", "Ca", "V", "Cu", "F"])
        pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)
        entries = mpr.get_pourbaix_entries(["Ba", "Ca", "V", "Cu", "F", "Fe"])
        pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)
        entries = mpr.get_pourbaix_entries(["Na", "Ca", "Nd", "Y", "Ho", "F"])
        pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)

    @unittest.skipIf(not SETTINGS.get("PMG_MAPI_KEY"),
                     "PMG_MAPI_KEY environment variable not set.")
    def test_mpr_pipeline(self):
        # End-to-end pipeline from the MP REST API to a Pourbaix diagram.
        from pymatgen import MPRester
        mpr = MPRester()
        data = mpr.get_pourbaix_entries(["Zn"])
        pbx = PourbaixDiagram(data, filter_solids=True, conc_dict={"Zn": 1e-8})
        pbx.find_stable_entry(10, 0)

        data = mpr.get_pourbaix_entries(["Ag", "Te"])
        pbx = PourbaixDiagram(data, filter_solids=True,
                              conc_dict={"Ag": 1e-8, "Te": 1e-8})
        self.assertEqual(len(pbx.stable_entries), 30)
        test_entry = pbx.find_stable_entry(8, 2)
        self.assertAlmostEqual(test_entry.energy, 2.3894017960000009, 3)

        # Test custom ions
        entries = mpr.get_pourbaix_entries(["Sn", "C", "Na"])
        ion = IonEntry(Ion.from_formula("NaO28H80Sn12C24+"), -161.676)
        custom_ion_entry = PourbaixEntry(ion, entry_id='my_ion')
        pbx = PourbaixDiagram(entries + [custom_ion_entry], filter_solids=True,
                              comp_dict={"Na": 1, "Sn": 12, "C": 24})
        self.assertAlmostEqual(pbx.get_decomposition_energy(custom_ion_entry, 5, 2),
                               2.1209002582, 1)
class PourbaixPlotterTest(unittest.TestCase):
    def setUp(self):
        # Silence warnings emitted during plotting.
        warnings.simplefilter("ignore")
        self.test_data = loadfn(os.path.join(test_dir, "pourbaix_test_data.json"))
        self.pd = PourbaixDiagram(self.test_data["Zn"])
        self.plotter = PourbaixPlotter(self.pd)

    def tearDown(self):
        # Restore the default warning behaviour for other tests.
        warnings.simplefilter("default")

    def test_plot_pourbaix(self):
        # Smoke test: plotting must not raise.
        plotter = PourbaixPlotter(self.pd)
        # Default limits
        plotter.get_pourbaix_plot()

        # Non-standard limits
        plotter.get_pourbaix_plot(limits=[[-5, 4], [-2, 2]])

    def test_plot_entry_stability(self):
        # Smoke test for single-entry stability maps.
        entry = self.pd.all_entries[0]
        self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-3, 3]])

        # binary system
        pd_binary = PourbaixDiagram(self.test_data['Ag-Te'],
                                    comp_dict={"Ag": 0.5, "Te": 0.5})
        binary_plotter = PourbaixPlotter(pd_binary)
        plt = binary_plotter.plot_entry_stability(self.test_data['Ag-Te'][53])
        plt.close()
# Run the full test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| tschaume/pymatgen | pymatgen/analysis/tests/test_pourbaix_diagram.py | Python | mit | 14,093 | [
"pymatgen"
] | 26d42ca632fb1951309888806e9153f5df67009306a9cb390b26c2ef5efc46bc |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*************************
espressopp.ParticleAccess
*************************
Abstract base class for analysis/measurement/io
.. function:: espressopp.ParticleAccess.perform_action()
:rtype:
"""
from espressopp import pmi
from _espressopp import ParticleAccess
class ParticleAccessLocal(ParticleAccess):
    """Worker-side wrapper around the C++ ParticleAccess object."""

    def perform_action(self):
        """Invoke the C++ action on every worker belonging to the active CPU group."""
        # Run either when no PMI subcommunicator is defined, or when this
        # MPI rank is part of the active CPU group.
        isActiveWorker = (not pmi._PMIComm) or (pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())
        if isActiveWorker:
            self.cxxclass.perform_action(self)
# On the controller, replace ParticleAccess with a PMI proxy class that
# forwards the listed calls to the ParticleAccessLocal objects on the workers.
if pmi.isController :
    class ParticleAccess(metaclass=pmi.Proxy):
        pmiproxydefs = dict(
            pmicall = [ 'perform_action' ]
        )
| espressopp/espressopp | src/ParticleAccess.py | Python | gpl-3.0 | 1,506 | [
"ESPResSo"
] | 5f1237e7573460f8e7be5b654096117c98844a343b727268282a075f791a6f5f |
'''
Steps for problem.feature lettuce tests
'''
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from lettuce.django import django_url
from common import i_am_registered_for_the_course
from problems_setup import PROBLEM_DICT, answer_problem, problem_has_answer, add_problem_to_course
from nose.tools import assert_equal
@step(u'I am viewing a "([^"]*)" problem with "([^"]*)" attempt')
def view_problem_with_attempts(step, problem_type, attempts):
    """Load the courseware page for *problem_type* with a max_attempts limit."""
    i_am_registered_for_the_course(step, 'model_course')

    # Ensure that the course has this problem type
    # NOTE(review): this passes the course *number* while the sibling steps
    # below pass the literal 'model_course' — confirm which identifier
    # add_problem_to_course actually expects.
    add_problem_to_course(world.scenario_dict['COURSE'].number, problem_type, {'max_attempts': attempts})

    # Go to the one section in the factory-created course
    # which should be loaded with the correct problem
    chapter_name = world.scenario_dict['SECTION'].display_name.replace(" ", "_")
    section_name = chapter_name
    url = django_url('/courses/%s/%s/%s/courseware/%s/%s' %
                    (world.scenario_dict['COURSE'].org, world.scenario_dict['COURSE'].number, world.scenario_dict['COURSE'].display_name.replace(' ', '_'),
                     chapter_name, section_name,))

    world.browser.visit(url)
@step(u'I am viewing a "([^"]*)" that shows the answer "([^"]*)"')
def view_problem_with_show_answer(step, problem_type, answer):
    """Load the courseware page for *problem_type* with the given showanswer policy."""
    i_am_registered_for_the_course(step, 'model_course')

    # Load the requested problem type into the factory-created course.
    add_problem_to_course('model_course', problem_type, {'showanswer': answer})

    # The factory course has exactly one section; chapter and section share
    # the same (underscored) display name.
    course = world.scenario_dict['COURSE']
    section = world.scenario_dict['SECTION'].display_name.replace(" ", "_")
    path = '/courses/%s/%s/%s/courseware/%s/%s' % (
        course.org,
        course.number,
        course.display_name.replace(' ', '_'),
        section,
        section,
    )
    world.browser.visit(django_url(path))
@step(u'I am viewing a "([^"]*)" problem')
def view_problem(step, problem_type):
    """Load the courseware page containing a problem of *problem_type*."""
    i_am_registered_for_the_course(step, 'model_course')

    # Load the requested problem type into the factory-created course.
    add_problem_to_course('model_course', problem_type)

    # The factory course has exactly one section; chapter and section share
    # the same (underscored) display name.
    course = world.scenario_dict['COURSE']
    section = world.scenario_dict['SECTION'].display_name.replace(" ", "_")
    path = '/courses/%s/%s/%s/courseware/%s/%s' % (
        course.org,
        course.number,
        course.display_name.replace(' ', '_'),
        section,
        section,
    )
    world.browser.visit(django_url(path))
@step(u'External graders respond "([^"]*)"')
def set_external_grader_response(step, correctness):
    """Configure the fake XQueue server to grade every submission as *correctness*."""
    assert correctness in ['correct', 'incorrect']

    graded_correct = (correctness == 'correct')
    response_dict = {
        'correct': graded_correct,
        'score': 1 if graded_correct else 0,
        'msg': 'Your problem was graded %s' % correctness,
    }

    # Set the fake xqueue server to always respond
    # correct/incorrect when asked to grade a problem
    world.xqueue_server.set_grade_response(response_dict)
@step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
def answer_problem_step(step, problem_type, correctness):
    """Mark a given problem type correct or incorrect, then submit it.

    *problem_type* is a string representing the type of problem (e.g. 'drop down')
    *correctness* is in ['correct', 'incorrect']
    """
    # Fill in the answer, then submit the form.
    input_problem_answer(step, problem_type, correctness)
    check_problem(step)
@step(u'I input an answer on a "([^"]*)" problem "([^"]*)ly"')
def input_problem_answer(_, problem_type, correctness):
    """Have the browser fill in an answer (either correct or incorrect)."""
    assert correctness in ['correct', 'incorrect']
    assert problem_type in PROBLEM_DICT
    answer_problem(problem_type, correctness)
@step(u'I check a problem')
def check_problem(step):
    """Submit the current problem by clicking its Check button."""
    # first scroll down so the loading mathjax button does not
    # cover up the Check button
    world.browser.execute_script("window.scrollTo(0,1024)")
    world.css_click("input.check")
@step(u'The "([^"]*)" problem displays a "([^"]*)" answer')
def assert_problem_has_answer(step, problem_type, answer_class):
    """Assert that the problem currently displays a particular answer state.

    'correct' and 'incorrect' correspond to the answers set by
    answer_problem(); 'blank' checks that the problem was left empty.
    """
    assert answer_class in ['correct', 'incorrect', 'blank']
    assert problem_type in PROBLEM_DICT
    problem_has_answer(problem_type, answer_class)
@step(u'I reset the problem')
def reset_problem(_step):
    """Click the problem's Reset button."""
    world.css_click('input.reset')
@step(u'I press the button with the label "([^"]*)"$')
def press_the_button_with_label(_step, buttonname):
    """Click the show/hide style button that carries a text label."""
    # NOTE(review): `buttonname` is never used, and `css_has_text` is passed
    # a WebElement rather than the expected label text — confirm this step
    # actually checks the label it claims to.
    button_css = 'button span.show-label'
    elem = world.css_find(button_css).first
    world.css_has_text(button_css, elem)
    world.css_click(button_css)
@step(u'The "([^"]*)" button does( not)? appear')
def action_button_present(_step, buttonname, doesnt_appear):
    """Assert presence (or absence) of an action button with the given value."""
    button_css = 'section.action input[value*="%s"]' % buttonname
    # The optional " not" regex capture decides which way the assertion goes.
    if doesnt_appear:
        assert world.is_css_not_present(button_css)
    else:
        assert world.is_css_present(button_css)
@step(u'the Show/Hide button label is "([^"]*)"$')
def show_hide_label_is(_step, label_name):
    """Wait until the Show/Hide button label reads *label_name*."""
    # The label text is changed by static/xmodule_js/src/capa/display.js
    # so give it some time to change on the page.
    label_selector = 'button.show span.show-label'
    world.wait_for(lambda _: world.css_has_text(label_selector, label_name))
@step(u'I should see a score of "([^"]*)"$')
def see_score(_step, score):
    """Wait until the problem progress indicator shows "(score)"."""
    # The problem progress is changed by
    # cms/static/xmodule_js/src/capa/display.js
    # so give it some time to render on the page.
    progress_selector = 'section.problem-progress'
    wanted_text = '({})'.format(score)
    world.wait_for(lambda _: world.css_has_text(progress_selector, wanted_text))
@step(u'[Mm]y "([^"]*)" answer is( NOT)? marked "([^"]*)"')
def assert_answer_mark(_step, problem_type, isnt_marked, correctness):
    """
    Assert that the expected answer mark is (or is not) visible
    for a given problem type.

    *problem_type* is a string identifying the type of problem (e.g. 'drop down')
    *correctness* is in ['correct', 'incorrect', 'unanswered']
    """
    assert correctness in ['correct', 'incorrect', 'unanswered']
    assert problem_type in PROBLEM_DICT

    # The presence check is inverted when the step said "is NOT marked".
    check = world.is_css_not_present if isnt_marked else world.is_css_present

    # At least one of the selectors for this state must match; stop at the
    # first hit.
    for sel in PROBLEM_DICT[problem_type][correctness]:
        has_expected = check(sel)
        if has_expected:
            break

    assert has_expected
| abo-abo/edx-platform | lms/djangoapps/courseware/features/problems.py | Python | agpl-3.0 | 7,370 | [
"VisIt"
] | a07acadf22a250158e3094c7f13d057e7c012353418cc8e281ce829130bde103 |
from __future__ import print_function
import functools
from copy import copy
import numpy as np
import scipy
from scipy import matrix
import regreg.api as rr
import regreg.affine as ra
from .query import query, optimization_sampler
from .reconstruction import reconstruct_full_from_internal
from .randomization import split
class group_lasso_view(query):

    """
    Randomized (group) lasso query.

    Solves a randomized penalized program and records the affine
    decomposition of the KKT conditions into score and optimization
    variables (group scalings, unpenalized coefficients and inactive
    subgradients), as consumed by `optimization_sampler`.
    """

    def __init__(self, loss, epsilon, penalty, randomization, solve_args={'min_its':50, 'tol':1.e-10}):
        """
        Parameters:
        -----------

        loss : regreg smooth loss
            Loss whose randomized version is solved.

        epsilon : float
            Multiplier of the ridge term added to the randomized program.

        penalty : rr.group_lasso
            Group lasso penalty (carries `groups` and `weights`).

        randomization : randomization
            Source of the randomization added to the program.

        solve_args : dict
            Arguments to be passed to regreg solver.
        """
        query.__init__(self, randomization)

        (self.loss,
         self.epsilon,
         self.penalty,
         self.randomization,
         self.solve_args) = (loss,
                             epsilon,
                             penalty,
                             randomization,
                             solve_args)

    # Methods needed for subclassing a query

    def solve(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}, nboot=2000):
        """
        Solve the randomized program, determine the active and
        unpenalized groups, compute the restricted M-estimator and set up
        the affine decomposition (self.score_transform, self.opt_transform)
        of the KKT conditions.

        Sets self._beta_full, the restricted M-estimator padded with zeros,
        and self._Mest_hessian, the Hessian columns for the selected set.
        """

        self.randomize()

        (loss,
         randomized_loss,
         epsilon,
         penalty,
         randomization,
         solve_args) = (self.loss,
                        self.randomized_loss,
                        self.epsilon,
                        self.penalty,
                        self.randomization,
                        self.solve_args)

        # initial solution of the randomized program

        problem = rr.simple_problem(randomized_loss, penalty)
        self.initial_soln = problem.solve(**solve_args)

        # find the active groups and their direction vectors
        # as well as unpenalized groups

        groups = np.unique(penalty.groups)
        active_groups = np.zeros(len(groups), bool)
        unpenalized_groups = np.zeros(len(groups), bool)

        active_directions = []
        active = np.zeros(loss.shape, bool)
        unpenalized = np.zeros(loss.shape, bool)

        initial_scalings = []

        active_directions_list = []  # per-group (compressed) directions, used for group lasso
        active_penalty = []

        for i, g in enumerate(groups):
            group = penalty.groups == g
            active_groups[i] = (np.linalg.norm(self.initial_soln[group]) > 1.e-6 * penalty.weights[g]) and (penalty.weights[g] > 0)
            unpenalized_groups[i] = (penalty.weights[g] == 0)
            if active_groups[i]:
                active[group] = True
                # unit direction of the group's coefficient block
                z = np.zeros(active.shape, float)
                z[group] = self.initial_soln[group] / np.linalg.norm(self.initial_soln[group])
                active_directions.append(z)
                active_directions_list.append(z[group])
                active_penalty.append(penalty.weights[g])
                initial_scalings.append(np.linalg.norm(self.initial_soln[group]))
            if unpenalized_groups[i]:
                unpenalized[group] = True

        self.active_penalty = active_penalty

        # bookkeeping for the restricted problem

        self._overall = active + unpenalized > 0
        self._inactive = ~self._overall
        self._unpenalized = unpenalized
        self.active_directions_list = active_directions_list
        self._active_directions = np.array(active_directions).T
        self._active_groups = np.array(active_groups, bool)
        self._unpenalized_groups = np.array(unpenalized_groups, bool)

        self.selection_variable = {'groups':self._active_groups,
                                   'variables':self._overall,
                                   'directions':self._active_directions}

        # initial state for opt variables

        initial_subgrad = -(self.randomized_loss.smooth_objective(self.initial_soln, 'grad') +
                            self.randomized_loss.quadratic.objective(self.initial_soln, 'grad'))
        # the quadratic of a smooth_atom is not included in computing the smooth_objective

        self.initial_subgrad = initial_subgrad
        initial_subgrad = initial_subgrad[self._inactive]
        initial_unpenalized = self.initial_soln[self._unpenalized]

        # opt-variable layout: [group scalings, unpenalized coefs, inactive subgradients]
        self.observed_opt_state = np.concatenate([initial_scalings,
                                                  initial_unpenalized,
                                                  initial_subgrad], axis=0)

        # set the _solved bit

        self._solved = True

        # Now setup the pieces for linear decomposition

        (loss,
         epsilon,
         penalty,
         initial_soln,
         overall,
         inactive,
         unpenalized,
         active_groups,
         active_directions) = (self.loss,
                               self.epsilon,
                               self.penalty,
                               self.initial_soln,
                               self._overall,
                               self._inactive,
                               self._unpenalized,
                               self._active_groups,
                               self._active_directions)

        # scaling should be chosen to be Lipschitz constant for gradient of Gaussian part

        # we are implicitly assuming that
        # loss is a pairs model

        self.scaling = scaling
        _sqrt_scaling = np.sqrt(self.scaling)

        _beta_unpenalized = restricted_Mest(loss, overall, solve_args=solve_args)

        beta_full = np.zeros(overall.shape)
        beta_full[overall] = _beta_unpenalized
        self._beta_full = beta_full

        # observed state for score in internal coordinates

        self.observed_internal_state = np.hstack([_beta_unpenalized * _sqrt_scaling,
                                                  -loss.smooth_objective(beta_full, 'grad')[inactive] / _sqrt_scaling])

        # form linear part

        self.num_opt_var = self.observed_opt_state.shape[0]
        p = loss.shape[0] # shorthand for p

        # (\bar{\beta}_{E \cup U}, N_{-E}, c_E, \beta_U, z_{-E})
        # E for active
        # U for unpenalized
        # -E for inactive

        _opt_linear_term = np.zeros((p, self._active_groups.sum() + unpenalized.sum() + inactive.sum()))
        _score_linear_term = np.zeros((p, p))

        # \bar{\beta}_{E \cup U} piece -- the unpenalized M estimator

        Mest_slice = slice(0, overall.sum())
        X, y = loss.data
        W = self.loss.saturated_loss.hessian(X.dot(beta_full))
        _Mest_hessian_active = np.dot(X.T, X[:, active] * W[:, None])
        _Mest_hessian_unpen = np.dot(X.T, X[:, unpenalized] * W[:, None])
        # BUG FIX: the two Hessian blocks were computed but `_Mest_hessian`
        # itself was never assembled, so the references below (and in the
        # group_lasso_group_lasso subclass) raised NameError.
        _Mest_hessian = np.hstack([_Mest_hessian_active, _Mest_hessian_unpen])
        self._Mest_hessian = _Mest_hessian
        _score_linear_term[:, Mest_slice] = -_Mest_hessian / _sqrt_scaling

        # N_{-(E \cup U)} piece -- inactive coordinates of score of M estimator at unpenalized solution

        null_idx = range(overall.sum(), p)
        inactive_idx = np.nonzero(inactive)[0]
        for _i, _n in zip(inactive_idx, null_idx):
            _score_linear_term[_i, _n] = -_sqrt_scaling

        # c_E piece

        scaling_slice = slice(0, active_groups.sum())
        if len(active_directions) == 0:
            _opt_hessian = 0
        else:
            _opt_hessian = np.dot(_Mest_hessian, active_directions[overall]) + epsilon * active_directions
        _opt_linear_term[:, scaling_slice] = _opt_hessian / _sqrt_scaling

        self.observed_opt_state[scaling_slice] *= _sqrt_scaling

        # beta_U piece

        unpenalized_slice = slice(active_groups.sum(), active_groups.sum() + unpenalized.sum())
        unpenalized_directions = np.identity(p)[:, unpenalized]
        if unpenalized.sum():
            _opt_linear_term[:, unpenalized_slice] = (np.dot(_Mest_hessian, unpenalized_directions[overall])
                                                      + epsilon * unpenalized_directions) / _sqrt_scaling
        self.observed_opt_state[unpenalized_slice] *= _sqrt_scaling

        # subgrad piece

        subgrad_idx = range(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
        subgrad_slice = slice(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
        for _i, _s in zip(inactive_idx, subgrad_idx):
            _opt_linear_term[_i, _s] = _sqrt_scaling

        self.observed_opt_state[subgrad_idx] /= _sqrt_scaling

        # form affine part

        _opt_affine_term = np.zeros(p)
        idx = 0
        groups = np.unique(penalty.groups)
        for i, g in enumerate(groups):
            if active_groups[i]:
                group = penalty.groups == g
                _opt_affine_term[group] = active_directions[:, idx][group] * penalty.weights[g]
                idx += 1

        # two transforms that encode score and optimization
        # variable roles

        # later, we will modify `score_transform`
        # in `linear_decomposition`

        self.opt_transform = (_opt_linear_term, _opt_affine_term)
        self.score_transform = (_score_linear_term, np.zeros(_score_linear_term.shape[0]))

        # now store everything needed for the projections
        # the projection acts only on the optimization
        # variables

        self.scaling_slice = scaling_slice

        # weights are scaled here because the linear terms scales them by scaling

        new_groups = penalty.groups[inactive]
        new_weights = dict([(g, penalty.weights[g] / _sqrt_scaling) for g in penalty.weights.keys() if g in np.unique(new_groups)])

        # we form a dual group lasso object
        # to do the projection

        self.group_lasso_dual = rr.group_lasso_dual(new_groups, weights=new_weights, bound=1.)
        self.subgrad_slice = subgrad_slice

        self._setup = True
        self._marginalize_subgradient = False
        self.scaling_slice = scaling_slice
        self.unpenalized_slice = unpenalized_slice
        self.ndim = loss.shape[0]
        self.nboot = nboot

    def get_sampler(self):
        """
        Lazily construct (and cache) the default optimization sampler.
        """
        if not hasattr(self, "_sampler"):

            def projection(group_lasso_dual, subgrad_slice, scaling_slice, opt_state):
                """
                Full projection for Langevin.

                The state here will be only the state of the optimization variables.
                """
                new_state = opt_state.copy() # not really necessary to copy
                new_state[scaling_slice] = np.maximum(opt_state[scaling_slice], 0)
                new_state[subgrad_slice] = group_lasso_dual.bound_prox(opt_state[subgrad_slice])
                return new_state

            projection = functools.partial(projection, self.group_lasso_dual, self.subgrad_slice, self.scaling_slice)

            def grad_log_density(query,
                                 opt_linear,
                                 rand_gradient,
                                 internal_state,
                                 opt_state):
                # chain rule through the affine reconstruction of the full state
                full_state = reconstruct_full_from_internal(query.opt_transform, query.score_transform, internal_state, opt_state)
                return opt_linear.T.dot(rand_gradient(full_state).T)

            grad_log_density = functools.partial(grad_log_density, self, self.opt_transform[0], self.randomization.gradient)

            def log_density(query,
                            opt_linear,
                            rand_log_density,
                            internal_state,
                            opt_state):
                full_state = reconstruct_full_from_internal(query.opt_transform, query.score_transform, internal_state, opt_state)
                return rand_log_density(full_state)

            log_density = functools.partial(log_density, self, self.opt_transform[0], self.randomization.log_density)

            self._sampler = optimization_sampler(self.observed_opt_state,
                                                 self.observed_internal_state.copy(),
                                                 self.score_transform,
                                                 self.opt_transform,
                                                 projection,
                                                 grad_log_density,
                                                 log_density)
        return self._sampler

    sampler = property(get_sampler, query.set_sampler)

    def decompose_subgradient(self, conditioning_groups=None, marginalizing_groups=None):
        """
        Rebuild the optimization transform and sampler, conditioning on
        the observed subgradients of some inactive groups and
        marginalizing over others.

        conditioning_groups and marginalizing_groups are boolean arrays
        indexed by group and should be disjoint; inactive groups in
        neither set keep their subgradient as a sampled variable.
        """

        groups = np.unique(self.penalty.groups)
        condition_inactive_groups = np.zeros_like(groups, dtype=bool)

        if conditioning_groups is None:
            conditioning_groups = np.zeros_like(groups, dtype=bool)

        if marginalizing_groups is None:
            marginalizing_groups = np.zeros_like(groups, dtype=bool)

        if np.any(conditioning_groups * marginalizing_groups):
            raise ValueError("cannot simultaneously condition and marginalize over a group's subgradient")

        if not self._setup:
            raise ValueError('setup_sampler should be called before using this function')

        condition_inactive_variables = np.zeros_like(self._inactive, dtype=bool)
        moving_inactive_groups = np.zeros_like(groups, dtype=bool)
        moving_inactive_variables = np.zeros_like(self._inactive, dtype=bool)

        # BUG FIX: combine the two *group*-level masks; the original used
        # variable-level self._unpenalized, which only lined up when every
        # group was a singleton.
        _inactive_groups = ~(self._active_groups + self._unpenalized_groups)

        inactive_marginal_groups = np.zeros_like(self._inactive, dtype=bool)
        limits_marginal_groups = np.zeros_like(self._inactive, float)

        # NOTE(review): inactive_marginal_groups / limits_marginal_groups are
        # allocated per-variable but indexed by group index `i` below; this is
        # consistent only for singleton groups -- confirm intended semantics.
        for i, g in enumerate(groups):
            if (_inactive_groups[i]) and conditioning_groups[i]:
                group = self.penalty.groups == g
                condition_inactive_groups[i] = True
                condition_inactive_variables[group] = True
            elif (_inactive_groups[i]) and (~conditioning_groups[i]) and (~marginalizing_groups[i]):
                group = self.penalty.groups == g
                moving_inactive_groups[i] = True
                moving_inactive_variables[group] = True
            if (_inactive_groups[i]) and marginalizing_groups[i]:
                group = self.penalty.groups == g
                inactive_marginal_groups[i] = True
                limits_marginal_groups[i] = self.penalty.weights[g]

        opt_linear, opt_offset = self.opt_transform

        # BUG FIX: the opt-variable layout set up in solve() is
        # [group scalings, unpenalized *variables*, subgradients], so the
        # unpenalized count must be variable-level (self._unpenalized),
        # matching the index ranges used below (the original mixed in
        # self._unpenalized_groups).
        new_linear = np.zeros((opt_linear.shape[0], (self._active_groups.sum() +
                                                     self._unpenalized.sum() +
                                                     moving_inactive_variables.sum())))
        new_linear[:, self.scaling_slice] = opt_linear[:, self.scaling_slice]
        new_linear[:, self.unpenalized_slice] = opt_linear[:, self.unpenalized_slice]

        inactive_moving_idx = np.nonzero(moving_inactive_variables)[0]
        subgrad_idx = range(self._active_groups.sum() + self._unpenalized.sum(),
                            self._active_groups.sum() + self._unpenalized.sum() +
                            moving_inactive_variables.sum())
        subgrad_slice = subgrad_idx
        for _i, _s in zip(inactive_moving_idx, subgrad_idx):
            new_linear[_i, _s] = 1.

        observed_opt_state = self.observed_opt_state[:(self._active_groups.sum() +
                                                       self._unpenalized.sum() +
                                                       moving_inactive_variables.sum())]
        observed_opt_state[subgrad_idx] = self.initial_subgrad[moving_inactive_variables]

        condition_linear = np.zeros((opt_linear.shape[0], (self._active_groups.sum() +
                                                           self._unpenalized.sum() +
                                                           condition_inactive_variables.sum())))
        inactive_condition_idx = np.nonzero(condition_inactive_variables)[0]
        subgrad_condition_idx = range(self._active_groups.sum() + self._unpenalized.sum(),
                                      self._active_groups.sum() + self._unpenalized.sum() + condition_inactive_variables.sum())

        for _i, _s in zip(inactive_condition_idx, subgrad_condition_idx):
            condition_linear[_i, _s] = 1.

        # conditioned subgradients enter as a fixed offset
        new_offset = condition_linear[:, subgrad_condition_idx].dot(self.initial_subgrad[condition_inactive_variables]) + opt_offset
        new_opt_transform = (new_linear, new_offset)

        def _fraction(_cdf, _pdf, full_state_plus, full_state_minus, inactive_marginal_groups):
            # d/dx log(F(x + w) - F(x - w)) for the marginalized coordinates
            return (np.divide(_pdf(full_state_plus) - _pdf(full_state_minus),
                              _cdf(full_state_plus) - _cdf(full_state_minus)))[inactive_marginal_groups]

        def new_grad_log_density(query,
                                 limits_marginal_groups,
                                 inactive_marginal_groups,
                                 _cdf,
                                 _pdf,
                                 opt_linear,
                                 deriv_log_dens,
                                 internal_state,
                                 opt_state):

            full_state = reconstruct_full_from_internal(new_opt_transform, query.score_transform, internal_state, opt_state)

            p = query.penalty.shape[0]
            weights = np.zeros(p)

            if inactive_marginal_groups.sum() > 0:
                full_state_plus = full_state + np.multiply(limits_marginal_groups, np.array(inactive_marginal_groups, float))
                full_state_minus = full_state - np.multiply(limits_marginal_groups, np.array(inactive_marginal_groups, float))
                weights[inactive_marginal_groups] = _fraction(_cdf, _pdf, full_state_plus, full_state_minus, inactive_marginal_groups)
            weights[~inactive_marginal_groups] = deriv_log_dens(full_state)[~inactive_marginal_groups]
            return -opt_linear.T.dot(weights)

        new_grad_log_density = functools.partial(new_grad_log_density,
                                                 self,
                                                 limits_marginal_groups,
                                                 inactive_marginal_groups,
                                                 self.randomization._cdf,
                                                 self.randomization._pdf,
                                                 new_opt_transform[0],
                                                 self.randomization._derivative_log_density)

        def new_log_density(query,
                            limits_marginal_groups,
                            inactive_marginal_groups,
                            _cdf,
                            _pdf,
                            opt_linear,
                            log_dens,
                            internal_state,
                            opt_state):

            full_state = reconstruct_full_from_internal(new_opt_transform,
                                                        query.score_transform,
                                                        internal_state,
                                                        opt_state)

            full_state = np.atleast_2d(full_state)
            p = query.penalty.shape[0]
            logdens = np.zeros(full_state.shape[0])
            if inactive_marginal_groups.sum() > 0:
                full_state_plus = full_state + np.multiply(limits_marginal_groups, np.array(inactive_marginal_groups, float))
                full_state_minus = full_state - np.multiply(limits_marginal_groups, np.array(inactive_marginal_groups, float))
                logdens += np.sum(np.log(_cdf(full_state_plus) - _cdf(full_state_minus))[:, inactive_marginal_groups], axis=1)

            logdens += log_dens(full_state[:, ~inactive_marginal_groups])

            return np.squeeze(logdens) # should this be negative to match the gradient log density?

        new_log_density = functools.partial(new_log_density,
                                            self,
                                            limits_marginal_groups,
                                            inactive_marginal_groups,
                                            self.randomization._cdf,
                                            self.randomization._pdf,
                                            self.opt_transform[0],
                                            self.randomization._log_density)

        new_groups = self.penalty.groups[moving_inactive_groups]
        _sqrt_scaling = np.sqrt(self.scaling)
        new_weights = dict([(g, self.penalty.weights[g] / _sqrt_scaling) for g in self.penalty.weights.keys() if g in np.unique(new_groups)])
        new_group_lasso_dual = rr.group_lasso_dual(new_groups, weights=new_weights, bound=1.)

        def new_projection(group_lasso_dual,
                           noverall,
                           opt_state):
            new_state = opt_state.copy()
            new_state[self.scaling_slice] = np.maximum(opt_state[self.scaling_slice], 0)
            new_state[noverall:] = group_lasso_dual.bound_prox(opt_state[noverall:])
            return new_state

        new_projection = functools.partial(new_projection,
                                           new_group_lasso_dual,
                                           self._overall.sum())

        new_selection_variable = copy(self.selection_variable)
        new_selection_variable['subgradient'] = self.observed_opt_state[self.subgrad_slice]

        self.sampler = optimization_sampler(observed_opt_state,
                                            self.observed_internal_state.copy(),
                                            self.score_transform,
                                            new_opt_transform,
                                            new_projection,
                                            new_grad_log_density,
                                            new_log_density,
                                            selection_info=(self, new_selection_variable))

    def condition_on_scalings(self):
        """
        Condition on the observed scalings of the active groups, leaving
        only the inactive subgradients as sampled optimization variables.
        """
        if not self._setup:
            raise ValueError('setup_sampler should be called before using this function')

        opt_linear, opt_offset = self.opt_transform

        new_offset = opt_linear[:, self.scaling_slice].dot(self.observed_opt_state[self.scaling_slice]) + opt_offset
        new_linear = opt_linear[:, self.subgrad_slice]

        self.opt_transform = (new_linear, new_offset)

        # for group LASSO this will induce a bigger jacobian
        self.selection_variable['scalings'] = self.observed_opt_state[self.scaling_slice]

        # reset slices

        self.observed_opt_state = self.observed_opt_state[self.subgrad_slice]
        self.subgrad_slice = slice(None, None, None)
        self.scaling_slice = np.zeros(new_linear.shape[1], bool)
        self.num_opt_var = new_linear.shape[1]
def restricted_Mest(Mest_loss, active, solve_args={'min_its':50, 'tol':1.e-10}):

    """
    Fit a restricted model using only columns `active`.

    Parameters
    ----------

    Mest_loss : objective function
        A GLM loss.

    active : ndarray
        Which columns to use.

    solve_args : dict
        Passed to `solve`.

    Returns
    -------

    soln : ndarray
        Solution to restricted problem.

    """
    X, Y = Mest_loss.data

    # a GLM exposes its saturated loss directly; otherwise go through a
    # column selector transform
    is_glm = (not Mest_loss._is_transform) and hasattr(Mest_loss, 'saturated_loss')
    if is_glm:
        restricted_loss = rr.affine_smooth(Mest_loss.saturated_loss, X[:, active])
    else:
        I_restricted = ra.selector(active, ra.astransform(X).input_shape[0], ra.identity((active.sum(),)))
        restricted_loss = rr.affine_smooth(Mest_loss, I_restricted.T)

    return restricted_loss.solve(**solve_args)
class group_lasso_split(group_lasso_view):

    """
    Group lasso view whose randomization comes from data splitting.
    """

    def __init__(self, loss, epsilon, subsample_size, penalty, solve_args={'min_its':50, 'tol':1.e-10}):

        total_size = loss.saturated_loss.shape[0]
        # fail fast, before constructing the randomization
        if subsample_size > total_size:
            raise ValueError('subsample size must be smaller than total sample size')

        self.randomization = split(loss.shape, subsample_size, total_size)
        # BUG FIX: the parent class is named `group_lasso_view`;
        # `group_lasso` is not defined in this module.
        group_lasso_view.__init__(self, loss, epsilon, penalty, self.randomization, solve_args=solve_args)

        self.total_size, self.subsample_size = total_size, subsample_size
class group_lasso_group_lasso(group_lasso_view):

    """
    Group lasso view that also forms the Jacobian correction terms
    needed when active groups have size > 1.
    """

    def __init__(self, loss, epsilon, penalty, randomization, solve_args={'min_its': 50, 'tol': 1.e-10}):
        # BUG FIX: the parent class is named `group_lasso_view`;
        # `group_lasso` is not defined in this module.
        group_lasso_view.__init__(self, loss, epsilon, penalty, randomization, solve_args=solve_args)
        # NOTE(review): self._Mest_hessian is set by solve(); confirm
        # solve() has been run before this constructor is used.
        self.Q = self._Mest_hessian[self._overall, :] + epsilon * np.identity(self._overall.sum())
        self.Qinv = np.linalg.inv(self.Q)
        self.form_VQLambda()

    def form_VQLambda(self):
        """
        Form V^T Q^{-1} Lambda V where, within each active group, the
        columns of V span the orthogonal complement of the group's
        direction vector and Lambda is block-diagonal with the group
        penalty weights.
        """

        def _null_space(A, eps=1e-12):
            # orthonormal basis of the null space of A via SVD;
            # np.compress/np.transpose replace the long-deprecated
            # scipy top-level aliases (removed from modern SciPy)
            u, s, vh = np.linalg.svd(A)
            padding = max(0, np.shape(A)[1] - np.shape(s)[0])
            null_mask = np.concatenate(((s <= eps), np.ones((padding,), dtype=bool)), axis=0)
            null_space = np.compress(null_mask, vh, axis=0)
            return np.transpose(null_space)

        nactive_groups = len(self.active_directions_list)
        nactive_vars = sum([self.active_directions_list[i].shape[0] for i in range(nactive_groups)])
        V = np.zeros((nactive_vars, nactive_vars - nactive_groups))
        Lambda = np.zeros((nactive_vars, nactive_vars))
        temp_row, temp_col = 0, 0
        for g in range(nactive_groups):
            size_curr_group = self.active_directions_list[g].shape[0]
            Lambda[temp_row:(temp_row + size_curr_group), temp_row:(temp_row + size_curr_group)] \
                = self.active_penalty[g] * np.identity(size_curr_group)
            V_g = _null_space(matrix(self.active_directions_list[g]))
            V[temp_row:(temp_row + V_g.shape[0]), temp_col:(temp_col + V_g.shape[1])] = V_g
            temp_row += V_g.shape[0]
            temp_col += V_g.shape[1]
        self.VQLambda = np.dot(np.dot(V.T, self.Qinv), Lambda.dot(V))
        return self.VQLambda

    def derivative_logdet_jacobian(self, scalings):
        """
        Derivative, with respect to the group scalings, of
        log det(Gamma_minus + V^T Q^{-1} Lambda V).
        """
        nactive_groups = len(self.active_directions_list)
        from scipy.linalg import block_diag
        # each active group contributes a (size - 1)-dimensional block
        matrix_list = [scalings[i] * np.identity(self.active_directions_list[i].shape[0] - 1) for i in
                       range(scalings.shape[0])]
        Gamma_minus = block_diag(*matrix_list)
        jacobian_inv = np.linalg.inv(Gamma_minus + self.VQLambda)
        # BUG FIX: block sizes must be (group size - 1) to match the
        # dimensions of Gamma_minus/VQLambda; the original indexed rows of
        # the 2-d array self._active_directions and used full group sizes,
        # walking past the end of jacobian_inv for multi-variable groups.
        group_sizes = [self.active_directions_list[i].shape[0] - 1 for i in range(nactive_groups)]
        group_sizes_cumsum = np.concatenate(([0], np.array(group_sizes).cumsum()))
        jacobian_inv_blocks = [
            jacobian_inv[group_sizes_cumsum[i]:group_sizes_cumsum[i + 1],
                         group_sizes_cumsum[i]:group_sizes_cumsum[i + 1]]
            for i in range(nactive_groups)]
        der = np.zeros(self.observed_opt_state.shape[0])
        der[self.scaling_slice] = np.array([np.trace(jacobian_inv_blocks[i]) for i in range(scalings.shape[0])])
        return der
#### Subclasses of different randomized views

class glm_group_lasso(group_lasso_view):

    def setup_sampler(self, scaling=1., solve_args={'min_its':50, 'tol':1.e-10}):
        """
        Return the bootstrap score function for the fitted GLM.
        """
        # BUG FIX: pairs_bootstrap_glm was referenced as a global but never
        # imported; import it from the sibling glm module, as
        # split_glm_group_lasso does for pairs_bootstrap_score.
        from .glm import pairs_bootstrap_glm

        bootstrap_score = pairs_bootstrap_glm(self.loss,
                                              self.selection_variable['variables'],
                                              beta_full=self._beta_full,
                                              inactive=~self.selection_variable['variables'])[0]

        return bootstrap_score
class split_glm_group_lasso(group_lasso_split):

    def setup_sampler(self, scaling=1., solve_args={'min_its': 50, 'tol': 1.e-10}, B=1000):
        """
        Bootstrap-estimate the covariance of

            loss.grad(beta_E^*) - 1/pi * randomized_loss.grad(beta_E^*)

        set it as the randomization covariance, and return the bootstrap
        score for the full data.
        """

        m, n, p = self.subsample_size, self.total_size, self.loss.shape[0] # shorthand

        # BUG FIX: both bootstrap helpers live in the sibling glm module;
        # pairs_bootstrap_glm was previously an unresolved global.
        from .glm import pairs_bootstrap_score, pairs_bootstrap_glm # need to correct these imports!!!

        bootstrap_score = pairs_bootstrap_score(self.loss,
                                                self._overall,
                                                beta_active=self._beta_full[self._overall],
                                                solve_args=solve_args)

        # find unpenalized MLE on subsample

        # BUG FIX: identity_quadratic comes from the regreg API; it was
        # previously an unresolved global.
        newq, oldq = rr.identity_quadratic(0, 0, 0, 0), self.randomized_loss.quadratic
        self.randomized_loss.quadratic = newq
        beta_active_subsample = restricted_Mest(self.randomized_loss,
                                                self._overall)

        bootstrap_score_split = pairs_bootstrap_score(self.loss,
                                                      self._overall,
                                                      beta_active=beta_active_subsample,
                                                      solve_args=solve_args)
        self.randomized_loss.quadratic = oldq

        # force true division so the inverse sampling fraction is a float
        # under both Python 2 and Python 3
        inv_frac = n / float(m)

        def subsample_diff(m, n, indices):
            subsample = np.random.choice(indices, size=m, replace=False)
            full_score = bootstrap_score(indices) # a sum of n terms
            randomized_score = bootstrap_score_split(subsample) # a sum of m terms
            return full_score - randomized_score * inv_frac

        first_moment = np.zeros(p)
        second_moment = np.zeros((p, p))

        _n = np.arange(n)
        for _ in range(B):
            indices = np.random.choice(_n, size=n, replace=True)
            randomized_score = subsample_diff(m, n, indices)
            first_moment += randomized_score
            second_moment += np.multiply.outer(randomized_score, randomized_score)

        first_moment /= B
        second_moment /= B

        cov = second_moment - np.multiply.outer(first_moment,
                                                first_moment)

        self.randomization.set_covariance(cov)

        bootstrap_score = pairs_bootstrap_glm(self.loss,
                                              self.selection_variable['variables'],
                                              beta_full=self._beta_full,
                                              inactive=~self.selection_variable['variables'])[0]

        return bootstrap_score
| selective-inference/selective-inference | selectinf/randomized/sandbox/group_lasso.py | Python | bsd-3-clause | 33,512 | [
"Gaussian"
] | d0e993bdf2d20e497298241c92c6d9ca3ed2b26ee2f8e625582d5dc3d4300bee |
"""
"""
import warnings
import os
import sys
import posixpath
import fnmatch
import py
# Moved from local.py.
# True when running on a Windows-like platform; some implementations
# (e.g. Jython) expose the underlying platform via os._name instead of
# sys.platform -- TODO confirm the '_name' attribute semantics.
iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
# os.fspath was added in Python 3.6 (PEP 519); on older interpreters we
# fall back to the local reimplementation below.
try:
    from os import fspath
except ImportError:
    def fspath(path):
        """
        Return the string representation of the path.
        If str or bytes is passed in, it is returned unchanged.

        This code comes from PEP 519, modified to support earlier versions of
        python.

        This is required for python < 3.6.
        """
        if isinstance(path, (py.builtin.text, py.builtin.bytes)):
            return path

        # Work from the object's type to match method resolution of other magic
        # methods.
        path_type = type(path)
        try:
            return path_type.__fspath__(path)
        except AttributeError:
            # only swallow the AttributeError of a *missing* __fspath__;
            # an AttributeError raised inside __fspath__ itself propagates
            if hasattr(path_type, '__fspath__'):
                raise
        # pathlib objects predate the __fspath__ protocol on python < 3.6
        try:
            import pathlib
        except ImportError:
            pass
        else:
            if isinstance(path, pathlib.PurePath):
                return py.builtin.text(path)
        raise TypeError("expected str, bytes or os.PathLike object, not "
                        + path_type.__name__)
class Checkers:
    """Evaluate keyword-style path predicates for PathBase.check().

    Each public method implements one checker; _evaluate() dispatches a
    dict of {checker_name: expected_value} pairs and supports a 'not'
    prefix (e.g. notfile=1) to invert any checker.
    """

    # checkers whose outcome depends purely on (non-)existence of the path
    _depend_on_existence = 'exists', 'link', 'dir', 'file'

    def __init__(self, path):
        self.path = path

    def dir(self):
        raise NotImplementedError

    def file(self):
        raise NotImplementedError

    def dotfile(self):
        # hidden-file convention: basename starts with a dot
        return self.path.basename.startswith('.')

    def ext(self, arg):
        # accept the extension with or without the leading dot
        if not arg.startswith('.'):
            arg = '.' + arg
        return self.path.ext == arg

    def exists(self):
        raise NotImplementedError

    def basename(self, arg):
        return self.path.basename == arg

    def basestarts(self, arg):
        return self.path.basename.startswith(arg)

    def relto(self, arg):
        return self.path.relto(arg)

    def fnmatch(self, arg):
        return self.path.fnmatch(arg)

    def endswith(self, arg):
        return str(self.path).endswith(arg)

    def _evaluate(self, kw):
        """Return True when every requested checker matches its value."""
        for name, expected in kw.items():
            negate = False
            checker = None
            try:
                checker = getattr(self, name)
            except AttributeError:
                # 'notdir=1' style: strip the prefix and invert the result
                if name[:3] == 'not':
                    negate = True
                    try:
                        checker = getattr(self, name[3:])
                    except AttributeError:
                        pass
            if checker is None:
                raise TypeError(
                    "no %r checker available for %r" % (name, self.path))
            try:
                if py.code.getrawcode(checker).co_argcount > 1:
                    # checker taking an argument, e.g. ext='.py'
                    if (not checker(expected)) ^ negate:
                        return False
                else:
                    if bool(expected) ^ bool(checker()) ^ negate:
                        return False
            except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
                # EBUSY feels not entirely correct,
                # but its kind of necessary since ENOMEDIUM
                # is not accessible in python
                for name in self._depend_on_existence:
                    if name in kw:
                        if kw.get(name):
                            return False
                    name = 'not' + name
                    if name in kw:
                        if not kw.get(name):
                            return False
        return True
class NeverRaised(Exception):
    # sentinel exception type: used as the default `ignore` class in
    # PathBase.visit() so that, by default, no exception is suppressed
    pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
Checkers = Checkers
    def __div__(self, other):
        # path / other: join `other` (coerced via fspath) onto this path
        return self.join(fspath(other))
    __truediv__ = __div__ # py3k
    # read-only name components; each is delegated to the subclass'
    # _getbyspec() implementation and wrapped as a property below
    def basename(self):
        """ basename part of path. """
        return self._getbyspec('basename')[0]
    basename = property(basename, None, None, basename.__doc__)
    def dirname(self):
        """ dirname part of path. """
        return self._getbyspec('dirname')[0]
    dirname = property(dirname, None, None, dirname.__doc__)
    def purebasename(self):
        """ pure base name of the path (basename without the extension)."""
        return self._getbyspec('purebasename')[0]
    purebasename = property(purebasename, None, None, purebasename.__doc__)
    def ext(self):
        """ extension of the path (including the '.')."""
        return self._getbyspec('ext')[0]
    ext = property(ext, None, None, ext.__doc__)
    def dirpath(self, *args, **kwargs):
        """ return the directory path joined with any given path arguments. """
        # new(basename='') strips the last component, yielding the parent
        return self.new(basename='').join(*args, **kwargs)
    def read_binary(self):
        """ read and return a bytestring from reading the path. """
        with self.open('rb') as f:
            return f.read()
    def read_text(self, encoding):
        """ read and return a Unicode string from reading the path. """
        with self.open("r", encoding=encoding) as f:
            return f.read()
    def read(self, mode='r'):
        """ read and return the file's content using the given mode
        (text by default; pass 'rb' for bytes). """
        with self.open(mode) as f:
            return f.read()
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
if sys.version_info < (3, ):
mode = 'rU'
else: # python 3 deprecates mode "U" in favor of "newline" option
mode = 'r'
if not cr:
content = self.read(mode)
return content.split('\n')
else:
f = self.open(mode)
try:
return f.readlines()
finally:
f.close()
    def load(self):
        """ (deprecated) return object unpickled from self.read() """
        f = self.open('rb')
        try:
            import pickle
            # NOTE: unpickling can execute arbitrary code; only use on
            # trusted files (one reason this helper is deprecated)
            return py.error.checked_call(pickle.load, f)
        finally:
            f.close()
    def move(self, target):
        """ move this path to target. """
        if target.relto(self):
            raise py.error.EINVAL(
                target,
                "cannot move path into a subdirectory of itself")
        try:
            self.rename(target)
        except py.error.EXDEV:  # invalid cross-device link
            # rename() cannot cross filesystems: fall back to copy + remove
            self.copy(target)
            self.remove()
    def __repr__(self):
        """ return a string representation of this path. """
        return repr(str(self))
    def check(self, **kw):
        """ check a path for existence and properties.

            Without arguments, return True if the path exists, otherwise False.

            valid checkers::

                file=1    # is a file
                file=0    # is not a file (may not even exist)
                dir=1     # is a dir
                link=1    # is a link
                exists=1  # exists

            You can specify multiple checker definitions, for example::

                path.check(file=1, link=1)  # a link pointing to a file
        """
        # no keywords means a plain existence test
        if not kw:
            kw = {'exists': 1}
        # delegate the keyword dispatch to the Checkers helper class
        return self.Checkers(self)._evaluate(kw)
    def fnmatch(self, pattern):
        """return true if the basename/fullname matches the glob-'pattern'.

        valid pattern characters::

            *       matches everything
            ?       matches any single character
            [seq]   matches any character in seq
            [!seq]  matches any char not in seq

        If the pattern contains a path-separator then the full path
        is used for pattern matching and a '*' is prepended to the
        pattern.

        if the pattern doesn't contain a path-separator the pattern
        is only matched against the basename.
        """
        # FNMatcher (defined elsewhere in this module) implements the
        # matching rules described above
        return FNMatcher(pattern)(self)
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, PathBase)):
raise TypeError("%r: not a string or path object" %(relpath,))
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
#assert strrelpath[-1] == self.sep
#assert strrelpath[-2] != self.sep
strself = self.strpath
if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
if os.path.normcase(strself).startswith(
os.path.normcase(strrelpath)):
return strself[len(strrelpath):]
elif strself.startswith(strrelpath):
return strself[len(strrelpath):]
return ""
    def ensure_dir(self, *args):
        """ ensure the path joined with args is a directory. """
        # thin wrapper forwarding to ensure() with dir=True forced
        return self.ensure(*args, **{"dir": True})
    def bestrelpath(self, dest):
        """ return a string which is a relative path from self
            (assumed to be a directory) to dest such that
            self.join(bestrelpath) == dest and if no such
            path can be determined return dest.
        """
        try:
            if self == dest:
                return os.curdir
            base = self.common(dest)
            if not base:  # can be the case on windows
                return str(dest)
            self2base = self.relto(base)
            reldest = dest.relto(base)
            # number of '..' components needed to climb from self to base
            if self2base:
                n = self2base.count(self.sep) + 1
            else:
                n = 0
            l = [os.pardir] * n
            if reldest:
                l.append(reldest)
            target = dest.sep.join(l)
            return target
        except AttributeError:
            # dest is not path-like enough; fall back to its string form
            return str(dest)
    # thin conveniences over check()
    def exists(self):
        return self.check()
    def isdir(self):
        return self.check(dir=1)
    def isfile(self):
        return self.check(file=1)
def parts(self, reverse=False):
    """ return a root-first list of all ancestor directories
        plus the path itself.  With reverse=True the list starts at
        the path itself instead of the root.
    """
    chain = [self]
    node = self
    while 1:
        parent = node.dirpath()
        # the root is its own parent -- stop there
        if parent == node:
            break
        chain.append(parent)
        node = parent
    if not reverse:
        chain.reverse()
    return chain
def common(self, other):
    """ return the common part shared with the other path
    or None if there is no common part.
    """
    shared = None
    # walk both ancestor chains root-first until they diverge
    for mine, theirs in zip(self.parts(), other.parts()):
        if mine != theirs:
            break
        shared = mine
    return shared
def __add__(self, other):
    """ return new path object with 'other' added to the basename"""
    combined = self.basename + str(other)
    return self.new(basename=combined)
def __cmp__(self, other):
    """ return sort value (-1, 0, +1).

    NOTE: only consulted on Python 2 -- ``cmp`` does not exist on
    Python 3, where ``__lt__`` below is used for ordering instead.
    """
    try:
        return cmp(self.strpath, other.strpath)
    except AttributeError:
        # other has no strpath -- compare string representations
        return cmp(str(self), str(other)) # self.path, other.path)
def __lt__(self, other):
    """Order paths by their path strings; fall back to str() when the
    other object has no ``strpath`` attribute."""
    try:
        mine = self.strpath
        theirs = other.strpath
    except AttributeError:
        return str(self) < str(other)
    return mine < theirs
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
    """ yields all paths below the current one

    fil is a filter (glob pattern or callable), if not matching the
    path will not be yielded, defaulting to None (everything is
    returned)

    rec is a filter (glob pattern or callable) that controls whether
    a node is descended, defaulting to None

    ignore is an Exception class that is ignored when calling dirlist()
    on any of the paths (by default, all exceptions are reported)

    bf if True will cause a breadthfirst search instead of the
    default depthfirst. Default: False

    sort if True will sort entries within each directory level.
    """
    visitor = Visitor(fil, rec, ignore, bf, sort)
    for item in visitor.gen(self):
        yield item
def _sortlist(self, res, sort):
    # sort may be True (plain in-place sort) or -- deprecated -- a
    # py2-style comparison callable, which cannot work on python3
    if sort:
        if hasattr(sort, '__call__'):
            warnings.warn(DeprecationWarning(
                "listdir(sort=callable) is deprecated and breaks on python3"
            ), stacklevel=3)
            res.sort(sort)
        else:
            res.sort()
def samefile(self, other):
    """ return True if other refers to the same path as self.

    NOTE: this base implementation only compares the path strings;
    os-specific subclasses may override it with a real stat comparison.
    """
    return self.strpath == str(other)
def __fspath__(self):
    # os.PathLike support (PEP 519) -- lets os functions accept this object
    return self.strpath
class Visitor:
    """Helper that generates all paths below a start path; used by visit()."""
    def __init__(self, fil, rec, ignore, bf, sort):
        # glob-pattern strings are wrapped into FNMatcher callables
        if isinstance(fil, py.builtin._basestring):
            fil = FNMatcher(fil)
        if isinstance(rec, py.builtin._basestring):
            self.rec = FNMatcher(rec)
        elif not hasattr(rec, '__call__') and rec:
            # any other truthy non-callable means "descend everywhere"
            self.rec = lambda path: True
        else:
            self.rec = rec
        self.fil = fil
        self.ignore = ignore
        self.breadthfirst = bf
        self.optsort = sorted if sort else (lambda x: x)

    def gen(self, path):
        """Recursively yield entries below ``path`` (depth- or breadth-first)."""
        try:
            entries = path.listdir()
        except self.ignore:
            return
        recurse = self.rec
        subdirs = self.optsort([entry for entry in entries
                                if entry.check(dir=1)
                                and (recurse is None or recurse(entry))])
        if not self.breadthfirst:
            # depth-first: descend before yielding this level
            for subdir in subdirs:
                for item in self.gen(subdir):
                    yield item
        for entry in self.optsort(entries):
            if self.fil is None or self.fil(entry):
                yield entry
        if self.breadthfirst:
            # breadth-first: descend after yielding this level
            for subdir in subdirs:
                for item in self.gen(subdir):
                    yield item
class FNMatcher:
    """Callable that matches a path object against a glob pattern."""
    def __init__(self, pattern):
        self.pattern = pattern

    def __call__(self, path):
        """Return True if ``path`` matches the stored pattern."""
        pattern = self.pattern
        sep = path.sep
        if pattern.find(sep) == -1 and iswin32 and posixpath.sep in pattern:
            # Running on Windows, the pattern has no Windows path separators,
            # and the pattern has one or more Posix path separators. Replace
            # the Posix path separators with the Windows path separator.
            pattern = pattern.replace(posixpath.sep, sep)
        if sep in pattern:
            # pattern addresses the full path; anchor it with a leading '*'
            name = str(path)  # path.strpath # XXX svn?
            if not os.path.isabs(pattern):
                pattern = '*' + sep + pattern
        else:
            name = path.basename
        return fnmatch.fnmatch(name, pattern)
| UK992/servo | tests/wpt/web-platform-tests/tools/third_party/py/py/_path/common.py | Python | mpl-2.0 | 14,626 | [
"VisIt"
] | d1910e325726000244f0c9d7eb892a826f2b10358e74fac1f837d4ef2d92ba99 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016 Malte Gotz
This contains mainly the DoseWidget class to display a dose in a QWidget
and access its methods.
ToDo:
save advanced settings?
ideas:
click and drag for selection of area
or click and drag moves center for new input style, and area selection for old
"""
#get ready for python 3
from __future__ import (print_function, division, absolute_import,
unicode_literals)
from collections import OrderedDict
import logging
import numpy as np
import os
import traceback
#enable compatibility to both pyqt4 and pyqt5 and load the proper modules
try:
if os.environ['QT_API'] == 'pyqt5':
from PyQt5.QtWidgets import (QWidget, QFileDialog, qApp)
from PyQt5 import QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvas
else:
from PyQt4.QtGui import (QWidget, QFileDialog, qApp)
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvas
except ImportError:
raise ImportError("dosewidget requires PyQt4 or PyQt5. "
"QT_API: {!s}".format(os.environ['QT_API']))
#load qt design UI, use relative import if run as a module
if os.environ["QT_API"] == "pyqt5":
from .dosewidget_ui_qt5 import Ui_DoseWidget
else:
from .dosewidget_ui_qt4 import Ui_DoseWidget
from .navtoolbar import MyNavigationToolbar
from matplotlib.figure import Figure
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.patches import Ellipse, Rectangle
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
from scipy.optimize import curve_fit
from scipy.ndimage import gaussian_filter, center_of_mass
# simple edit of additional settings and gui helper functions
from mg.pyguitools import EasyEditSettings, SimplePlotWindow, gui_save, gui_restore
#2D gauss fitting
from mg.dataprocessing import (gauss2D, fit_2D_gauss, cross, FitError, sg_2d_filter)
#the DoseArray from ebttools provides the core of the backend
from ebttools.core import DoseArray
class ScalarFormatterWithUnit(ticker.ScalarFormatter):
    """Extension of ScalarFormatter that appends a unit to every tick label."""
    def __init__(self, unit="Gy", useOffset=None, useMathText=None, useLocale=None):
        self.unit = unit
        super(ScalarFormatterWithUnit, self).__init__(useOffset, useMathText,
                                                      useLocale)

    def __call__(self, x, pos=None):
        """
        Return the format for tick value `x` at position `pos`.
        """
        if len(self.locs) == 0:
            return ''
        label = self.pprint_val(x) + " " + self.unit
        return self.fix_minus(label)
def gauss(x, A, x0, sigma, offset):
    """1D Gaussian: amplitude A, center x0, width sigma, constant offset."""
    exponent = -np.square(x - x0) / (2 * sigma ** 2)
    return A * np.exp(exponent) + offset
#define possibilities for color maps and filter for their availability
#on the current installation
import matplotlib.cm as cm
possibleCmapChoices = ["inferno","viridis","hot","gnuplot","spectral","jet",
                       "rainbow","gray","seismic"]
colorMapChoices = []
for cmapName in possibleCmapChoices:
    try:
        cm.get_cmap(cmapName)
    except ValueError:
        # colormap unknown to this matplotlib installation -> skip it
        continue
    colorMapChoices.append(cmapName)

# advanced display settings; list-valued entries are choices with the
# selected index first
_advSettings = EasyEditSettings([("area stat linecolor","red"),
                                 ("area stat linewidth",2.0),
                                 ("isodose color","yellow"),
                                 ("isodose linewidth",2.0),
                                 ("isodose fontsize",14),
                                 ("label axes",True),
                                 ("color map",[0]+colorMapChoices),
                                 ("show grid",False),
                                 ("profile interpolation",
                                  [0,"nearest","linear","spline"])])
_defaultSettings = _advSettings.get_settings()
class DoseWidget(QWidget):
"""Class to display a dose distribution
The DoseArray provided at construction is displayed in a matplotlib plot
embedded in a QWidget. The widget may be inserted into a PyQt UI as any
other widget would.
In addition it provides GUI interfaces to calculate and fit aspects of the
dose distribution by calling the methods of the DoseArray.
"""
def __init__(self, doseDistribution, settings=_defaultSettings,
             calculationSettings = None, loadUI = False):
    """Constructor

    Parameters
    ---------------
    doseDistribution : DoseArray
        A DoseArray object to be displayed here
    settings : dict of the settings, optional
        _defaultSettings is a dict of the settings used here, constructed
        using the EasyEditSettings. See the _advSettings as to what keys the
        dict should contain
    calculationSettings : dict, optional
        should contain the settings used to calculate the doseDistribution
        from a film. It will be saved using the dict_keys as headers
    loadUI : bool, optional
        load saved ui settings
    """
    QWidget.__init__(self)
    #save local copy of dose and the settings used to compute it
    self.doseDistribution = doseDistribution
    self.calculationSettings = calculationSettings
    # Set up the user interface from Designer.
    self.ui = Ui_DoseWidget()
    self.ui.setupUi(self)
    self.settings = settings
    self.set_ui_limits()
    # setup the selection of evaluation functions:
    # key of first level dict is the combo box text (order dict so that they appear in defined order in combobox)
    # each entry should be another dictionary with the elements eval pointing
    # to the evaluation function, marker which is a function returning an appropriate
    # marker artist and tip which is a tooltip string
    functions = OrderedDict([("Rectangle",{"eval":self.rectangle,
                                           "marker":self.rectangle_marker,
                                           "tip":"rectangular selection area"}),
                             ("Ellipse",{"eval":self.ellipse,
                                         "marker":self.ellipse_marker,
                                         "tip":"elliptical selection area"}),
                             ("Profile",{"eval":self.profile,
                                         "marker":self.line_marker,
                                         "tip":"get a profile"}),
                             ("Profile: Parabola Fit",{"eval":self.profile_with_parabola,
                                                       "marker":self.line_marker,
                                                       "tip":"get a profile and fit a parabola"}),
                             ("Profile: Gauss Fit",{"eval":self.profile_with_gauss,
                                                    "marker":self.line_marker,
                                                    "tip":"get a profile and fit a gaussian"}),
                             ("2D Gauss Fit",{"eval":self.fit_2D_gauss,
                                              "marker":self.axis_parallel_rectangle_marker,
                                              "tip":"fit the entire distribution with a 2D Gaussian"}),
                             ("Find Maximum",{"eval":self.find_max,
                                              "marker":self.axis_parallel_rectangle_marker,
                                              "tip":"find the maximum and its location in a rectangular selection"}),
                             ("Center of Mass",{"eval":self.center_of_mass,
                                                "marker":self.axis_parallel_rectangle_marker,
                                                "tip":"determine the center of mass in a rectangular selection"})])
    #set up combobox with functions dictionary (tooltip stored per item)
    for key in functions:
        self.ui.evalFunction.addItem(key,functions[key])
        idx = self.ui.evalFunction.count()-1
        self.ui.evalFunction.setItemData(idx,functions[key]["tip"],
                                         QtCore.Qt.ToolTipRole)
    #the smoothing functions available
    sFunc = OrderedDict([("Savitzky-Golay",{"name":"sg",
                                            "settings":self.ui.sgSettingsLayout,
                                            "tip":"Savitzky-Golay filter fits a polynomial of a given order to a number of points defined by the window size"}),
                         ("Gaussian",{"name":"gauss",
                                      "settings":self.ui.gaussSettingsLayout,
                                      "tip":"Gauss filter averages an area weighted by the defined gaussian"})])
    #list all the settings layouts. This is used for dynamic hiding and showing of the relevant assets
    self.smoothSettings = [self.ui.sgSettingsLayout,self.ui.gaussSettingsLayout]
    for key in sFunc:
        self.ui.smoothFunction.addItem(key,sFunc[key])
        idx = self.ui.smoothFunction.count()-1
        self.ui.smoothFunction.setItemData(idx,sFunc[key]["tip"],
                                           QtCore.Qt.ToolTipRole)
    #load UI before connecting slots to avoid needless on change firing
    if loadUI:
        self.load_ui_values()
    #matplotlib frame setup
    self.create_mplframe()
    self.make_dose_plot()
    #connect slots
    #value changes
    self.ui.xCenter.valueChanged.connect(self.ROI_value_change)
    self.ui.yCenter.valueChanged.connect(self.ROI_value_change)
    self.ui.width.valueChanged.connect(self.ROI_value_change)
    self.ui.height.valueChanged.connect(self.ROI_value_change)
    self.ui.angle.valueChanged.connect(self.ROI_value_change)
    self.ui.evalFunction.currentIndexChanged.connect(self.ROI_value_change)
    self.ui.x0.valueChanged.connect(self.ROI_value_change)
    self.ui.x1.valueChanged.connect(self.ROI_value_change)
    self.ui.y0.valueChanged.connect(self.ROI_value_change)
    self.ui.y1.valueChanged.connect(self.ROI_value_change)
    self.ui.smooth.stateChanged.connect(self.smooth_change)
    self.ui.smoothFunction.currentIndexChanged.connect(self.smooth_combo_change)
    self.ui.smoothOrder.valueChanged.connect(self.sg_value_change)
    self.ui.smoothWindowSize.valueChanged.connect(self.sg_value_change)
    self.ui.smoothSigma.valueChanged.connect(self.gauss_value_change)
    qApp.focusChanged.connect(self.focus_change)
    #buttons
    self.ui.alternateSpecToggle.stateChanged.connect(self.toggle_ROI_spec)
    self.ui.refreshButton.clicked.connect(self.refresh)
    self.ui.bestLimits.clicked.connect(self.set_optimal_scale)
    self.ui.calculateButton.clicked.connect(self.calculate)
    self.ui.exportTxtButton.clicked.connect(self.save_as_txt)
    self.ui.exportNpButton.clicked.connect(self.save_as_numpy)
    self.ui.clearFitButton.clicked.connect(self.clear_2D_fit)
    self.ui.showIsoLines.stateChanged.connect(self.isodose_change)
    self.ui.browseSaveTable.clicked.connect(self.save_file_dialog)
    self.ui.saveCalculationData.clicked.connect(self.save_calc_to_file)
    #call some slots explicitly to properly apply loaded values
    if loadUI:
        self.toggle_ROI_spec()
        self.isodose_change()
        self.update_dose_plot()
        self.update_marker()
        self.smooth_change()
    #initialize some variables
    self.savePath = ""    # last directory used in save dialogs
    self.centerMarker = []  # scatter artists marking found centers/maxima
##############################################################################
# setup methods for the window and update settings
def create_mplframe(self):
    """creates the matplotlib canvas and figure with colorbar
    """
    ### create matplotlib figure and canvas as central widget
    self.fig = Figure()
    self.ax1 = self.fig.add_subplot(111)
    # reserve a narrow axes to the right of the image for the colorbar
    divider = make_axes_locatable(self.ax1)
    self.clbAxes = divider.append_axes("right", size="5%", pad=0.6)
    self.canvas = FigureCanvas(self.fig)
    self.toolbar = MyNavigationToolbar(self.canvas, None)
    # NOTE(review): presumably makes the toolbar report selections as
    # center+size rather than corner points -- see MyNavigationToolbar
    self.toolbar.centeredSelection = True
    self.ui.imageLayout.addWidget(self.canvas)
    self.ui.imageLayout.addWidget(self.toolbar)
    #connect the toolbar selection to matplotlib as a callback
    self.canvas.mpl_connect('selection_changed',self.toolbar_selection)
def set_ui_limits(self):
    """set the limits of the various UI elements depending on the dose distribution
    """
    # dose spin boxes: allow up to 10x the maximum dose, never negative
    doseCeiling = np.max(self.doseDistribution)*10.
    for doseBox in (self.ui.doseMin, self.ui.doseMax, self.ui.nominalDose):
        doseBox.setMaximum(doseCeiling)
        doseBox.setMinimum(0.0)
    self.set_optimal_scale()
    # ROI limits in cm, derived from the pixel shape and resolution
    xMax = self.doseDistribution.shape[1]/self.doseDistribution.DPC
    yMax = self.doseDistribution.shape[0]/self.doseDistribution.DPC
    for xBox in (self.ui.xCenter, self.ui.width, self.ui.x0, self.ui.x1):
        xBox.setMaximum(xMax)
        xBox.setMinimum(0.0)
    for yBox in (self.ui.yCenter, self.ui.height, self.ui.y0, self.ui.y1):
        yBox.setMaximum(yMax)
        yBox.setMinimum(0.0)
    # smoothing limits follow the pixel dimensions of the distribution
    largestDim = max(self.doseDistribution.shape[1],
                     self.doseDistribution.shape[0])
    self.ui.smoothWindowSize.setMaximum(largestDim)
    self.ui.smoothOrder.setMaximum(self.ui.smoothWindowSize.value()-1)
    self.ui.smoothSigma.setMaximum(largestDim)
def set_settings(self, settings):
    """Replace the current display settings dict (see _advSettings for keys)."""
    self.settings = settings
def get_settings(self):
    """Return the current display settings dict."""
    return self.settings
def load_ui_values(self):
    """load values of a previously stored session"""
    #create a QSettings object to access the stored settings
    QtSettings=QtCore.QSettings("OncoRay","EBT Evaluation")
    #load values for the various UI elements under the widget's own group
    QtSettings.beginGroup("DoseWidget")
    gui_restore(self.ui,QtSettings)
    QtSettings.endGroup()
def save_ui_values(self):
    """save the settings of the GUI
    """
    #create a QSettings object to store the settings
    QtSettings=QtCore.QSettings("OncoRay","EBT Evaluation")
    #save element content under the widget's own group
    QtSettings.beginGroup("DoseWidget")
    gui_save(self.ui,QtSettings)
    QtSettings.endGroup()
##############################################################################
# UI update methods
def get_iso_list(self):
    """Parse the isodose text field into a sorted numpy array of percentages.

    Each line of the field is expected to hold one non-negative number;
    lines that do not parse or are negative are logged and skipped.

    Returns
    -------
    numpy.ndarray
        sorted percentage values (may be empty)
    """
    text = self.ui.isoListField.toPlainText()
    isoLines = []
    for line in text.split("\n"):
        try:
            value = float(line)
            # negative percentages make no sense -> treat like a parse error
            if value < 0:
                raise ValueError
            isoLines.append(value)
        except ValueError:
            logging.error("{!s} is not a sensible percentage".format(line))
    isoLines.sort()
    return np.array(isoLines)
def match_ui_inputs(self,newIsMaster=True):
    """calculate the old style ROI specification (two corner points
    x0/y0, x1/y1) from the new one (center, width, height, angle) or
    vice versa, depending on who is master
    """
    idx = self.ui.evalFunction.currentIndex()
    currentMarkerFunction = self.ui.evalFunction.itemData(idx)["marker"]
    if newIsMaster:
        if currentMarkerFunction == self.line_marker:
            # line ROI: project half the width along the rotation angle
            # to obtain the two end points
            phi_rad = self.ui.angle.value()*np.pi/180.
            x0 = self.ui.xCenter.value() - np.cos(phi_rad)*self.ui.width.value()/2.0
            y0 = self.ui.yCenter.value() + np.sin(phi_rad)*self.ui.width.value()/2.0
            x1 = self.ui.xCenter.value() + np.cos(phi_rad)*self.ui.width.value()/2.0
            y1 = self.ui.yCenter.value() - np.sin(phi_rad)*self.ui.width.value()/2.0
        else:
            # area ROI: corners are center +/- half width/height
            x0 = self.ui.xCenter.value()-self.ui.width.value()/2.0
            x1 = self.ui.xCenter.value()+self.ui.width.value()/2.0
            y0 = self.ui.yCenter.value()-self.ui.height.value()/2.0
            y1 = self.ui.yCenter.value()+self.ui.height.value()/2.0
        #update but block triggering of further updates
        self.ui.x0.blockSignals(True)
        self.ui.x0.setValue(x0)
        self.ui.x0.blockSignals(False)
        self.ui.x1.blockSignals(True)
        self.ui.x1.setValue(x1)
        self.ui.x1.blockSignals(False)
        self.ui.y0.blockSignals(True)
        self.ui.y0.setValue(y0)
        self.ui.y0.blockSignals(False)
        self.ui.y1.blockSignals(True)
        self.ui.y1.setValue(y1)
        self.ui.y1.blockSignals(False)
    else:
        xCenter = (self.ui.x0.value()+self.ui.x1.value())/2.0
        yCenter = (self.ui.y0.value()+self.ui.y1.value())/2.0
        if currentMarkerFunction == self.line_marker:
            # line ROI: width is the end-point distance, height unused
            width = np.hypot(self.ui.x0.value()-self.ui.x1.value(),
                             self.ui.y0.value()-self.ui.y1.value())
            height = 0.0
            try:
                angle = np.arctan((self.ui.y0.value()-self.ui.y1.value())/
                                  (self.ui.x1.value()-self.ui.x0.value()))*180./np.pi
            except ZeroDivisionError:
                # vertical line
                angle = 90.0
        else:
            width = np.abs(self.ui.x0.value()-self.ui.x1.value())
            height = np.abs(self.ui.y0.value()-self.ui.y1.value())
            angle = 0.0
        #update but block triggering of further updates
        elements = [self.ui.xCenter,self.ui.yCenter,self.ui.width,
                    self.ui.height,self.ui.angle]
        for element in elements:
            element.blockSignals(True)
        self.ui.xCenter.setValue(xCenter)
        self.ui.yCenter.setValue(yCenter)
        self.ui.width.setValue(width)
        self.ui.height.setValue(height)
        self.ui.angle.setValue(angle)
        for element in elements:
            element.blockSignals(False)
def make_dose_plot(self):
    """draw the dose distribution in the plot
    """
    shape = self.doseDistribution.shape
    # axes extent in cm (DPC = dots per centimeter)
    yMax = shape[0]/self.doseDistribution.DPC
    xMax = shape[1]/self.doseDistribution.DPC
    #smooth if desired
    if self.ui.smooth.isChecked():
        if not hasattr(self,"origDoseDistribution"): #save original dose distribution, if not already saved
            self.origDoseDistribution = self.doseDistribution
        #create a smoothed array of the original dose and then cast it to a dose array
        #then copy the DPC and unit from the old array, there should be a more comfortable way....
        self.doseDistribution = self.smooth_dose(self.origDoseDistribution).view(DoseArray)
        self.doseDistribution.DPC = self.origDoseDistribution.DPC
        self.doseDistribution.unit = self.origDoseDistribution.unit
    elif hasattr(self,"origDoseDistribution"):#restore original dose distribution if smooth is unchecked
        self.doseDistribution = self.origDoseDistribution
        del self.origDoseDistribution
    self.ax1.cla()
    #plot the dose distribution
    self.dosePlot = self.ax1.imshow(self.doseDistribution,
                                    interpolation="nearest",
                                    extent=[0,xMax,yMax,0],
                                    zorder=-1)#image should be lowest zorder
    # colorbar in the dedicated axes, ticks labeled with the dose unit
    self.clb = self.fig.colorbar(self.dosePlot, cax = self.clbAxes,
                                 orientation="vertical",
                                 format=ScalarFormatterWithUnit(unit=self.doseDistribution.unit))
    self.ax1.minorticks_on()
    # thicker frame and outward ticks on all four sides
    for axis in ['top','bottom','left','right']:
        self.ax1.spines[axis].set_linewidth(2.0)
    self.ax1.tick_params(which='major',direction="out",width=2.0,length=6,
                         bottom=True,top=True,left=True,right=True,
                         labelbottom=True,labeltop=True,labelleft=True,labelright=True)
    self.ax1.tick_params(which='minor',direction="out",width=1.5,length=4,
                         bottom=True,top=True,left=True,right=True)
    self.update_dose_plot()
def plot_isodose(self):
    """draw isodose contour lines at the entered percentages of the
    nominal dose

    Returns a (contour set, label list) tuple, or None if no valid
    percentage is entered in the iso list field.
    """
    percentages = self.get_iso_list()
    if len(percentages) == 0:
        return None
    shape = self.doseDistribution.shape
    yMax = shape[0]/self.doseDistribution.DPC
    xMax = shape[1]/self.doseDistribution.DPC
    # convert percentages of the nominal dose to absolute dose levels
    levels = percentages*self.ui.nominalDose.value()/100.
    cPlot = self.ax1.contour(self.doseDistribution,
                             levels = levels,
                             colors=self.settings["isodose color"],
                             linewidths=self.settings["isodose linewidth"],
                             origin='image',
                             extent=[0,xMax,yMax,0],
                             zorder=0)#set relatively low zorder, so they are just above the image
    # label each contour line with its percentage value
    labels = {}
    for level, percentage in zip(cPlot.levels, percentages):
        labels[level] = "{:.0f}".format(percentage)
    cLabels = self.ax1.clabel(cPlot,fmt=labels,
                              fontsize= self.settings["isodose fontsize"])
    return (cPlot, cLabels)
def set_center(self, xCenter, yCenter):
    """sets the center of the ROI

    takes care of dealing with the different possible selection schemes"""
    targets = ((self.ui.xCenter, xCenter), (self.ui.yCenter, yCenter))
    for widget, coordinate in targets:
        # enable the element, update its value without emitting signals
        widget.setEnabled(True)
        widget.blockSignals(True)
        widget.setValue(coordinate)
        widget.blockSignals(False)
        # keep enabled or disable based on the alternate spec setting
        widget.setEnabled(not self.ui.alternateSpecToggle.isChecked())
    #match inputs with the center as master and update the ROI marker
    self.match_ui_inputs(newIsMaster=True)
    self.update_marker()
def smooth_dose(self, originalDose):
    """Return a smoothed copy of ``originalDose`` using the filter
    currently selected in the smoothFunction combo box.

    Supported filters are Savitzky-Golay ("sg") and Gaussian ("gauss");
    an unknown selection is logged and the dose is returned unchanged.
    """
    idx = self.ui.smoothFunction.currentIndex()
    name = self.ui.smoothFunction.itemData(idx)["name"]
    if name == "sg":
        smoothedDose = sg_2d_filter(originalDose,
                                    self.ui.smoothWindowSize.value(),
                                    self.ui.smoothOrder.value(),
                                    derivative=None)
    elif name == "gauss":
        smoothedDose = gaussian_filter(originalDose,
                                       self.ui.smoothSigma.value())
    else:
        # fixed typo in the log message ("unkown")
        logging.error("unknown smoothing function with name "+name)
        smoothedDose = originalDose
    return smoothedDose
def update_dose_plot(self):
    """set limits, colormap, isodose lines, labels and grid of the dose plot
    """
    #set limits of the color map
    self.dosePlot.set_clim(self.ui.doseMin.value(),
                           self.ui.doseMax.value())
    #set the colormap and add it to the plot
    self.dosePlot.set_cmap(self.settings["color map"])
    self.clb.update_normal(self.dosePlot)
    #isodoses
    #remove old isodoses (contour lines and their text labels) if there are any
    if hasattr(self,"contourPlot"):
        for coll, label in zip(self.contourPlot[0].collections, self.contourPlot[1]):
            coll.remove()
            label.remove()
        del self.contourPlot
    if self.ui.showIsoLines.isChecked():
        # plot_isodose returns None when no valid percentages are entered
        cPlot = self.plot_isodose()
        if cPlot is not None:
            self.contourPlot = cPlot
    #labels and grid
    if self.settings["label axes"]:
        self.ax1.set_xlabel("x-position in cm")
        self.ax1.set_ylabel("y-position in cm")
    else:
        self.ax1.set_xlabel("")
        self.ax1.set_ylabel("")
    self.ax1.grid(self.settings["show grid"])
def update_marker(self):
    """redraw the ROI marker matching the currently selected evaluation
    function"""
    #try removing old marker, attribute error is raised when the variable is
    #not set and value error when it has already been removed
    if hasattr(self,"area_marker"):
        try:
            self.area_marker.remove()
        except (ValueError) as e:
            logging.debug("ignored "+str(e))
    #get the appropriate new marker artist from the combo box item data
    idx = self.ui.evalFunction.currentIndex()
    artist = self.ui.evalFunction.itemData(idx)["marker"]()
    #add new marker to plot (marker function may return None)
    if artist != None:
        self.area_marker = self.ax1.add_artist(artist)
    self.canvas.draw()
##############################################################################
# markers of the ROI. Each evaluation methods specificies one of these to mark
# the region of interest and they are then called by the update_marker method
# whenever the input changes
def axis_parallel_rectangle_marker(self):
    """rectangle marker that forces axis parallelity
    """
    if self.ui.angle.value() != 0.0:
        logging.warning("this ROI is axis parallel only and does not support rotation")
        # reset the angle with signals blocked: if the signal fired, the
        # marker update would re-trigger and the marker gets duplicated
        self.ui.angle.blockSignals(True)
        self.ui.angle.setValue(0.0)
        self.ui.angle.blockSignals(False)
    return self.rectangle_marker()
def ellipse_marker(self):
    """returns a matplotlib artist for an ellipse
    """
    center = (self.ui.xCenter.value(), self.ui.yCenter.value())
    # negative angle: the plot's y-axis points downwards
    return Ellipse(center,
                   self.ui.width.value(), self.ui.height.value(),
                   angle=-self.ui.angle.value(),
                   color=self.settings["area stat linecolor"],
                   linewidth=self.settings["area stat linewidth"],
                   fill=False)
def line_marker(self):
    """returns a matplotlib artist for a line
    """
    #use the conveniently already calculated old style coordinates
    xdata = [self.ui.x0.value(), self.ui.x1.value()]
    ydata = [self.ui.y0.value(), self.ui.y1.value()]
    return Line2D(xdata, ydata,
                  color=self.settings["area stat linecolor"],
                  linewidth=self.settings["area stat linewidth"])
def no_marker(self):
    """Placeholder for evaluation functions that need no ROI marker."""
    return None
def rectangle_marker(self):
    """return a matplotlib artist for a rectangle
    """
    # convert the center/width/height/angle specification into the
    # lower-left corner that matplotlib's Rectangle expects
    phi = self.ui.angle.value()*np.pi/180.
    halfW = self.ui.width.value()/2.0
    halfH = self.ui.height.value()/2.0
    cornerX = self.ui.xCenter.value() - (halfW*np.cos(phi) + halfH*np.sin(phi))
    cornerY = self.ui.yCenter.value() - (halfH*np.cos(phi) - halfW*np.sin(phi))
    # negative angle: the plot's y-axis points downwards
    return Rectangle((cornerX, cornerY),
                     self.ui.width.value(),
                     self.ui.height.value(),
                     angle=-self.ui.angle.value(),
                     color=self.settings["area stat linecolor"],
                     linewidth=self.settings["area stat linewidth"],
                     fill=False)
##############################################################################
# evaluation methods which are called upon when clicking calcualte
def rectangle(self):
    """calculate and log the dose statistics inside the rectangular ROI
    and return them as (sum, average, std, min, max)"""
    stats = self.doseDistribution.rectangle_stats(self.ui.xCenter.value(),
                                                  self.ui.yCenter.value(),
                                                  self.ui.width.value()/2.0,
                                                  self.ui.height.value()/2.0,
                                                  self.ui.angle.value())
    logging.debug(stats)
    logging.info("### Statistics for rectangle ###")
    # divide by DPC^2 to convert the pixel sum into Gy*cm^2
    logging.info("sum: {:.4e} Gy*cm^2".format(stats[0]/self.doseDistribution.DPC**2))
    templates = ("average: {:.4e} Gy", "standard deviation: {:.4e} Gy",
                 "minimum: {:.4e} Gy", "maximum: {:.4e} Gy")
    for template, value in zip(templates, stats[1:]):
        logging.info(template.format(value))
    logging.info("--------------------------------------------------------------")
    return stats
def center_of_mass(self):
    """calculate the center of mass inside the rectangular x0/x1, y0/y1 ROI

    Logs the location, marks it in the plot and -- if "use as center"
    is checked -- moves the ROI center there.
    """
    # get resolution and convert the ROI to pixel coordinates
    DPC = self.doseDistribution.DPC
    xlim = sorted([int(self.ui.x0.value()*DPC),int(self.ui.x1.value()*DPC)])
    ylim = sorted([int(self.ui.y0.value()*DPC),int(self.ui.y1.value()*DPC)])
    #create a label array for ndimage restricting the calculation to the ROI
    label = np.zeros_like(self.doseDistribution,dtype=np.uint8)
    label[ylim[0]:ylim[1],xlim[0]:xlim[1]] = 1
    loc = center_of_mass(self.doseDistribution,labels=label,index=1)
    # convert pixel indices back to cm (+0.5: use the pixel center)
    yPos = (loc[0]+0.5)/DPC
    xPos = (loc[1]+0.5)/DPC
    logging.info("### Center of Mass ###")
    # bug fix: x and y were swapped in the log output
    logging.info("location x; y: {:.4e}; {:.4e}".format(xPos, yPos))
    logging.info("--------------------------------------------------------------")
    self.centerMarker.append(self.ax1.scatter(xPos, yPos, s=100, marker = "+",
                                              c=self.settings["area stat linecolor"]))
    #use the results as input, if desired
    if self.ui.useAsCenter.isChecked():
        self.set_center(xPos, yPos)
    self.canvas.draw()
def ellipse(self):
    """calculate and log the dose statistics inside the elliptical ROI
    and return them as (sum, average, std, min, max)"""
    #get the stats from the dose array
    stats = self.doseDistribution.ellipse_stats(self.ui.xCenter.value(),
                                                self.ui.yCenter.value(),
                                                self.ui.width.value()/2.0,
                                                self.ui.height.value()/2.0,
                                                self.ui.angle.value())
    logging.info("### Statistics for ellipse ###")
    # divide by DPC^2 to convert the pixel sum into Gy*cm^2
    logging.info("sum: {:.4e} Gy*cm^2".format(stats[0]/self.doseDistribution.DPC**2))
    templates = ("average: {:.4e} Gy", "standard deviation: {:.4e} Gy",
                 "minimum: {:.4e} Gy", "maximum: {:.4e} Gy")
    for template, value in zip(templates, stats[1:]):
        logging.info(template.format(value))
    logging.info("--------------------------------------------------------------")
    return stats
def find_max(self):
    """find the maximum dose and its location in the rectangular ROI

    Logs the result, marks it in the plot and optionally applies it as
    the new color scale maximum and/or ROI center.
    """
    # get resolution and convert the ROI to pixel coordinates
    DPC = self.doseDistribution.DPC
    xlim = sorted([int(self.ui.x0.value()*DPC),int(self.ui.x1.value()*DPC)])
    ylim = sorted([int(self.ui.y0.value()*DPC),int(self.ui.y1.value()*DPC)])
    #slicing should only create a view and not copy any data (no/very small memory cost)
    selection = self.doseDistribution[ylim[0]:ylim[1],xlim[0]:xlim[1]]
    maximum = float(selection.max())
    maxLocs = np.argwhere(selection==maximum)
    if len(maxLocs) > 1:
        logging.warning("maximum location not unique, "
                        +"averaging the determined maxima")
        loc = maxLocs.mean(axis=0)
    else:
        loc = maxLocs[0]
    # shift to absolute pixel indices, +0.5 for the pixel center, then to cm
    yPos, xPos = (loc+[ylim[0],xlim[0]]+0.5)/DPC
    logging.info("### Maximum determination ###")
    logging.info("max: {:.4e}".format(maximum))
    # bug fix: x and y were swapped in the log output
    logging.info("location x; y: {:.4e}; {:.4e}".format(xPos, yPos))
    logging.info("--------------------------------------------------------------")
    self.centerMarker.append(self.ax1.scatter(xPos, yPos, s=100, marker = "+",
                                              c=self.settings["area stat linecolor"]))
    #use the results as input, if desired
    if self.ui.useAsMax.isChecked():
        self.ui.doseMax.setValue(maximum)
    if self.ui.useAsCenter.isChecked():
        self.set_center(xPos, yPos)
    self.canvas.draw()
def profile(self):
    """get a profile of the image along the line (x0,y0)-(x1,y1) and
    show it in a separate plot window
    """
    #construct the window, titled with the profile end points
    windowName = "profile ({:.3e},{:.3e}) - ({:.3e},{:.3e})".format(self.ui.x0.value(),
                                                                    self.ui.y0.value(),
                                                                    self.ui.x1.value(),
                                                                    self.ui.y1.value())
    self.profileWindow = SimplePlotWindow(name=windowName)
    #get the x and y profile data and plot it
    y = self.doseDistribution.profile(self.ui.x0.value(),
                                      self.ui.y0.value(),
                                      self.ui.x1.value(),
                                      self.ui.y1.value(),
                                      interpolation=self.settings["profile interpolation"])
    # x axis: distance along the profile line in cm (width = line length)
    x = np.linspace(0,self.ui.width.value(),len(y))
    self.profileWindow.ax1.plot(x,y,label="profile")
    #show the window
    self.profileWindow.show()
def profile_with_parabola(self):
    """get a profile of the image and fit it with a 2nd order polynomial
    """
    #construct the window, titled with the profile end points
    windowName = "profile ({:.3e},{:.3e}) - ({:.3e},{:.3e})".format(self.ui.x0.value(),
                                                                    self.ui.y0.value(),
                                                                    self.ui.x1.value(),
                                                                    self.ui.y1.value())
    self.profileWindow = SimplePlotWindow(name=windowName)
    #get the x and y profile data and plot it
    y = self.doseDistribution.profile(self.ui.x0.value(),
                                      self.ui.y0.value(),
                                      self.ui.x1.value(),
                                      self.ui.y1.value(),
                                      interpolation=self.settings["profile interpolation"])
    # x axis: distance along the profile line in cm (width = line length)
    x = np.linspace(0,self.ui.width.value(),len(y))
    self.profileWindow.ax1.plot(x,y,label="profile")
    #fit, construct and plot the polynomial
    p = np.polyfit(x,y,2)
    func = np.poly1d(p)
    fittedY = func(x)
    self.profileWindow.ax1.plot(x,fittedY,label="fit")
    #log the results
    logging.info("### Fit results ###")
    logging.info("y = {:.4e}*x^2 + {:.4e}*x + {:.4e}".format(*p))
    self.log_fit_points_of_interest(x,y,fittedY)
    logging.info("--------------------------------------------------------------")
    self.profileWindow.show()
def profile_with_gauss(self):
    """get a profile of the image and fit it with a gaussian
    """
    #construct the window, titled with the profile end points
    windowName = "profile ({:.3e},{:.3e}) - ({:.3e},{:.3e})".format(self.ui.x0.value(),
                                                                    self.ui.y0.value(),
                                                                    self.ui.x1.value(),
                                                                    self.ui.y1.value())
    self.profileWindow = SimplePlotWindow(name=windowName)
    #get the x and y profile data and plot it
    y = self.doseDistribution.profile(self.ui.x0.value(),
                                      self.ui.y0.value(),
                                      self.ui.x1.value(),
                                      self.ui.y1.value(),
                                      interpolation=self.settings["profile interpolation"])
    x = np.linspace(0,self.ui.width.value(),len(y))
    self.profileWindow.ax1.plot(x,y,label="profile")
    #make some educated guesses for start parameters
    center = (y*x).sum()/y.sum() #expected value
    # NOTE(review): this is the variance rather than the standard
    # deviation -- it only serves as a rough start value for sigma
    width = ((x - center)**2).sum()/len(x)
    p0 = [float((np.max(y)-np.min(y))),float(center),float(width),
          float(np.min(y))]
    logging.debug("Parameter guess: {:.4e}, {:.4e}, {:.4e}, {:.4e}".format(*p0))
    #fit and plot function
    p, cov, info, msg, success = curve_fit(gauss,x,y,p0=p0, full_output=True)
    # success codes 1-4 indicate a converged fit (see scipy.optimize.leastsq)
    if success not in (1, 2, 3, 4):
        logging.error("Fit failed with message: "+msg)
    elif cov is None:
        logging.error("None covariance matrix after {:d} iterations".format(info["nfev"]))
    else:
        fittedY = gauss(x,*p)
        self.profileWindow.ax1.plot(x,fittedY,label="fit")
        #log the results
        # bug fix: the logged formula was missing the square in the exponent
        logging.info("y = A*exp(-(x-x0)^2/(2*sigma^2)) + offset")
        logging.info("### Fit results ###")
        logging.info("A = {:.4e} +- {:.4e}".format(p[0],np.sqrt(cov[0][0])))
        logging.info("x0 = {:.4e} +- {:.4e}".format(p[1],np.sqrt(cov[1][1])))
        logging.info("sigma = {:.4e} +- {:.4e}".format(p[2],np.sqrt(cov[2][2])))
        logging.info("offset = {:.4e} +- {:.4e}".format(p[3],np.sqrt(cov[3][3])))
        self.log_fit_points_of_interest(x,y,fittedY)
        logging.info("--------------------------------------------------------------")
    self.profileWindow.show()
def log_fit_points_of_interest(self,x, y, fittedY):
"""print the results of a 1D fit on a profile
outputs maximum and left and right edge of the profile and fit
"""
dataMax = float(np.max(y)) #don't know why it's not float already, but it is not
fitMax = np.max(fittedY)
logging.info("points of interest of the fit (profile data)")
logging.info("max / position: {:.4e} ({:.4e}) / "
"{:.4e} ({:.4e})".format(fitMax, dataMax,
x[np.argmax(fittedY)],
x[np.argmax(y)]))
logging.info("left edge / % of max: {:.4e} ({:.4e}) / "
"{:.3f} ({:.3f})".format(fittedY[0],y[0],
100.*fittedY[0]/fitMax,
100.*y[0]/dataMax))
logging.info("right edge / % of max: {:.4e} ({:.4e}) / "
"{:.3f} ({:.3f})".format(fittedY[-1],y[-1],
100.*fittedY[-1]/fitMax,
100.*y[-1]/dataMax))
    def fit_2D_gauss(self):
        """fit a 2D gaussian to the current ROI of the dose distribution

        Delegates to the module level ``fit_2D_gauss`` helper on the pixel
        rectangle spanned by (x0,y0)-(x1,y1), logs the parameters scaled
        back to length units via DPC (dots per length unit), draws a fit
        contour plus a center marker on the dose plot and optionally feeds
        the results back into the doseMax / center ui fields.
        """
        # get resolution and calculate ROI
        DPC = self.doseDistribution.DPC
        xlim = sorted([int(self.ui.x0.value()*DPC),int(self.ui.x1.value()*DPC)])
        ylim = sorted([int(self.ui.y0.value()*DPC),int(self.ui.y1.value()*DPC)])
        """ debug code to check if ROI works
        self.testWindow = SimplePlotWindow(name="test")
        self.testWindow.ax1.imshow(self.doseDistribution[ylim[0]:ylim[1],
                                                         xlim[0]:xlim[1]])
        self.testWindow.show()
        """
        try:
            #module level helper, not a recursive call to this method
            popt, cov = fit_2D_gauss(self.doseDistribution[ylim[0]:ylim[1],
                                                           xlim[0]:xlim[1]],
                                     useRotation=False)
            #calculate center coordinates in the original scale
            #+0.5 to use the center of the pixel (0th pixel is at 0.5/DPC)
            xCenter = (popt[1]+xlim[0]+0.5)/DPC
            yCenter = (popt[2]+ylim[0]+0.5)/DPC
            #NOTE(review): "Guassian" typo in the user visible log message
            logging.info("### Results of 2D Guassian fit ###")
            logging.info("A*exp(-(x-x0)^2/(2*sigmaX^2)-(y-y0)^2/(2*sigmaY^2)) + offset")
            logging.info("A = {:.4e} +- {:.4e}".format(popt[0],np.sqrt(cov[0][0])))
            logging.info(("x0/y0 = {:.4e}/{:.4e} +- {:.4e}/{:.4e}"+
                          "").format(xCenter,yCenter,
                                     np.sqrt(cov[1][1])/DPC,np.sqrt(cov[2][2])/DPC))
            logging.info(("simgaX/sigmaY = {:.4e}/{:.4e} +- {:.4e}/{:.4e}"+
                          "").format(popt[3]/DPC,popt[4]/DPC,np.sqrt(cov[3][3])/DPC,np.sqrt(cov[4][4])/DPC))
            logging.info("offset = {:.4e} +- {:.4e}".format(popt[5],np.sqrt(cov[5][5])))
            logging.info("--------------------------------------------------------------")
            #create a 2D Gauss function using the fitted parameters
            gauss = gauss2D(*popt)
            #clear old contour and make new
            self.clear_2D_fit()
            #use the limits to create a grid as coordinates for the plot,
            #imshow uses 1st index as y, therefor y comes first
            #use extent to align the contour with the original plot
            self.contour = self.ax1.contour(gauss(*np.mgrid[0:ylim[1]-ylim[0],0:xlim[1]-xlim[0]]),
                                            extent=(xlim[0]/DPC, xlim[1]/DPC,
                                                    ylim[0]/DPC, ylim[1]/DPC))
            #cross marker at the fitted center, sized by the fitted sigmas
            centerArtist = cross(xCenter,yCenter,
                                 popt[3]/DPC,popt[4]/DPC,
                                 self.settings["area stat linecolor"])
            self.centerMarker.append(self.ax1.add_artist(centerArtist))
            #use the results as input, if desired
            if self.ui.useAsMax.isChecked():
                self.ui.doseMax.setValue(popt[0])
            if self.ui.useAsCenter.isChecked():
                self.set_center(xCenter, yCenter)
            self.canvas.draw()
        except FitError as e:
            logging.error("error fitting 2D Gaussian: "+str(e))
###############################################################################
# slots
def calculate(self):
idx = self.ui.evalFunction.currentIndex()
try:
calcResult = self.ui.evalFunction.itemData(idx)["eval"]()
return calcResult
except ValueError as e:
logging.error("Value Error: "+str(e))
logging.debug(traceback.format_exc().replace("\n"," - "))
logging.error("check evaluation method and ROI")
except Exception as e:
logging.critical("This should not happen - Excpetion in calculation")
logging.critical(traceback.format_exc().replace("\n"," - "))
    def clear_2D_fit(self):
        """remove the 2D fit contour and all center markers from the plot"""
        if hasattr(self,"contour"):
            try:
                #NOTE(review): ContourSet.collections is deprecated in newer
                #matplotlib - confirm against the pinned version
                for coll in self.contour.collections:
                    coll.remove()
                del self.contour
            except ValueError as e:
                #remove() raises ValueError if the artist is already gone
                logging.debug("excepted: "+str(e))
        #clear all the markers
        empty = False
        while(not empty):
            try:
                marker = self.centerMarker.pop()
                marker.remove()
            except IndexError:
                #pop on the empty list ends the loop
                empty = True
        self.canvas.draw()
#three callbacks to connect to matplotlib button press events
#they put the click coordinates into the input fields
def click_to_center(self, event):
if self.toolbar.mode == "" and event.inaxes != None:
self.ui.xCenter.setValue(event.xdata)
self.ui.yCenter.setValue(event.ydata)
def click_to_pos0(self, event):
if self.toolbar.mode == "" and event.inaxes != None:
self.ui.x0.setValue(event.xdata)
self.ui.y0.setValue(event.ydata)
def click_to_pos1(self, event):
if self.toolbar.mode == "" and event.inaxes != None:
self.ui.x1.setValue(event.xdata)
self.ui.y1.setValue(event.ydata)
#catch if the focus leaves or enters a certain widget and connect matplotlib
#button press callbacks accordingly
def focus_change(self, old, new):
if (old == self.ui.xCenter or old == self.ui.yCenter or
old == self.ui.x0 or old == self.ui.y0 or
old == self.ui.x1 or old == self.ui.y1):
self.canvas.mpl_disconnect(self.cid)
if new == self.ui.xCenter or new == self.ui.yCenter:
self.cid = self.canvas.mpl_connect('button_press_event',
self.click_to_center)
elif new == self.ui.x0 or new == self.ui.y0:
self.cid = self.canvas.mpl_connect('button_press_event',
self.click_to_pos0)
elif new == self.ui.x1 or new == self.ui.y1:
self.cid = self.canvas.mpl_connect('button_press_event',
self.click_to_pos1)
    def gauss_value_change(self):
        """slot: a gauss smoothing parameter changed, flag the dose plot
        for a full redraw on the next refresh"""
        self.redrawDose = True
def isodose_change(self):
self.ui.nominalDose.setEnabled(self.ui.showIsoLines.isChecked())
self.ui.isoListField.setEnabled(self.ui.showIsoLines.isChecked())
    def refresh(self):
        """redraw the canvas, rebuilding the dose plot only when flagged"""
        #remake the doseplot if flag was set, otherwise do minimal update
        if self.redrawDose:
            self.make_dose_plot()
            self.redrawDose = False
        else:
            self.update_dose_plot()
        self.canvas.draw()
def ROI_value_change(self):
self.match_ui_inputs((not self.ui.alternateSpecToggle.isChecked()))
self.update_marker()
    def save_as_numpy(self):
        """ask for a file name and save the dose distribution as .npy"""
        self.savePath = QFileDialog.getSaveFileName(self,
                                                    caption = "select a save file",
                                                    directory = self.savePath,
                                                    filter="Numpy files (*.npy);;All files (*)")
        #in pyqt5 a tuple is returned, unpack it
        if os.environ['QT_API'] == 'pyqt5':
            self.savePath, _ = self.savePath
        #empty path means the user canceled the dialog
        if self.savePath != "":
            np.save(self.savePath,self.doseDistribution)
        else:
            logging.debug("save canceled")
    def save_as_txt(self):
        """ask for a file name and save the dose distribution as tab
        separated text"""
        self.savePath = QFileDialog.getSaveFileName(self,
                                                    caption = "select a save file",
                                                    directory = self.savePath,
                                                    filter="Text files (*.txt);;All files (*)")
        #in pyqt5 a tuple is returned, unpack it
        if os.environ['QT_API'] == 'pyqt5':
            self.savePath, _ = self.savePath
        #empty path means the user canceled the dialog
        if self.savePath != "":
            np.savetxt(self.savePath,self.doseDistribution,delimiter="\t")
        else:
            logging.debug("save canceled")
def save_calc_to_file(self):
#check if selected evaluation is compatible
idx = self.ui.evalFunction.currentIndex()
evalFunction = self.ui.evalFunction.itemText(idx)
if evalFunction not in ('Rectangle','Ellipse'):
logging.error(evalFunction + ' not supported for saving to file, '
'select Rectangle or Ellipse')
return
filePath = self.ui.saveTablePath.text()
if filePath == '':
logging.error("please specifiy file path")
return
#calculate the values
stats = self.calculate()
#names of the value isn correct order
statNames = ["sum",
"avg",
"std",
"min",
"max"]
#names of the ui fields to be saved
uiElementNames = ["xCenter",
"yCenter",
"height",
"width",
"angle"]
#combine the names of everything that should be saved
header = ["Film No."] + statNames + ['area type'] + uiElementNames
#list of empty strings to take the data
saveContent = ['']*len(header)
saveContent[header.index('Film No.')] = self.ui.filmNumber.text()
#put values from the stats in the correct list position according to their name
for (name, value) in zip(statNames, stats):
saveContent[header.index(name)] = str(value)
saveContent[header.index('area type')] = evalFunction
#get values from the desired ui fields and put them in the list
for name in uiElementNames:
element = getattr(self.ui,name)
saveContent[header.index(name)] = str(element.value())
#add the data from the dose calculation, if present
if self.calculationSettings is not None:
keys = list(self.calculationSettings.keys())
keys.sort()
for key in keys:
header.append(key)
saveContent.append(str(self.calculationSettings[key]))
#create strings
headerStr = "\t".join(header)+"\n"
saveStr = "\t".join(saveContent)+"\n"
try:
if os.path.isfile(filePath):
with open(filePath,mode="a") as saveFile:
saveFile.write(saveStr)
logging.info(("info for "+self.ui.filmNumber.text()+" written to file"))
else:
with open(filePath,mode="w") as saveFile:
saveFile.write(headerStr)
saveFile.write(saveStr)
logging.info(("info for "+self.ui.filmNumber.text()+" written to file"))
except (OSError, IOError) as e:
logging.error("failed to write to file"+filePath)
logging.debug("Error: "+str(e))
    def save_file_dialog(self):
        """ask for the statistics table file and put the chosen path into
        the saveTablePath field (existing files are appended to, hence
        DontConfirmOverwrite)"""
        filePath =QFileDialog.getSaveFileName(self,
                                caption = 'select a file to save to',
                                directory = self.ui.saveTablePath.text(),
                                options = QFileDialog.DontConfirmOverwrite)
        #in pyqt5 a tuple is returned, unpack it
        if os.environ['QT_API'] == 'pyqt5':
            filePath, _ = filePath
        #empty path means the user canceled the dialog
        if filePath != '':
            self.ui.saveTablePath.setText(filePath)
        else:
            logging.info('file selection canceled')
    def set_optimal_scale(self):
        """scale the color range to the full data range: 0 to the maximum
        of the dose distribution (also used as nominal dose)"""
        doseMax = np.max(self.doseDistribution)
        self.ui.doseMin.setValue(0.0)
        self.ui.doseMax.setValue(doseMax)
        self.ui.nominalDose.setValue(doseMax)
    def smooth_change(self):
        """slot: smoothing was switched on or off

        Enables/disables the smoothing function combo box, hides all
        per-function settings widgets when switched off and flags the dose
        plot for a redraw.
        """
        if self.ui.smooth.isChecked():
            self.ui.smoothFunction.setEnabled(True)
            #delegate showing the right settings widgets
            self.smooth_combo_change()
        else:
            self.ui.smoothFunction.setDisabled(True)
            #hide everything
            for layout in self.smoothSettings:
                for i in range(layout.count()):
                    try:
                        layout.itemAt(i).widget().hide()
                    except AttributeError as e:
                        #layout items without a widget (spacers) have no hide()
                        logging.debug(str(e))
        self.redrawDose = True
    def smooth_combo_change(self):
        """slot: another smoothing function was selected - hide all settings
        widgets, then show only those of the current function"""
        #hide everything
        for layout in self.smoothSettings:
            for i in range(layout.count()):
                try:
                    layout.itemAt(i).widget().hide()
                except AttributeError as e:
                    #layout items without a widget (spacers) have no hide()
                    logging.debug(str(e))
        #show the settings relevant to the current function
        idx = self.ui.smoothFunction.currentIndex()
        currentSettings = self.ui.smoothFunction.itemData(idx)["settings"]
        for i in range(currentSettings.count()):
            try:
                currentSettings.itemAt(i).widget().show()
            except AttributeError as e:
                logging.debug(str(e))
    def sg_value_change(self):
        """slot: Savitzky-Golay parameters changed - enforce an odd window
        size and keep the polynomial order below the window size"""
        windowSize = self.ui.smoothWindowSize.value()
        #Savitzky-Golay requires an odd window, bump even values by one
        if (windowSize % 2) == 0:
            self.ui.smoothWindowSize.setValue(windowSize+1)
        #the polynomial order must be less than the (possibly updated) window
        self.ui.smoothOrder.setMaximum(self.ui.smoothWindowSize.value()-1)
        self.redrawDose = True
def toggle_ROI_spec(self):
"""switch between the two blocks of defining the ROI
"""
if self.ui.alternateSpecToggle.isChecked():
self.ui.x0.setEnabled(True)
self.ui.y0.setEnabled(True)
self.ui.x1.setEnabled(True)
self.ui.y1.setEnabled(True)
self.ui.xCenter.setDisabled(True)
self.ui.yCenter.setDisabled(True)
self.ui.height.setDisabled(True)
self.ui.width.setDisabled(True)
self.ui.angle.setDisabled(True)
self.toolbar.centeredSelection=False
else:
self.ui.x0.setDisabled(True)
self.ui.y0.setDisabled(True)
self.ui.x1.setDisabled(True)
self.ui.y1.setDisabled(True)
self.ui.xCenter.setEnabled(True)
self.ui.yCenter.setEnabled(True)
self.ui.height.setEnabled(True)
self.ui.width.setEnabled(True)
self.ui.angle.setEnabled(True)
self.toolbar.centeredSelection=True
self.ROI_value_change()
    def toolbar_selection(self):
        """transfer a rectangle selection made in the toolbar into the ROI
        input fields

        For the corner based scheme the values are copied directly; for the
        center based scheme the axis aligned selection is converted into
        center, width and height taking the current rotation angle into
        account.
        """
        #get selection
        selection = self.toolbar.get_selection()
        #check for ROI specification scheme
        if self.ui.alternateSpecToggle.isChecked(): #with simple corners
            #tuple of elements that need updating (in same order as selection)
            elements = (self.ui.x0,self.ui.y0,self.ui.x1,self.ui.y1)
            #block the signals from the elements while updating, then call
            #the update slot manually. (avoids circular and repeated updates)
            for element, value in zip(elements, selection):
                element.blockSignals(True)
                element.setValue(value)
                element.blockSignals(False)
            self.ROI_value_change()
        else: #with width and center, needs additional calculation
            #returned values should be ordered (lower value x0, highvalue x1)
            centerX = (selection[2] + selection[0])/2.0
            centerY = (selection[3] + selection[1])/2.0
            #simplify the angle possibilities by using the cyclic nature of rotation
            #and convert to rad
            angle = np.pi*(self.ui.angle.value()%180.)/180.
            deltaX = selection[2] - selection[0]
            deltaY = selection[3] - selection[1]
            if angle >= np.pi/2.0: #greater than 90 is the same as smaller, but switching widht and height
                angle = angle%(np.pi/2.0)
                switch = True
            else:
                switch = False
            if angle == 0: # zero is easy
                width = deltaX
                height = deltaY
            #everything else needs special conditions, because there will not
            #exist a rotated rectangle for all possible combinations of angles
            #and widht and height selected
            else:
                if angle <= np.pi/4.0:
                    deltaY = max(deltaY,np.tan(angle)*deltaX)
                    #NOTE(review): this uses the deltaY already adjusted on
                    #the previous line, not the original one - confirm that
                    #is intended
                    deltaX = max(deltaX,np.tan(angle)*deltaY)
                else:#i.e. pi/4 < angle < pi/2
                    deltaY = max(deltaY,deltaY/np.tan(angle))
                    deltaX = max(deltaX,deltaY/np.tan(angle))
                #invert the bounding box relation to recover the rotated
                #rectangle's width and height
                height = ((np.cos(angle)*deltaY-np.sin(angle)*deltaX) /
                          (np.cos(angle)**2-np.sin(angle)**2))
                width = ((np.cos(angle)*deltaX-np.sin(angle)*deltaY) /
                         (np.cos(angle)**2-np.sin(angle)**2))
            if switch:
                width, height = height, width #amazingly, this works, yeah python
            #same blockSignals dance as in the corner based branch
            for element, value in zip((self.ui.xCenter,self.ui.yCenter,
                                       self.ui.height, self.ui.width),
                                      (centerX, centerY, height, width)):
                element.blockSignals(True)
                element.setValue(value)
                element.blockSignals(False)
            self.ROI_value_change()
| mgotz/EBT_evaluation | ebttools/gui/dosewidget.py | Python | mit | 60,487 | [
"Gaussian"
] | b2e68fafe2851b504ad591dab8542fddbc9a9cdd98f9169227b38ec0ffe67fab |
import numpy as np
from ase import Atoms
from ase.calculators.calculator import kptdensity2monkhorstpack as kd2mp

# requested k-point density (points per 2*pi/length)
density = 25 / (2 * np.pi)
# cubic cell with lattice constant 6.0, periodic in all directions
lattice = 6.0
atoms = Atoms(cell=(lattice, lattice, lattice), pbc=True)
# number of k-points along the first axis of the Monkhorst-Pack grid
points = kd2mp(atoms, density)[0]
# the resulting grid must reach at least the requested density
assert points * lattice / (2 * np.pi) >= density, 'Too small k-point density'
| suttond/MODOI | ase/test/calculator/kd2mp.py | Python | lgpl-3.0 | 256 | [
"ASE"
] | 07ba7da3a780466431f185050110d715a00cfaf7290c865b0a2dfa2a2e3c8f64 |
###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
from slimit import ast
class ECMAVisitor(object):
    """Walk a slimit AST and render it back to ECMAScript source text.

    Dispatch happens in :meth:`visit` by node class name (``ast.If`` ->
    ``visit_If``); ``indent_level`` tracks the current indentation, two
    spaces per block level. Nodes carrying a truthy ``_parens`` attribute
    (presumably set upstream by the parser/minifier - confirm) are wrapped
    in parentheses.
    """
    def __init__(self):
        # current indentation depth in spaces (incremented in steps of 2)
        self.indent_level = 0
    def _make_indent(self):
        # indentation string for the current nesting depth
        return ' ' * self.indent_level
    def visit(self, node):
        # name-based dispatch; unknown node types fall back to generic_visit
        method = 'visit_%s' % node.__class__.__name__
        return getattr(self, method, self.generic_visit)(node)
    def generic_visit(self, node):
        # debug marker for node types without a dedicated visitor
        return 'GEN: %r' % node
    def visit_Program(self, node):
        return '\n'.join(self.visit(child) for child in node)
    def visit_Block(self, node):
        s = '{\n'
        self.indent_level += 2
        s += '\n'.join(
            self._make_indent() + self.visit(child) for child in node)
        self.indent_level -= 2
        s += '\n' + self._make_indent() + '}'
        return s
    def visit_VarStatement(self, node):
        s = 'var %s;' % ', '.join(self.visit(child) for child in node)
        return s
    def visit_VarDecl(self, node):
        output = []
        output.append(self.visit(node.identifier))
        if node.initializer is not None:
            output.append(' = %s' % self.visit(node.initializer))
        return ''.join(output)
    def visit_Identifier(self, node):
        return node.value
    def visit_Assign(self, node):
        # ':' is an object literal property assignment - no space before it
        if node.op == ':':
            template = '%s%s %s'
        else:
            template = '%s %s %s'
        if getattr(node, '_parens', False):
            template = '(%s)' % template
        return template % (
            self.visit(node.left), node.op, self.visit(node.right))
    def visit_GetPropAssign(self, node):
        template = 'get %s() {\n%s\n%s}'
        if getattr(node, '_parens', False):
            template = '(%s)' % template
        self.indent_level += 2
        body = '\n'.join(
            (self._make_indent() + self.visit(el))
            for el in node.elements
        )
        self.indent_level -= 2
        tail = self._make_indent()
        return template % (self.visit(node.prop_name), body, tail)
    def visit_SetPropAssign(self, node):
        template = 'set %s(%s) {\n%s\n%s}'
        if getattr(node, '_parens', False):
            template = '(%s)' % template
        # the ECMAScript grammar allows exactly one setter parameter
        if len(node.parameters) > 1:
            raise SyntaxError(
                'Setter functions must have one argument: %s' % node)
        params = ','.join(self.visit(param) for param in node.parameters)
        self.indent_level += 2
        body = '\n'.join(
            (self._make_indent() + self.visit(el))
            for el in node.elements
        )
        self.indent_level -= 2
        tail = self._make_indent()
        return template % (self.visit(node.prop_name), params, body, tail)
    def visit_Number(self, node):
        return node.value
    def visit_Comma(self, node):
        s = '%s, %s' % (self.visit(node.left), self.visit(node.right))
        if getattr(node, '_parens', False):
            s = '(' + s + ')'
        return s
    def visit_EmptyStatement(self, node):
        return node.value
    def visit_If(self, node):
        s = 'if ('
        if node.predicate is not None:
            s += self.visit(node.predicate)
        s += ') '
        s += self.visit(node.consequent)
        if node.alternative is not None:
            s += ' else '
            s += self.visit(node.alternative)
        return s
    def visit_Boolean(self, node):
        return node.value
    def visit_For(self, node):
        s = 'for ('
        if node.init is not None:
            s += self.visit(node.init)
        if node.init is None:
            s += ' ; '
        elif isinstance(node.init, (ast.Assign, ast.Comma, ast.FunctionCall,
                                    ast.UnaryOp, ast.Identifier, ast.BinOp,
                                    ast.Conditional, ast.Regex, ast.NewExpr)):
            # bare expression initializers need an explicit ';' here; a
            # VarStatement initializer already rendered its trailing ';'
            s += '; '
        else:
            s += ' '
        if node.cond is not None:
            s += self.visit(node.cond)
        s += '; '
        if node.count is not None:
            s += self.visit(node.count)
        s += ') ' + self.visit(node.statement)
        return s
    def visit_ForIn(self, node):
        if isinstance(node.item, ast.VarDecl):
            template = 'for (var %s in %s) '
        else:
            template = 'for (%s in %s) '
        s = template % (self.visit(node.item), self.visit(node.iterable))
        s += self.visit(node.statement)
        return s
    def visit_BinOp(self, node):
        if getattr(node, '_parens', False):
            template = '(%s %s %s)'
        else:
            template = '%s %s %s'
        return template % (
            self.visit(node.left), node.op, self.visit(node.right))
    def visit_UnaryOp(self, node):
        s = self.visit(node.value)
        if node.postfix:
            # postfix ++/-- go after the operand
            s += node.op
        elif node.op in ('delete', 'void', 'typeof'):
            # keyword operators need a separating space
            s = '%s %s' % (node.op, s)
        else:
            s = '%s%s' % (node.op, s)
        if getattr(node, '_parens', False):
            s = '(%s)' % s
        return s
    def visit_ExprStatement(self, node):
        return '%s;' % self.visit(node.expr)
    def visit_DoWhile(self, node):
        s = 'do '
        s += self.visit(node.statement)
        s += ' while (%s);' % self.visit(node.predicate)
        return s
    def visit_While(self, node):
        s = 'while (%s) ' % self.visit(node.predicate)
        s += self.visit(node.statement)
        return s
    def visit_Null(self, node):
        return 'null'
    def visit_String(self, node):
        return node.value
    def visit_Continue(self, node):
        if node.identifier is not None:
            s = 'continue %s;' % self.visit_Identifier(node.identifier)
        else:
            s = 'continue;'
        return s
    def visit_Break(self, node):
        if node.identifier is not None:
            s = 'break %s;' % self.visit_Identifier(node.identifier)
        else:
            s = 'break;'
        return s
    def visit_Return(self, node):
        if node.expr is None:
            return 'return;'
        else:
            return 'return %s;' % self.visit(node.expr)
    def visit_With(self, node):
        s = 'with (%s) ' % self.visit(node.expr)
        s += self.visit(node.statement)
        return s
    def visit_Label(self, node):
        s = '%s: %s' % (
            self.visit(node.identifier), self.visit(node.statement))
        return s
    def visit_Switch(self, node):
        s = 'switch (%s) {\n' % self.visit(node.expr)
        self.indent_level += 2
        for case in node.cases:
            s += self._make_indent() + self.visit_Case(case)
        if node.default is not None:
            s += self.visit_Default(node.default)
        self.indent_level -= 2
        s += self._make_indent() + '}'
        return s
    def visit_Case(self, node):
        s = 'case %s:\n' % self.visit(node.expr)
        self.indent_level += 2
        elements = '\n'.join(self._make_indent() + self.visit(element)
                             for element in node.elements)
        if elements:
            s += elements + '\n'
        self.indent_level -= 2
        return s
    def visit_Default(self, node):
        s = self._make_indent() + 'default:\n'
        self.indent_level += 2
        s += '\n'.join(self._make_indent() + self.visit(element)
                       for element in node.elements)
        if node.elements is not None:
            s += '\n'
        self.indent_level -= 2
        return s
    def visit_Throw(self, node):
        s = 'throw %s;' % self.visit(node.expr)
        return s
    def visit_Debugger(self, node):
        return '%s;' % node.value
    def visit_Try(self, node):
        s = 'try '
        s += self.visit(node.statements)
        if node.catch is not None:
            s += ' ' + self.visit(node.catch)
        if node.fin is not None:
            s += ' ' + self.visit(node.fin)
        return s
    def visit_Catch(self, node):
        s = 'catch (%s) %s' % (
            self.visit(node.identifier), self.visit(node.elements))
        return s
    def visit_Finally(self, node):
        s = 'finally %s' % self.visit(node.elements)
        return s
    def visit_FuncDecl(self, node):
        self.indent_level += 2
        elements = '\n'.join(self._make_indent() + self.visit(element)
                             for element in node.elements)
        self.indent_level -= 2
        s = 'function %s(%s) {\n%s' % (
            self.visit(node.identifier),
            ', '.join(self.visit(param) for param in node.parameters),
            elements,
            )
        s += '\n' + self._make_indent() + '}'
        return s
    def visit_FuncExpr(self, node):
        self.indent_level += 2
        elements = '\n'.join(self._make_indent() + self.visit(element)
                             for element in node.elements)
        self.indent_level -= 2
        # anonymous function expressions have no identifier
        ident = node.identifier
        ident = '' if ident is None else ' %s' % self.visit(ident)
        header = 'function%s(%s)'
        if getattr(node, '_parens', False):
            header = '(' + header
        s = (header + ' {\n%s') % (
            ident,
            ', '.join(self.visit(param) for param in node.parameters),
            elements,
            )
        s += '\n' + self._make_indent() + '}'
        if getattr(node, '_parens', False):
            s += ')'
        return s
    def visit_Conditional(self, node):
        if getattr(node, '_parens', False):
            template = '(%s ? %s : %s)'
        else:
            template = '%s ? %s : %s'
        s = template % (
            self.visit(node.predicate),
            self.visit(node.consequent), self.visit(node.alternative))
        return s
    def visit_Regex(self, node):
        if getattr(node, '_parens', False):
            return '(%s)' % node.value
        else:
            return node.value
    def visit_NewExpr(self, node):
        s = 'new %s(%s)' % (
            self.visit(node.identifier),
            ', '.join(self.visit(arg) for arg in node.args)
            )
        return s
    def visit_DotAccessor(self, node):
        if getattr(node, '_parens', False):
            template = '(%s.%s)'
        else:
            template = '%s.%s'
        s = template % (self.visit(node.node), self.visit(node.identifier))
        return s
    def visit_BracketAccessor(self, node):
        s = '%s[%s]' % (self.visit(node.node), self.visit(node.expr))
        return s
    def visit_FunctionCall(self, node):
        s = '%s(%s)' % (self.visit(node.identifier),
                        ', '.join(self.visit(arg) for arg in node.args))
        if getattr(node, '_parens', False):
            s = '(' + s + ')'
        return s
    def visit_Object(self, node):
        s = '{\n'
        self.indent_level += 2
        s += ',\n'.join(self._make_indent() + self.visit(prop)
                        for prop in node.properties)
        self.indent_level -= 2
        if node.properties:
            s += '\n'
        s += self._make_indent() + '}'
        return s
    def visit_Array(self, node):
        s = '['
        length = len(node.items) - 1
        for index, item in enumerate(node.items):
            # elisions (holes) render as a bare comma
            if isinstance(item, ast.Elision):
                s += ','
            elif index != length:
                s += self.visit(item) + ','
            else:
                s += self.visit(item)
        s += ']'
        return s
    def visit_This(self, node):
        return 'this'
| slideclick/slimit | src/slimit/visitors/ecmavisitor.py | Python | mit | 12,757 | [
"VisIt"
] | aa8973e27c335f46a42c768d84ec43ea2c2dd837843de7a85c232c7d9eff63b3 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import JTensor
from bigdl.nn.layer import Layer
import numpy as np
if sys.version >= '3':
    #python 3 removed the long and unicode builtins; alias them so the
    #py2-era type checks in this package keep working
    long = int
    unicode = str
class Criterion(JavaValue):
    """
    Criterion is helpful to train a neural network.
    Given an input and a target, they compute a gradient according to a given loss function.
    """
    def __init__(self, jvalue, bigdl_type, *args):
        # jvalue: an already constructed py4j java object to wrap; if falsy,
        # a new jvm object is created via the class-name based constructor
        self.value = jvalue if jvalue else callBigDlFunc(
            bigdl_type, JavaValue.jvm_class_constructor(self), *args)
        # bigdl_type: numeric precision of the backend ("float" or "double")
        self.bigdl_type = bigdl_type
    def __str__(self):
        return self.value.toString()
    def forward(self, input, target):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        Takes an input object, and computes the corresponding loss of the criterion,
        compared with `target`

        :param input: ndarray or list of ndarray
        :param target: ndarray or list of ndarray
        :return: value of loss
        """
        # lists of ndarrays are passed to the jvm side as Tables
        jinput, input_is_table = Layer.check_input(input)
        jtarget, target_is_table = Layer.check_input(target)
        output = callBigDlFunc(self.bigdl_type,
                               "criterionForward",
                               self.value,
                               jinput,
                               input_is_table,
                               jtarget,
                               target_is_table)
        return output
    def backward(self, input, target):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        Performs a back-propagation step through the criterion, with respect to the given input.

        :param input: ndarray or list of ndarray
        :param target: ndarray or list of ndarray
        :return: ndarray
        """
        jinput, input_is_table = Layer.check_input(input)
        jtarget, target_is_table = Layer.check_input(target)
        output = callBigDlFunc(self.bigdl_type,
                               "criterionBackward",
                               self.value,
                               jinput,
                               input_is_table,
                               jtarget,
                               target_is_table)
        return Layer.convert_output(output)
    @classmethod
    def of(cls, jcriterion, bigdl_type="float"):
        """
        Create a python Criterion by a java criterion object

        :param jcriterion: A java criterion object which created by Py4j
        :return: a criterion.
        """
        #bugfix: arguments used to be passed as (bigdl_type, jcriterion),
        #swapped relative to __init__(jvalue, bigdl_type); it only worked
        #because both attributes were reassigned immediately afterwards
        criterion = Criterion(jcriterion, bigdl_type)
        return criterion
class ClassNLLCriterion(Criterion):
    '''
    The negative log likelihood criterion. It is useful to train a classification problem with n
    classes. If provided, the optional argument weights should be a 1D Tensor assigning weight to
    each of the classes. This is particularly useful when you have an unbalanced training set.
    The input given through a forward() is expected to contain log-probabilities/probabilities of
    each class: input has to be a 1D Tensor of size n. Obtaining log-probabilities/probabilities
    in a neural network is easily achieved by adding a LogSoftMax/SoftMax layer in the last layer
    of your neural network. You may use CrossEntropyCriterion instead, if you prefer not to add an
    extra layer to your network. This criterion expects a class index (1 to the number of class) as
    target when calling forward(input, target) and backward(input, target).
    In the log-probabilities case, the loss can be described as:
        loss(x, class) = -x[class]
    or in the case of the weights argument it is specified as follows:
        loss(x, class) = -weights[class] * x[class]
    Due to the behaviour of the backend code, it is necessary to set sizeAverage to false when
    calculating losses in non-batch mode.
    Note that if the target is `-1`, the training process will skip this sample.
    In other words, the forward process will return zero output and the backward process
    will also return zero `gradInput`.
    By default, the losses are averaged over observations for each minibatch. However, if the field
    sizeAverage is set to false, the losses are instead summed for each minibatch.
    In particular, when weights=None, size_average=True and logProbAsInput=False, this is same as
    `sparse_categorical_crossentropy` loss in keras.

    :param weights: weights of each class
    :param size_average: whether to average or not
    :param logProbAsInput: indicating whether to accept log-probabilities or probabilities as input.

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> classNLLCriterion = ClassNLLCriterion(weights, True, True)
    creating: createClassNLLCriterion
    >>> classNLLCriterion = ClassNLLCriterion()
    creating: createClassNLLCriterion
    '''
    def __init__(self,
                 weights=None,
                 size_average=True,
                 logProbAsInput=True,
                 bigdl_type="float"):
        #the weights ndarray is wrapped as a JTensor for the jvm side;
        #presumably from_ndarray maps None to a jvm null - confirm
        super(ClassNLLCriterion, self).__init__(None, bigdl_type,
                                                JTensor.from_ndarray(weights),
                                                size_average, logProbAsInput)
class MSECriterion(Criterion):
    '''
    Creates a criterion that measures the mean squared error between n elements
    in the input x and output y:
```
    loss(x, y) = 1/n \sum |x_i - y_i|^2
```

    If x and y are d-dimensional Tensors with a total of n elements,
    the sum operation still operates over all the elements, and divides by n.
    The two Tensors must have the same number of elements (but their sizes might be different).
    The division by n can be avoided if one sets the internal variable sizeAverage to false.
    By default, the losses are averaged over observations for each minibatch. However,
    if the field sizeAverage is set to false, the losses are instead summed.

    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    '''
    def __init__(self, bigdl_type="float"):
        #no constructor arguments beyond the backend precision
        super(MSECriterion, self).__init__(None, bigdl_type)
class AbsCriterion(Criterion):
    '''
    measures the mean absolute value of the element-wise difference between input

    :param size_average: whether to average the loss over observations

    >>> absCriterion = AbsCriterion(True)
    creating: createAbsCriterion
    '''
    def __init__(self,
                 size_average=True,
                 bigdl_type="float"):
        super(AbsCriterion, self).__init__(None, bigdl_type,
                                           size_average)
class ClassSimplexCriterion(Criterion):
    '''
    ClassSimplexCriterion implements a criterion for classification.
    It learns an embedding per class, where each class' embedding is a
    point on an (N-1)-dimensional simplex, where N is the number of classes.

    :param nClasses: the number of classes.

    >>> classSimplexCriterion = ClassSimplexCriterion(2)
    creating: createClassSimplexCriterion
    '''
    def __init__(self,
                 n_classes,
                 bigdl_type="float"):
        super(ClassSimplexCriterion, self).__init__(None, bigdl_type,
                                                    n_classes)
class CosineDistanceCriterion(Criterion):
    """
    Creates a criterion that measures the loss given an input and target,
    Loss = 1 - cos(x, y)

    :param size_average: whether to average the loss over observations

    >>> cosineDistanceCriterion = CosineDistanceCriterion(True)
    creating: createCosineDistanceCriterion
    >>> cosineDistanceCriterion.forward(np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    ...                                 np.array([5.0, 4.0, 3.0, 2.0, 1.0]))
    0.07272728
    """
    def __init__(self,
                 size_average=True,
                 bigdl_type="float"):
        super(CosineDistanceCriterion, self).__init__(None, bigdl_type,
                                                      size_average)
class CosineEmbeddingCriterion(Criterion):
    """
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors, and a Tensor label y with values 1 or -1.

    :param margin: a number from -1 to 1, 0 to 0.5 is suggested
    :param size_average: whether to average the loss over observations

    >>> cosineEmbeddingCriterion = CosineEmbeddingCriterion(1e-5, True)
    creating: createCosineEmbeddingCriterion
    >>> cosineEmbeddingCriterion.forward([np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    ...                                   np.array([5.0, 4.0, 3.0, 2.0, 1.0])],
    ...                                 [np.ones(5)])
    0.0
    """
    def __init__(self,
                 margin=0.0,
                 size_average=True,
                 bigdl_type="float"):
        super(CosineEmbeddingCriterion, self).__init__(None, bigdl_type,
                                                       margin,
                                                       size_average)
class DistKLDivCriterion(Criterion):

    '''
    The Kullback-Leibler divergence criterion

    :param sizeAverage: whether to average the loss over observations

    >>> distKLDivCriterion = DistKLDivCriterion(True)
    creating: createDistKLDivCriterion
    '''

    def __init__(self,
                 size_average=True,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createDistKLDivCriterion).
        super(DistKLDivCriterion, self).__init__(None, bigdl_type,
                                                 size_average)
class HingeEmbeddingCriterion(Criterion):

    '''
    Creates a criterion that measures the loss given an
    input x which is a 1-dimensional vector and a label y (1 or -1).
    This is usually used for measuring whether two inputs are similar
    or dissimilar,
    e.g. using the L1 pairwise distance, and is typically used for
    learning nonlinear embeddings or semi-supervised learning.
    If x and y are n-dimensional Tensors, the sum operation still operates
    over all the elements, and divides by n (this can be avoided if one sets
    the internal variable sizeAverage to false). The margin has a default
    value of 1, or can be set in the constructor.

    :param margin: margin of the hinge, default 1
    :param size_average: whether to average the loss over observations

    >>> hingeEmbeddingCriterion = HingeEmbeddingCriterion(1e-5, True)
    creating: createHingeEmbeddingCriterion
    '''

    def __init__(self,
                 margin=1.0,
                 size_average=True,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createHingeEmbeddingCriterion).
        super(HingeEmbeddingCriterion, self).__init__(None, bigdl_type,
                                                      margin,
                                                      size_average)
class L1HingeEmbeddingCriterion(Criterion):

    '''
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors, and a label y (1 or -1):

    :param margin: margin of the hinge, default 1

    >>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion(1e-5)
    creating: createL1HingeEmbeddingCriterion
    >>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion()
    creating: createL1HingeEmbeddingCriterion
    >>> input1 = np.array([2.1, -2.2])
    >>> input2 = np.array([-0.55, 0.298])
    >>> input = [input1, input2]
    >>> target = np.array([1.0])
    >>> result = l1HingeEmbeddingCriterion.forward(input, target)
    >>> (result == 5.148)
    True
    '''

    def __init__(self,
                 margin=1.0,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createL1HingeEmbeddingCriterion).
        super(L1HingeEmbeddingCriterion, self).__init__(None, bigdl_type,
                                                        margin)
class MarginCriterion(Criterion):

    '''
    Creates a criterion that optimizes a two-class classification hinge loss (margin-based loss)
    between input x (a Tensor of dimension 1) and output y.

    When margin = 1, size_average = True and squared = False, this is the same as hinge loss in keras;
    When margin = 1, size_average = False and squared = True, this is the same as squared_hinge loss in keras.

    :param margin: if unspecified, is by default 1.
    :param size_average: size average in a mini-batch
    :param squared: whether to calculate the squared hinge loss

    >>> marginCriterion = MarginCriterion(1e-5, True, False)
    creating: createMarginCriterion
    '''

    def __init__(self,
                 margin=1.0,
                 size_average=True,
                 squared=False,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createMarginCriterion).
        super(MarginCriterion, self).__init__(None, bigdl_type,
                                              margin,
                                              size_average,
                                              squared)
class MarginRankingCriterion(Criterion):

    '''
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors of size 1 (they contain only scalars), and a label y (1 or -1).
    In batch mode, x is a table of two Tensors of size batchsize, and y is a Tensor of size
    batchsize containing 1 or -1 for each corresponding pair of elements in the input Tensor.
    If y == 1 then it assumed the first input should be ranked higher (have a larger value) than
    the second input, and vice-versa for y == -1.

    :param margin: margin of the ranking loss, default 1
    :param size_average: whether to average the loss over observations

    >>> marginRankingCriterion = MarginRankingCriterion(1e-5, True)
    creating: createMarginRankingCriterion
    '''

    def __init__(self,
                 margin=1.0,
                 size_average=True,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createMarginRankingCriterion).
        super(MarginRankingCriterion, self).__init__(None, bigdl_type,
                                                     margin,
                                                     size_average)
class MultiCriterion(Criterion):

    '''
    a weighted sum of other criterions each applied to the same input and target

    >>> multiCriterion = MultiCriterion()
    creating: createMultiCriterion
    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    >>> multiCriterion = multiCriterion.add(mSECriterion)
    >>> multiCriterion = multiCriterion.add(mSECriterion)
    '''

    def __init__(self,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createMultiCriterion).
        super(MultiCriterion, self).__init__(None, bigdl_type)

    def add(self, criterion, weight=1.0):
        """Append ``criterion`` with the given ``weight`` and return self
        so calls can be chained."""
        # self.value is the wrapped JVM object; add mutates it in place.
        self.value.add(criterion.value, weight)
        return self
class MultiLabelMarginCriterion(Criterion):

    '''
    Creates a criterion that optimizes a multi-class multi-classification hinge loss (
    margin-based loss) between input x and output y (which is a Tensor of target class indices)

    :param size_average: size average in a mini-batch

    >>> multiLabelMarginCriterion = MultiLabelMarginCriterion(True)
    creating: createMultiLabelMarginCriterion
    '''

    def __init__(self,
                 size_average=True,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createMultiLabelMarginCriterion).
        super(MultiLabelMarginCriterion, self).__init__(None, bigdl_type,
                                                        size_average)
class ParallelCriterion(Criterion):

    '''
    ParallelCriterion is a weighted sum of other criterions each applied to a different input
    and target. Set repeatTarget = true to share the target for criterions.

    Use add(criterion[, weight]) method to add criterion. Where weight is a scalar(default 1).

    :param repeat_target: Whether to share the target for all criterions.

    >>> parallelCriterion = ParallelCriterion(True)
    creating: createParallelCriterion
    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    >>> parallelCriterion = parallelCriterion.add(mSECriterion)
    >>> parallelCriterion = parallelCriterion.add(mSECriterion)
    '''

    def __init__(self,
                 repeat_target=False,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createParallelCriterion).
        super(ParallelCriterion, self).__init__(None, bigdl_type,
                                                repeat_target)

    def add(self, criterion, weight=1.0):
        """Append ``criterion`` with the given ``weight`` and return self
        so calls can be chained."""
        # self.value is the wrapped JVM object; add mutates it in place.
        self.value.add(criterion.value, weight)
        return self
class KLDCriterion(Criterion):

    '''
    Computes the KL-divergence of the Gaussian distribution.

    >>> KLDCriterion = KLDCriterion()
    creating: createKLDCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra parameters: delegates to createKLDCriterion.
        super(KLDCriterion, self).__init__(None, bigdl_type)
class GaussianCriterion(Criterion):

    '''
    Computes the log-likelihood of a sample x given a Gaussian distribution p.

    >>> GaussianCriterion = GaussianCriterion()
    creating: createGaussianCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra parameters: delegates to createGaussianCriterion.
        super(GaussianCriterion, self).__init__(None, bigdl_type)
class SmoothL1Criterion(Criterion):

    '''
    Creates a criterion that can be thought of as a smooth version of the AbsCriterion.
    It uses a squared term if the absolute element-wise error falls below 1.
    It is less sensitive to outliers than the MSECriterion and in some
    cases prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).

    ```
                          | 0.5 * (x_i - y_i)^2^, if |x_i - y_i| < 1
    loss(x, y) = 1/n \sum |
                          | |x_i - y_i| - 0.5,   otherwise
    ```

    If x and y are d-dimensional Tensors with a total of n elements,
    the sum operation still operates over all the elements, and divides by n.
    The division by n can be avoided if one sets the internal variable sizeAverage to false

    :param size_average: whether to average the loss

    >>> smoothL1Criterion = SmoothL1Criterion(True)
    creating: createSmoothL1Criterion
    '''

    def __init__(self,
                 size_average=True,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createSmoothL1Criterion).
        super(SmoothL1Criterion, self).__init__(None, bigdl_type,
                                                size_average)
class SmoothL1CriterionWithWeights(Criterion):

    '''
    a smooth version of the AbsCriterion
    It uses a squared term if the absolute element-wise error falls below 1.
    It is less sensitive to outliers than the MSECriterion and in some cases
    prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).

    ```
    d = (x - y) * w_in

    loss(x, y, w_in, w_out)
                | 0.5 * (sigma * d_i)^2 * w_out          if |d_i| < 1 / sigma / sigma
      = 1/n \sum |
                | (|d_i| - 0.5 / sigma / sigma) * w_out  otherwise
    ```

    :param sigma: transition point between the quadratic and linear regime
    :param num: NOTE(review): interpreted on the Scala side; presumably the
        normalization count -- confirm against the JVM implementation

    >>> smoothL1CriterionWithWeights = SmoothL1CriterionWithWeights(1e-5, 1)
    creating: createSmoothL1CriterionWithWeights
    '''

    def __init__(self,
                 sigma,
                 num=0,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createSmoothL1CriterionWithWeights).
        super(SmoothL1CriterionWithWeights, self).__init__(None, bigdl_type,
                                                           sigma,
                                                           num)
class SoftmaxWithCriterion(Criterion):

    '''
    Computes the multinomial logistic loss for a one-of-many classification task,
    passing real-valued predictions through a softmax to get a probability distribution over classes.
    It should be preferred over separate SoftmaxLayer + MultinomialLogisticLossLayer
    as its gradient computation is more numerically stable.

    :param ignoreLabel: (optional) Specify a label value that should be ignored when computing the loss.
    :param normalizeMode: How to normalize the output loss.

    >>> softmaxWithCriterion = SoftmaxWithCriterion()
    creating: createSoftmaxWithCriterion
    >>> softmaxWithCriterion = SoftmaxWithCriterion(1, "FULL")
    creating: createSoftmaxWithCriterion
    '''

    def __init__(self,
                 ignore_label=None,
                 normalize_mode="VALID",
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createSoftmaxWithCriterion).
        super(SoftmaxWithCriterion, self).__init__(None, bigdl_type,
                                                   ignore_label,
                                                   normalize_mode)
class TimeDistributedCriterion(Criterion):

    '''
    This class is intended to support inputs with 3 or more dimensions.
    Apply Any Provided Criterion to every temporal slice of an input.

    :param criterion: embedded criterion
    :param size_average: whether to divide the sequence length

    >>> td = TimeDistributedCriterion(ClassNLLCriterion())
    creating: createClassNLLCriterion
    creating: createTimeDistributedCriterion
    '''

    def __init__(self, criterion, size_average=False, bigdl_type="float"):
        # The wrapped criterion is forwarded to the JVM-side factory.
        super(TimeDistributedCriterion, self).__init__(
            None, bigdl_type, criterion, size_average)
class CrossEntropyCriterion(Criterion):

    """
    This criterion combines LogSoftMax and ClassNLLCriterion in one single class.

    :param weights: A tensor assigning weight to each of the classes
    :param size_average: whether to average the loss over observations

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> cec = CrossEntropyCriterion(weights)
    creating: createCrossEntropyCriterion
    >>> cec = CrossEntropyCriterion()
    creating: createCrossEntropyCriterion
    """

    def __init__(self,
                 weights=None,
                 size_average=True,
                 bigdl_type="float"):
        # JTensor.from_ndarray tolerates weights=None (the weight-less
        # doctest above relies on that).
        super(CrossEntropyCriterion, self).__init__(None, bigdl_type,
                                                    JTensor.from_ndarray(
                                                        weights),
                                                    size_average)
class BCECriterion(Criterion):

    '''
    Creates a criterion that measures the Binary Cross Entropy
    between the target and the output

    :param weights: weights for each class
    :param sizeAverage: whether to average the loss or not

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> bCECriterion = BCECriterion(weights)
    creating: createBCECriterion
    >>> bCECriterion = BCECriterion()
    creating: createBCECriterion
    '''

    def __init__(self,
                 weights=None,
                 size_average=True,
                 bigdl_type="float"):
        # JTensor.from_ndarray tolerates weights=None (see doctest above).
        super(BCECriterion, self).__init__(None, bigdl_type,
                                           JTensor.from_ndarray(weights),
                                           size_average)
class MultiLabelSoftMarginCriterion(Criterion):

    '''
    A MultiLabel multiclass criterion based on sigmoid:
    the loss is:
    ```
    l(x,y) = - sum_i y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i])
    ```
    where p[i] = exp(x[i]) / (1 + exp(x[i]))
    and with weights:
    ```
    l(x,y) = - sum_i weights[i] (y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i]))
    ```

    :param weights: optional per-class weights
    :param size_average: whether to average the loss over observations

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion(weights)
    creating: createMultiLabelSoftMarginCriterion
    >>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion()
    creating: createMultiLabelSoftMarginCriterion
    '''

    def __init__(self,
                 weights=None,
                 size_average=True,
                 bigdl_type="float"):
        # JTensor.from_ndarray tolerates weights=None (see doctest above).
        super(MultiLabelSoftMarginCriterion, self).__init__(None, bigdl_type,
                                                            JTensor.from_ndarray(weights),
                                                            size_average)
class MultiMarginCriterion(Criterion):

    '''
    Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss)
    between input x and output y (which is a target class index).

    :param p: norm degree of the margin loss (Torch convention: 1 or 2)
    :param weights: optional per-class weights
    :param margin: margin of the hinge, default 1
    :param size_average: whether to average the loss over observations

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> multiMarginCriterion = MultiMarginCriterion(1,weights)
    creating: createMultiMarginCriterion
    >>> multiMarginCriterion = MultiMarginCriterion()
    creating: createMultiMarginCriterion
    '''

    def __init__(self,
                 p=1,
                 weights=None,
                 margin=1.0,
                 size_average=True,
                 bigdl_type="float"):
        # JTensor.from_ndarray tolerates weights=None (see doctest above).
        super(MultiMarginCriterion, self).__init__(None, bigdl_type,
                                                   p,
                                                   JTensor.from_ndarray(weights),
                                                   margin,
                                                   size_average)
class SoftMarginCriterion(Criterion):

    """
    Creates a criterion that optimizes a two-class classification logistic loss
    between input x (a Tensor of dimension 1) and output y (which is a tensor
    containing either 1s or -1s).

    ```
    loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x:nElement()
    ```

    :param sizeaverage: The normalization by the number of elements in the
        input can be disabled by setting sizeAverage to False

    >>> softMarginCriterion = SoftMarginCriterion(False)
    creating: createSoftMarginCriterion
    >>> softMarginCriterion = SoftMarginCriterion()
    creating: createSoftMarginCriterion
    """

    def __init__(self,
                 size_average=True,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createSoftMarginCriterion).
        super(SoftMarginCriterion, self).__init__(None, bigdl_type, size_average)
class DiceCoefficientCriterion(Criterion):

    '''
    The Dice-Coefficient criterion
    input: Tensor,target: Tensor

    ```
    return:      2 * (input intersection target)
            1 - ----------------------------------
                    input union target
    ```

    :param size_average: whether to average the loss over observations
    :param epsilon: small constant guarding against division by zero

    >>> diceCoefficientCriterion = DiceCoefficientCriterion(size_average = True, epsilon = 1.0)
    creating: createDiceCoefficientCriterion
    >>> diceCoefficientCriterion = DiceCoefficientCriterion()
    creating: createDiceCoefficientCriterion
    '''

    def __init__(self,
                 size_average=True,
                 epsilon=1.0,
                 bigdl_type="float"):
        # Delegates to the JVM-side factory (createDiceCoefficientCriterion).
        super(DiceCoefficientCriterion, self).__init__(None, bigdl_type,
                                                       size_average,
                                                       epsilon)
class L1Cost(Criterion):

    '''
    compute L1 norm for input, and sign of input

    >>> l1Cost = L1Cost()
    creating: createL1Cost
    '''

    def __init__(self,
                 bigdl_type="float"):
        # No extra parameters: delegates to createL1Cost.
        super(L1Cost, self).__init__(None, bigdl_type)
class CosineProximityCriterion(Criterion):

    '''
    compute the negative of the mean cosine proximity between predictions and targets.

    ```
    x'(i) = x(i) / sqrt(max(sum(x(i)^2), 1e-12))
    y'(i) = y(i) / sqrt(max(sum(x(i)^2), 1e-12))
    cosine_proximity(x, y) = sum_i(-1 * x'(i) * y'(i))
    ```

    NOTE(review): the y'(i) line above presumably means sum(y(i)^2) --
    confirm against the Scala implementation before relying on the formula.

    >>> cosineProximityCriterion = CosineProximityCriterion()
    creating: createCosineProximityCriterion
    '''

    def __init__(self,
                 bigdl_type="float"):
        # No extra parameters: delegates to createCosineProximityCriterion.
        super(CosineProximityCriterion, self).__init__(None, bigdl_type)
def _test():
    """Run this module's doctests against a local SparkContext.

    Exits the interpreter with a nonzero status if any doctest fails.
    """
    import doctest
    from pyspark import SparkContext
    from bigdl.nn import criterion
    from bigdl.util.common import init_engine
    from bigdl.util.common import create_spark_conf
    # Expose every module-level name (plus sc) to the doctests.
    globs = criterion.__dict__.copy()
    sc = SparkContext(master="local[4]", appName="test criterion",
                      conf=create_spark_conf())
    globs['sc'] = sc
    init_engine()
    # ELLIPSIS lets doctests elide variable parts of the output.
    (failure_count, test_count) = doctest.testmod(globs=globs,
                                                  optionflags=doctest.ELLIPSIS)
    if failure_count:
        exit(-1)
# Script entry point: run the doctest suite.
if __name__ == "__main__":
    _test()
| jenniew/BigDL | pyspark/bigdl/nn/criterion.py | Python | apache-2.0 | 28,129 | [
"Gaussian"
] | bab341a3e4b0f70fc4374a0e5f4ac7bc01ef136b5449cd563e7d9de5bb6b980b |
import os
import sys
import os.path as op
import pysam
from bcbio.utils import file_exists, safe_makedir, chdir, get_perl_exports
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
def run(data):
    """Run miRDeep2 novel-miRNA discovery over the prepared sample set.

    ``data`` is the bcbio nested sample list; config, genome and miRBase
    resources are taken from the first sample.  Returns the path to the
    parsed novel-miRNA database directory when miRDeep2 produced results,
    otherwise returns None implicitly.
    """
    config = data[0][0]['config']
    work_dir = dd.get_work_dir(data[0][0])
    genome = dd.get_ref_file(data[0][0])
    # miRDeep2.pl is expected next to the python executable (conda layout).
    mirdeep2 = os.path.join(os.path.dirname(sys.executable), "miRDeep2.pl")
    perl_exports = get_perl_exports()
    # Fall back to miRDeep2's "none"/"na" placeholders when no miRBase
    # annotation is available for this genome.
    hairpin, mature, species = "none", "none", "na"
    rfam_file = dd.get_mirdeep2_file(data[0][0])
    if file_exists(dd.get_mirbase_hairpin(data[0][0])):
        species = dd.get_species(data[0][0])
        hairpin = dd.get_mirbase_hairpin(data[0][0])
        mature = dd.get_mirbase_mature(data[0][0])
    bam_file = op.join(work_dir, "align", "seqs.bam")
    seqs_dir = op.join(work_dir, "seqcluster", "prepare")
    collapsed = op.join(seqs_dir, "seqs.ma")
    out_dir = op.join(work_dir, "mirdeep2")
    out_file = op.join(out_dir, "result_res.csv")
    safe_makedir(out_dir)
    # miRDeep2 writes its outputs into the current directory.
    with chdir(out_dir):
        collapsed, bam_file = _prepare_inputs(collapsed, bam_file, out_dir)
        cmd = ("{perl_exports} && perl {mirdeep2} {collapsed} {genome} {bam_file} {mature} none {hairpin} -f {rfam_file} -r simple -c -d -P -t {species} -z res").format(**locals())
        # NOTE(review): cmd is already fully formatted above, so the second
        # .format(**locals()) below is a no-op -- confirm before removing.
        if file_exists(mirdeep2) and not file_exists(out_file) and file_exists(rfam_file):
            do.run(cmd.format(**locals()), "Running mirdeep2.")
        if file_exists(out_file):
            novel_db = _parse_novel(out_file, dd.get_species(data[0][0]))
            return novel_db
def _prepare_inputs(ma_fn, bam_file, out_dir):
    """
    Convert to fastq with counts

    Rewrites read names as ``<name>_x<total count>`` (miRDeep2's expected
    collapsed-read naming) in both a FASTA file and the alignment BAM.
    Returns (fixed_fa, fixed_bam) paths.

    NOTE(review): uses ``in_handle.next()`` and ``print >>`` statements,
    so this function is Python 2 only.
    """
    fixed_fa = os.path.join(out_dir, "file_reads.fa")
    count_name = dict()
    with file_transaction(fixed_fa) as out_tx:
        with open(out_tx, 'w') as out_handle:
            with open(ma_fn) as in_handle:
                h = in_handle.next()  # skip the header line of the .ma table
                for line in in_handle:
                    cols = line.split("\t")
                    # Total abundance = sum of per-sample counts in cols[2:].
                    name_with_counts = "%s_x%s" % (cols[0], sum(map(int, cols[2:])))
                    count_name[cols[0]] = name_with_counts
                    print >>out_handle, ">%s\n%s" % (name_with_counts, cols[1])
    fixed_bam = os.path.join(out_dir, "align.bam")
    bam_handle = pysam.AlignmentFile(bam_file, "rb")
    # Rewrite each aligned read's name to the counted form.
    with pysam.AlignmentFile(fixed_bam, "wb", template=bam_handle) as out_handle:
        for read in bam_handle.fetch():
            read.query_name = count_name[read.query_name]
            out_handle.write(read)
    return fixed_fa, fixed_bam
def _parse_novel(csv_file, sps="new"):
    """Create input of novel miRNAs from miRDeep2

    Parses the miRDeep2 ``result_res.csv`` report and writes
    ``novel/hairpin.fa`` and ``novel/miRNA.str`` containing the predicted
    novel precursors, skipping low-score (< 1) predictions and duplicate
    mature sequences.  Returns the absolute path of the ``novel`` directory.

    NOTE: uses Python 2 only syntax (``print >>``, ``.next()``), matching
    the rest of this module.
    """
    read = 0
    seen = set()
    safe_makedir("novel")
    with open("novel/hairpin.fa", "w") as fa_handle, open("novel/miRNA.str", "w") as str_handle:
        with open(csv_file) as in_handle:
            for line in in_handle:
                # The novel-miRNA table ends where the known-miRNA table begins.
                if line.startswith("mature miRBase miRNAs detected by miRDeep2"):
                    break
                if line.startswith("novel miRNAs predicted"):
                    read = 1
                    line = in_handle.next()  # skip the column header line
                    continue
                if read and line.strip():
                    cols = line.strip().split("\t")
                    name, start, score = cols[0], cols[16], cols[1]
                    # BUG FIX: score is a string; the previous string/int
                    # comparison (score < 1) was always False under Python 2,
                    # so low-confidence predictions were never filtered.
                    if float(score) < 1:
                        continue
                    m5p, m3p, pre = cols[13], cols[14], cols[15].replace('u', 't').upper()
                    # 1-based coordinates of the mature arms inside the precursor.
                    m5p_start = cols[15].find(m5p) + 1
                    m3p_start = cols[15].find(m3p) + 1
                    m5p_end = m5p_start + len(m5p) - 1
                    m3p_end = m3p_start + len(m3p) - 1
                    if m5p in seen:
                        continue
                    print >>fa_handle, (">{sps}-{name} {start}\n{pre}").format(**locals())
                    print >>str_handle, (">{sps}-{name} ({score}) [{sps}-{name}-5p:{m5p_start}-{m5p_end}] [{sps}-{name}-3p:{m3p_start}-{m3p_end}]").format(**locals())
                    seen.add(m5p)
    return op.abspath("novel")
| Cyberbio-Lab/bcbio-nextgen | bcbio/srna/mirdeep.py | Python | mit | 4,242 | [
"pysam"
] | 14cc4e468a1ec4163eedd4a8f91bb981843633427fdbfb512870b8e01a7b3d63 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Plotting function helper script
'''
from matplotlib.colors import LogNorm # , SymLogNorm
from mpi4py import MPI
import analysis_params
import plotting_helpers as phlp
import neuron
import LFPy
from hybridLFPy import helpers
import h5py
import glob
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.collections import PolyCollection
from matplotlib.path import Path
from matplotlib import patches
import matplotlib.pyplot as plt
from builtins import open, zip
import os
import numpy as np
# Select a non-interactive backend on headless systems.
# NOTE(review): matplotlib.pyplot is already imported above, so this call to
# matplotlib.use('Agg') comes too late to switch the backend -- confirm and
# move the backend selection before the pyplot import.
if 'DISPLAY' not in os.environ:
    import matplotlib
    matplotlib.use('Agg')
import matplotlib.style
matplotlib.style.use('classic')

###################################
# Initialization of MPI stuff     #
###################################
# Global communicator handles used throughout this script.
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
######################################
### OUTSIDE SCOPE DEFINITIONS ###
######################################
######################################
### FUNCTIONS FOR FILLING AXES ###
######################################
def network_sketch(ax, highlight=None, labels=True, yscaling=1.):
    '''
    Draw a schematic of the point-neuron network (thalamus plus four
    cortical layers with excitatory/inhibitory populations and their
    axonal connections) into the matplotlib axes ``ax``.

    highlight : None or string
        if string, then only the label of this population is set and the box is highlighted
    labels : bool
        if True, draw the population/layer names and the E/I legend
    yscaling : float
        vertical scaling factor applied to the sketch geometry
    '''
    # Maps population name -> (layer index from bottom, 0=E / 1=I column).
    name_to_id_mapping = {'L6E': (0, 0),
                          'L6I': (0, 1),
                          'L5E': (1, 0),
                          'L5I': (1, 1),
                          'L4E': (2, 0),
                          'L4I': (2, 1),
                          'L23E': (3, 0),
                          'L23I': (3, 1)
                          }

    showgrid = False  # switch on/off grid

    # sketch parameters (all in axes-fraction-like units)
    layer_x = 0.1  # x position of left boundary of cortex layers
    layer6_y = 0.2 * yscaling  # y position of lower boundary of layer 6
    layer_width = 0.65  # width of cortex layers
    layer_height = 0.21 * yscaling  # height of cortex layers
    layer_colors = ['0.9', '0.8', '0.9', '0.8']  # layer colors
    c_pop_size = 0.15  # cortex population size
    c_pop_dist = 0.17  # distance between cortex populations
    t_pop_size = 0.15  # thalamus population size
    t_pop_y = 0.0  # y position of lower thalamus boundary
    axon_cell_sep = 0.04  # distance between axons and popualations
    # y position of cortico-cortical synapses (relative to cortex population)
    cc_input_y = 0.6 * yscaling
    # y position of thalamo-cortical synapses (relative to cortex population)
    tc_input_y = 0.4 * yscaling
    # color of excitatory axons/synapses
    exc_clr = 'k' if analysis_params.bw else analysis_params.colorE
    # color of inhibitory axons/synapses
    inh_clr = 'gray' if analysis_params.bw else analysis_params.colorI
    lw_pop = 0.5  # linewidth for populations
    lw_axons = 0.4  # linewidth for axons
    arrow_size = 0.013  # arrow size
    conn_radius = 0.005  # radius of connector marker
    legend_length = 0.07  # length of legend arrows
    colors = phlp.get_colors(8)[::-1]  # colors of each population
    fontdict1 = {'fontsize': 6,  # population name
                 'weight': 'normal',
                 'horizontalalignment': 'center',
                 'verticalalignment': 'center'}
    fontdict2 = {'fontsize': 6,  # cortico-cortical input
                 'weight': 'normal',
                 'horizontalalignment': 'center',
                 'verticalalignment': 'center'}
    fontdict3 = {'fontsize': 6,  # legend
                 'weight': 'normal',
                 'horizontalalignment': 'left',
                 'verticalalignment': 'center'}

    ##########################################################################
    # local drawing helpers

    def draw_box(
            ax,
            pos,
            lw=1.,
            ls='solid',
            eclr='k',
            fclr='w',
            zorder=0,
            clip_on=False,
            boxstyle=patches.BoxStyle(
                "Round",
                pad=0.0),
            padadjust=0.):
        '''Draws a rectangle.'''
        # pos = [x, y, width, height]; padadjust shrinks the box so the
        # rounded padding does not enlarge its footprint.
        rect = patches.FancyBboxPatch(
            (pos[0] + padadjust,
             pos[1] + padadjust),
            pos[2] - 2 * padadjust,
            pos[3] - 2 * padadjust,
            ec=eclr,
            fc=fclr,
            lw=lw,
            ls=ls,
            zorder=zorder,
            clip_on=clip_on,
            boxstyle=boxstyle)
        ax.add_patch(rect)

    def draw_circle(
            ax,
            xy,
            radius,
            lw=1.,
            ls='solid',
            eclr='k',
            fclr='w',
            zorder=0):
        '''Draws a circle.'''
        circ = plt.Circle((xy[0], xy[1]), radius=radius,
                          ec=eclr, fc=fclr, lw=lw, ls=ls, zorder=zorder)
        ax.add_patch(circ)

    def put_text(ax, xy, txt, clr, fontdict, zorder=10):
        '''Puts text to a specific position.'''
        ax.text(xy[0], xy[1], txt, fontdict=fontdict, color=clr, zorder=zorder)

    def draw_line(ax, path, lw=1., ls='solid', lclr='k', zorder=0):
        '''Draws a path.'''
        # pth = path.Path(np.array(path))
        pth = Path(np.array(path))
        patch = patches.PathPatch(
            pth,
            fill=False,
            lw=lw,
            ls=ls,
            ec=lclr,
            fc=lclr,
            zorder=zorder)
        ax.add_patch(patch)

    def draw_arrow(
            ax,
            path,
            lw=1.0,
            ls='solid',
            lclr='k',
            arrow_size=0.025,
            zorder=0):
        '''Draws a path with an arrow at the end. '''
        # Direction of the final segment determines the arrow orientation.
        x = path[-2][0]
        y = path[-2][1]
        dx = path[-1][0] - path[-2][0]
        dy = path[-1][1] - path[-2][1]
        D = np.array([dx, dy])
        D = D / np.sqrt(D[0]**2 + D[1]**2)
        # Shorten the drawn path so the line does not poke out of the head.
        path2 = np.array(path).copy()
        path2[-1, :] = path2[-1, :] - arrow_size * D
        pth = Path(np.array(path2))
        patch = patches.PathPatch(
            pth,
            fill=False,
            lw=lw,
            ls=ls,
            ec=lclr,
            fc=lclr,
            zorder=zorder)
        ax.add_patch(patch)
        arr = patches.FancyArrow(
            x, y, dx, dy,
            length_includes_head=True, width=0.0, head_width=arrow_size,
            overhang=0.2, ec=lclr, fc=lclr, linewidth=0)
        ax.add_patch(arr)

    ##################################################
    # populations

    # cortex
    layer_pos = []
    c_pop_pos = []
    for i in range(4):
        # cortex layers
        layer_pos += [[layer_x, layer6_y + i * layer_height *
                       yscaling, layer_width, layer_height]]  # layer positions
        draw_box(ax, layer_pos[i], lw=0., fclr=layer_colors[i], zorder=0)

        # cortex populations
        l_margin = (layer_width - 2. * c_pop_size - c_pop_dist) / 2.
        b_margin = (layer_height - c_pop_size) / 2.
        # positions of cortex populations
        c_pop_pos += [[[layer_pos[i][0] + l_margin,
                        layer_pos[i][1] + b_margin,
                        c_pop_size, c_pop_size],  # E
                       [layer_pos[i][0] + l_margin + c_pop_size + c_pop_dist,
                        layer_pos[i][1] + b_margin,
                        c_pop_size, c_pop_size]]]  # I
        # draw_box(ax,c_pop_pos[i][0],lw=lw_pop,eclr='k',fclr='w',zorder=2) ## E
        # draw_box(ax,c_pop_pos[i][1],lw=lw_pop,eclr='k',fclr='w',zorder=2) ## I
        draw_box(ax,
                 c_pop_pos[i][0],
                 lw=lw_pop,
                 eclr='k',
                 fclr=colors[i * 2 + 1],
                 zorder=2,
                 boxstyle=patches.BoxStyle("Round",
                                           pad=0.02),
                 padadjust=0.02)  # E
        draw_box(ax,
                 c_pop_pos[i][1],
                 lw=lw_pop,
                 eclr='k',
                 fclr=colors[i * 2],
                 zorder=2,
                 boxstyle=patches.BoxStyle("Round",
                                           pad=0.02),
                 padadjust=0.02)  # I

    # thalamus
    c_center_x = layer_x + layer_width / 2.  # x position of cortex center
    t_pos = [
        c_center_x - t_pop_size / 2.,
        t_pop_y * yscaling,
        t_pop_size,
        t_pop_size]  # thalamus position
    # draw_box(ax,t_pos,lw=lw_pop,eclr='k',fclr='w',zorder=2) ## Th
    draw_box(
        ax,
        t_pos,
        lw=lw_pop,
        eclr='k',
        fclr='k',
        zorder=2,
        boxstyle=patches.BoxStyle(
            "Round",
            pad=0.02),
        padadjust=0.02)  # Th

    ##################################################
    # intracortical axons
    # horizontal spacing between the 8 vertical axon lanes in the E/I gap
    axon_x_dist = (c_pop_dist - 2. * axon_cell_sep) / 7.
    assert(axon_x_dist > 0.)
    axon_y_dist = c_pop_size / 9.  # *yscaling

    c_axon_x = []
    c_axon_y = []
    # x positions of vertical intracortical axons
    for i in range(4):  # pre layer
        exc = c_pop_pos[i][0][0] + c_pop_size + \
            axon_cell_sep + i * axon_x_dist  # E
        inh = exc + 4. * axon_x_dist  # I
        c_axon_x += [[exc, inh]]
    # y positions of horizontal intracortical axons
    for i in range(4):  # post layer
        c_axon_y += [[]]
        for j in range(4):  # pre layer
            exc = c_pop_pos[i][0][1] + (j + 1.) * axon_y_dist  # E
            inh = c_pop_pos[i][0][1] + c_pop_size - (j + 1.) * axon_y_dist  # I
            c_axon_y[i] += [[exc, inh]]

    # vertical intracortical axons
    for i in range(4):
        draw_line(ax,
                  [[c_axon_x[i][0],
                    c_axon_y[0][i][0]],
                   [c_axon_x[i][0],
                    c_axon_y[-1][i][0]]],
                  lw=lw_axons,
                  ls='solid',
                  lclr=exc_clr,
                  zorder=1)
        draw_line(ax,
                  [[c_axon_x[i][1],
                    c_axon_y[0][i][1]],
                   [c_axon_x[i][1],
                    c_axon_y[-1][i][1]]],
                  lw=lw_axons,
                  ls='solid',
                  lclr=inh_clr,
                  zorder=0)

    # horizontal intracortical axons
    for i in range(4):  # post layer
        for j in range(4):  # pre layer
            # E axon onto the E population of the post layer
            path = [[c_axon_x[j][0], c_axon_y[i][j][0]],
                    [c_pop_pos[i][0][0] + c_pop_size, c_axon_y[i][j][0]]]
            draw_arrow(
                ax,
                path,
                lw=lw_axons,
                ls='solid',
                lclr=exc_clr,
                arrow_size=arrow_size,
                zorder=1)
            # E axon onto the I population
            path = [[c_axon_x[j][0], c_axon_y[i][j][0]],
                    [c_pop_pos[i][1][0], c_axon_y[i][j][0]]]
            draw_arrow(
                ax,
                path,
                lw=lw_axons,
                ls='solid',
                lclr=exc_clr,
                arrow_size=arrow_size,
                zorder=1)
            # I axon onto the I population
            path = [[c_axon_x[j][1], c_axon_y[i][j][1]],
                    [c_pop_pos[i][1][0], c_axon_y[i][j][1]]]
            draw_arrow(
                ax,
                path,
                lw=lw_axons,
                ls='solid',
                lclr=inh_clr,
                arrow_size=arrow_size,
                zorder=0)
            # I axon onto the E population
            path = [[c_axon_x[j][1], c_axon_y[i][j][1]],
                    [c_pop_pos[i][0][0] + c_pop_size, c_axon_y[i][j][1]]]
            draw_arrow(
                ax,
                path,
                lw=lw_axons,
                ls='solid',
                lclr=inh_clr,
                arrow_size=arrow_size,
                zorder=0)

            # connector markers
            draw_circle(ax, [c_axon_x[j][0], c_axon_y[i][j][0]],
                        conn_radius, lw=0, fclr=exc_clr, zorder=0)
            draw_circle(ax, [c_axon_x[j][1], c_axon_y[i][j][1]],
                        conn_radius, lw=0, fclr=inh_clr, zorder=0)

    # cell outputs
    for i in range(4):
        # excitatory output: down from the E box, then across to its lane
        path = [[c_pop_pos[i][0][0] + c_pop_size / 2., c_pop_pos[i][0][1]],
                [c_pop_pos[i][0][0] + c_pop_size / 2.,
                 c_pop_pos[i][0][1] - axon_y_dist],
                [c_axon_x[i][0], c_pop_pos[i][0][1] - axon_y_dist]]
        draw_line(
            ax,
            path,
            lw=lw_axons,
            ls='solid',
            lclr=exc_clr,
            zorder=1)  # excitatory
        draw_circle(ax, path[-1], conn_radius, lw=0,
                    fclr=exc_clr, zorder=0)  # connector

        # inhibitory output: mirrored lane ordering (c_axon_x[-1 - i])
        path = [[c_pop_pos[i][1][0] + c_pop_size / 2., c_pop_pos[i][1][1]],
                [c_pop_pos[i][1][0] + c_pop_size / 2.,
                 c_pop_pos[i][1][1] - axon_y_dist],
                [c_axon_x[-1 - i][1], c_pop_pos[i][1][1] - axon_y_dist]]
        draw_line(
            ax,
            path,
            lw=lw_axons,
            ls='solid',
            lclr=inh_clr,
            zorder=1)  # inhibitory
        draw_circle(ax, path[-1], conn_radius, lw=0,
                    fclr=inh_clr, zorder=0)  # connector

    # remaining first segments for L6
    path = [[c_axon_x[0][0], c_pop_pos[0][0][1] - axon_y_dist],
            [c_axon_x[0][0], c_axon_y[0][0][0]]]
    draw_line(ax, path, lw=lw_axons, ls='solid', lclr=exc_clr, zorder=0)
    path = [[c_axon_x[-1][1], c_pop_pos[0][1][1] - axon_y_dist],
            [c_axon_x[-1][1], c_axon_y[0][0][1]]]
    draw_line(ax, path, lw=lw_axons, ls='solid', lclr=inh_clr, zorder=0)

    ##################################################
    # cortico-cortical axons

    # horizontal branch in L1
    path = [[0.,
             c_pop_pos[-1][0][1] + c_pop_size + axon_cell_sep],
            [c_pop_pos[-1][1][0] + c_pop_size + axon_cell_sep,
             c_pop_pos[-1][0][1] + c_pop_size + axon_cell_sep]]
    draw_line(ax, path, lw=lw_axons, ls='solid', lclr=exc_clr, zorder=1)

    # vertical branches
    path = [[c_pop_pos[-1][0][0] - axon_cell_sep,
             c_pop_pos[-1][0][1] + c_pop_size + axon_cell_sep],
            [c_pop_pos[-1][0][0] - axon_cell_sep,
             c_pop_pos[0][0][1] + cc_input_y * c_pop_size]]
    draw_line(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        zorder=1)  # cc input to exc pop
    draw_circle(
        ax,
        path[0],
        conn_radius,
        lw=0,
        fclr=exc_clr,
        zorder=0)  # connector

    path = [[c_pop_pos[-1][1][0] + c_pop_size + axon_cell_sep,
             c_pop_pos[-1][0][1] + c_pop_size + axon_cell_sep],
            [c_pop_pos[-1][1][0] + c_pop_size + axon_cell_sep,
             c_pop_pos[0][0][1] + cc_input_y * c_pop_size]]
    draw_line(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        zorder=1)  # cc input to inh pop
    draw_circle(
        ax,
        path[0],
        conn_radius,
        lw=0,
        fclr=exc_clr,
        zorder=0)  # connector

    # horizontal branches (arrows)
    for i in range(4):
        # cc input to excitatory populations
        path = [[c_pop_pos[-1][0][0] - axon_cell_sep,
                 c_pop_pos[i][0][1] + cc_input_y * c_pop_size],
                [c_pop_pos[-1][0][0],
                 c_pop_pos[i][0][1] + cc_input_y * c_pop_size],
                ]
        draw_arrow(
            ax,
            path,
            lw=lw_axons,
            ls='solid',
            lclr=exc_clr,
            arrow_size=arrow_size,
            zorder=0)
        draw_circle(
            ax,
            path[0],
            conn_radius,
            lw=0,
            fclr=exc_clr,
            zorder=0)  # connector

        # cc input to inhibitory populations
        path = [[c_pop_pos[-1][1][0] + c_pop_size + axon_cell_sep,
                 c_pop_pos[i][0][1] + cc_input_y * c_pop_size],
                [c_pop_pos[-1][1][0] + c_pop_size,
                 c_pop_pos[i][0][1] + cc_input_y * c_pop_size]]
        draw_arrow(
            ax,
            path,
            lw=lw_axons,
            ls='solid',
            lclr=exc_clr,
            arrow_size=arrow_size,
            zorder=0)
        draw_circle(
            ax,
            path[0],
            conn_radius,
            lw=0,
            fclr=exc_clr,
            zorder=0)  # connector

    ##################################################
    # thalamo-cortical axons
    path = [[t_pos[0] + t_pop_size / 2., t_pos[1] + t_pop_size],
            [t_pos[0] + t_pop_size / 2., t_pos[1] + t_pop_size + axon_y_dist]]
    draw_line(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        zorder=1)  # thalamic output
    draw_circle(ax, path[-1], conn_radius, lw=0,
                fclr=exc_clr, zorder=0)  # connector

    path = [[c_pop_pos[0][0][0] - (axon_cell_sep + axon_y_dist),
             t_pos[1] + t_pop_size + axon_y_dist],
            [c_pop_pos[0][1][0] + c_pop_size + (axon_cell_sep + axon_y_dist),
             t_pos[1] + t_pop_size + axon_y_dist]]
    draw_line(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        zorder=1)  # horizontal branch
    path = [[c_pop_pos[0][0][0] - (axon_cell_sep + axon_y_dist),
             t_pos[1] + t_pop_size + axon_y_dist],
            [c_pop_pos[0][0][0] - (axon_cell_sep + axon_y_dist),
             c_pop_pos[2][0][1] + tc_input_y * c_pop_size]]
    draw_line(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        zorder=1)  # left vertical branch
    path = [[c_pop_pos[0][1][0] + c_pop_size + (axon_cell_sep + axon_y_dist),
             t_pos[1] + t_pop_size + axon_y_dist],
            [c_pop_pos[0][1][0] + c_pop_size + (axon_cell_sep + axon_y_dist),
             c_pop_pos[2][0][1] + tc_input_y * c_pop_size]]
    draw_line(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        zorder=1)  # right vertical branch

    path = [[c_pop_pos[0][0][0] - (axon_cell_sep + axon_y_dist),
             c_pop_pos[2][0][1] + tc_input_y * c_pop_size],
            [c_pop_pos[0][0][0],
             c_pop_pos[2][0][1] + tc_input_y * c_pop_size], ]
    draw_arrow(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        arrow_size=arrow_size,
        zorder=1)  # Th -> L4E synapses (arrows)
    draw_circle(
        ax,
        path[0],
        conn_radius,
        lw=0,
        fclr=exc_clr,
        zorder=0)  # connector
    path = [[c_pop_pos[0][0][0] - (axon_cell_sep + axon_y_dist),
             c_pop_pos[0][0][1] + tc_input_y * c_pop_size],
            [c_pop_pos[0][0][0],
             c_pop_pos[0][0][1] + tc_input_y * c_pop_size], ]
    draw_arrow(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        arrow_size=arrow_size,
        zorder=1)  # Th -> L6E synapses (arrows)
    draw_circle(
        ax,
        path[0],
        conn_radius,
        lw=0,
        fclr=exc_clr,
        zorder=0)  # connector
    path = [[c_pop_pos[0][1][0] + c_pop_size + (axon_cell_sep + axon_y_dist),
             c_pop_pos[2][0][1] + tc_input_y * c_pop_size],
            [c_pop_pos[0][1][0] + c_pop_size,
             c_pop_pos[2][0][1] + tc_input_y * c_pop_size], ]
    draw_arrow(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        arrow_size=arrow_size,
        zorder=1)  # Th -> L4I synapses (arrows)
    draw_circle(
        ax,
        path[0],
        conn_radius,
        lw=0,
        fclr=exc_clr,
        zorder=0)  # connector
    path = [[c_pop_pos[0][1][0] + c_pop_size + (axon_cell_sep + axon_y_dist),
             c_pop_pos[0][0][1] + tc_input_y * c_pop_size],
            [c_pop_pos[0][1][0] + c_pop_size,
             c_pop_pos[0][0][1] + tc_input_y * c_pop_size], ]
    draw_arrow(
        ax,
        path,
        lw=lw_axons,
        ls='solid',
        lclr=exc_clr,
        arrow_size=arrow_size,
        zorder=1)  # Th -> L6I synapses (arrows)
    draw_circle(
        ax,
        path[0],
        conn_radius,
        lw=0,
        fclr=exc_clr,
        zorder=0)  # connector

    if labels:
        ##################################################
        # legend
        legend_x = [
            t_pos[0] +
            t_pop_size +
            axon_cell_sep,
            t_pos[0] +
            t_pop_size +
            axon_cell_sep +
            legend_length]
        legend_y = [t_pos[1], (t_pos[1] + 2 * t_pop_size / 3)]
        draw_arrow(ax,
                   [[legend_x[0],
                     legend_y[1]],
                    [legend_x[1],
                     legend_y[1]]],
                   lw=lw_axons,
                   ls='solid',
                   lclr=exc_clr,
                   arrow_size=arrow_size,
                   zorder=1)
        draw_arrow(ax,
                   [[legend_x[0],
                     legend_y[0]],
                    [legend_x[1],
                     legend_y[0]]],
                   lw=lw_axons,
                   ls='solid',
                   lclr=inh_clr,
                   arrow_size=arrow_size,
                   zorder=1)

        ##################################################
        # population names
        put_text(ax, [t_pos[0] + t_pop_size / 2.,
                      (t_pos[1] + t_pop_size / 2.)], r'TC', 'w', fontdict1)
        put_text(ax,
                 [c_pop_pos[0][0][0] + c_pop_size / 2.,
                  c_pop_pos[0][0][1] + c_pop_size / 2.],
                 r'L6E',
                 'w' if analysis_params.bw else 'k',
                 fontdict1)
        put_text(ax,
                 [c_pop_pos[0][1][0] + c_pop_size / 2.,
                  c_pop_pos[0][1][1] + c_pop_size / 2.],
                 r'L6I',
                 'w' if analysis_params.bw else 'k',
                 fontdict1)
        put_text(ax,
                 [c_pop_pos[1][0][0] + c_pop_size / 2.,
                  c_pop_pos[1][0][1] + c_pop_size / 2.],
                 r'L5E',
                 'w' if analysis_params.bw else 'k',
                 fontdict1)
        put_text(ax,
                 [c_pop_pos[1][1][0] + c_pop_size / 2.,
                  c_pop_pos[1][1][1] + c_pop_size / 2.],
                 r'L5I',
                 'w' if analysis_params.bw else 'k',
                 fontdict1)
        put_text(ax,
                 [c_pop_pos[2][0][0] + c_pop_size / 2.,
                  c_pop_pos[2][0][1] + c_pop_size / 2.],
                 r'L4E',
                 'w' if analysis_params.bw else 'k',
                 fontdict1)
        put_text(ax,
                 [c_pop_pos[2][1][0] + c_pop_size / 2.,
                  c_pop_pos[2][1][1] + c_pop_size / 2.],
                 r'L4I',
                 'w' if analysis_params.bw else 'k',
                 fontdict1)
        put_text(ax,
                 [c_pop_pos[3][0][0] + c_pop_size / 2.,
                  c_pop_pos[3][0][1] + c_pop_size / 2.],
                 r'L23E',
                 'w' if analysis_params.bw else 'k',
                 fontdict1)
        put_text(ax,
                 [c_pop_pos[3][1][0] + c_pop_size / 2.,
                  c_pop_pos[3][1][1] + c_pop_size / 2.],
                 r'L23I',
                 'w' if analysis_params.bw else 'k',
                 fontdict1)
        put_text(ax,
                 [c_pop_pos[-1][0][0],
                  c_pop_pos[-1][0][1] + c_pop_size + 1.7 * axon_cell_sep + 0.01],
                 r'cortico-cortical input',
                 'k',
                 fontdict2)
        put_text(ax, [legend_x[1] + axon_y_dist, legend_y[1]],
                 r'excitatory', 'k', fontdict3)
        put_text(ax, [legend_x[1] + axon_y_dist, legend_y[0]],
                 r'inhibitory', 'k', fontdict3)

        ##################################################
        # layer names
        put_text(ax, [0.2 * c_pop_pos[0][0][0], c_pop_pos[0][1]
                      [1] + c_pop_size / 2.], r'L6', 'k', fontdict1)
        put_text(ax, [0.2 * c_pop_pos[1][0][0], c_pop_pos[1][1]
                      [1] + c_pop_size / 2.], r'L5', 'k', fontdict1)
        put_text(ax, [0.2 * c_pop_pos[2][0][0], c_pop_pos[2][1]
                      [1] + c_pop_size / 2.], r'L4', 'k', fontdict1)
        put_text(ax, [0.2 * c_pop_pos[3][0][0], c_pop_pos[3][1]
                      [1] + c_pop_size / 2.], r'L2/3', 'k', fontdict1)

    if highlight is not None:
        # Label only the highlighted population, with a smaller font.
        ids = name_to_id_mapping[highlight]
        fontdict1['fontsize'] = 4
        put_text(ax, [c_pop_pos[ids[0]][ids[1]][0] + c_pop_size / 2.,
                      c_pop_pos[ids[0]][ids[1]][1] + c_pop_size / 2.],
                 highlight, 'k', fontdict1)

    # Hide ticks and enforce an equal-aspect view of the sketch.
    ax.xaxis.set_ticks([])
    ax.yaxis.set_ticks([])
    ax.axis(ax.axis('equal'))

    return ax
def plot_population(ax,
                    params,
                    aspect='tight',
                    isometricangle=0,
                    plot_somas=True, plot_morphos=False,
                    num_unitsE=1, num_unitsI=1,
                    clip_dendrites=False,
                    main_pops=True,
                    Y=None,
                    big=True,
                    title='cell positions',
                    rasterized=True):
    '''
    Plot the geometry of the column model, optionally with somatic locations
    and optionally with reconstructed neurons

    kwargs:
    ::
        ax : matplotlib.axes.AxesSubplot
        params : parameter object with electrode/population geometry attributes
        aspect : str
            matplotlib.axis argument
        isometricangle : float
            pseudo-3d view angle
        plot_somas : bool
            plot soma locations
        plot_morphos : bool
            plot full morphologies
        num_unitsE : int
            number of excitatory morphos plotted per population
        num_unitsI : int
            number of inhibitory morphos plotted per population
        clip_dendrites : bool
            draw dendrites outside of axis
        mainpops : bool
            if True, plot only main pops, e.g. b23 and nb23 as L23I
        Y : None, or string
            if not None, plot only soma locations of Y
        big : bool
            if False: leave out labels and reduce marker size
        title : str
            axes title
        rasterized : bool
            rasterize scatter/polygon artists

    return:
    ::
        axis : list
            the plt.axis() corresponding to input aspect
    '''
    # maps population label -> (row index into E/I split lists, 0=exc/1=inh)
    name_to_id_mapping = {'L6E': (3, 0),
                          'L6I': (3, 1),
                          'L5E': (2, 0),
                          'L5I': (2, 1),
                          'L4E': (1, 0),
                          'L4I': (1, 1),
                          'L23E': (0, 0),
                          'L23I': (0, 1)
                          }

    # DRAW OUTLINE OF POPULATIONS
    ax.xaxis.set_ticks([])
    ax.yaxis.set_ticks([])

    # contact points (electrode contacts; smaller markers for small panels)
    if big:
        ax.plot(params.electrodeParams['x'],
                params.electrodeParams['z'],
                marker='o', markersize=2, color='k', zorder=0)
    else:
        ax.plot(params.electrodeParams['x'],
                params.electrodeParams['z'],
                marker='o', markersize=0.5, color='k', zorder=0)

    # outline of electrode, built by mirroring the 'min_r' profile of the
    # first cell type around x=0
    x_0 = np.array(params.populationParams[params.y[0]]['min_r'])[1, 1:-1]
    z_0 = np.array(params.populationParams[params.y[0]]['min_r'])[0, 1:-1]
    x = np.r_[x_0[-1], x_0[::-1], -x_0[1:], -x_0[-1]]
    z = np.r_[100, z_0[::-1], z_0[1:], 100]
    ax.fill(x, z, fc='w', lw=0.1, ec='k', zorder=-0.1, clip_on=False)

    # outline of populations:
    # fetch the population radius from some population
    r = params.populationParams['p23']['radius']

    # front (theta0) and back (theta1) half-ellipses for the pseudo-3d view
    theta0 = np.linspace(0, np.pi, 20)
    theta1 = np.linspace(np.pi, 2 * np.pi, 20)

    # layer boundary depths: upper edges plus the lowest lower edge
    zpos = np.r_[params.layerBoundaries[:, 0],
                 params.layerBoundaries[-1, 1]]

    layers = ['L1', 'L2/3', 'L4', 'L5', 'L6']
    for i, z in enumerate(params.layerBoundaries.mean(axis=1)):
        if big:
            ax.text(r, z, ' %s' % layers[i], va='center', ha='left')

    for i, zval in enumerate(zpos):
        # top boundary drawn fully in black; deeper boundaries have a gray
        # front half to suggest depth
        if i == 0:
            ax.plot(r * np.cos(theta0),
                    r * np.sin(theta0) * np.sin(isometricangle) + zval,
                    color='k', zorder=-r, clip_on=False)
            ax.plot(r * np.cos(theta1),
                    r * np.sin(theta1) * np.sin(isometricangle) + zval,
                    color='k', zorder=r, clip_on=False)
        else:
            ax.plot(r * np.cos(theta0),
                    r * np.sin(theta0) * np.sin(isometricangle) + zval,
                    color='gray', zorder=-r, clip_on=False)
            ax.plot(r * np.cos(theta1),
                    r * np.sin(theta1) * np.sin(isometricangle) + zval,
                    color='k', zorder=r, clip_on=False)

    # vertical sides of the column cylinder
    ax.plot([-r, -r], [zpos[0], zpos[-1]], 'k', zorder=0, clip_on=False)
    ax.plot([r, r], [zpos[0], zpos[-1]], 'k', zorder=0, clip_on=False)

    if big:
        # plot a horizontal radius scalebar
        ax.plot([0, r], [z_0.min()] * 2, 'k', lw=1, zorder=0, clip_on=False)
        ax.text(
            r /
            2.,
            z_0.min() -
            100,
            '$r$ = %i $\\mu$m' %
            int(r),
            ha='center')

        # plot a vertical depth scalebar
        ax.plot([-r] * 2, [z_0.min() + 50, z_0.min() - 50],
                'k', lw=1, zorder=0, clip_on=False)
        ax.text(-r, z_0.min(), r'100 $\mu$m', va='center', ha='right')
    ax.set_yticks([])
    ax.set_yticklabels([])

    # fake ticks:
    if big:
        for pos in zpos:
            ax.text(-r, pos, '$z$=%i-' % int(pos), ha='right', va='center')

    ax.set_title(title, va='bottom')

    axis = ax.axis(ax.axis(aspect))

    def plot_pop_scatter(somapos, marker, colors, i):
        # scatter plot setting appropriate zorder for each datapoint by binning
        # on the y (depth into page) coordinate
        pitch = 100
        for lower in np.arange(-600, 601, pitch):
            upper = lower + pitch
            inds = (somapos[:, 1] >= lower) & (somapos[:, 1] < upper)
            if np.any(inds):
                if big:
                    ax.scatter(somapos[inds,
                                       0],
                               somapos[inds,
                                       2] - somapos[inds,
                                                    1] * np.sin(isometricangle),
                               s=10,
                               facecolors=colors[i],
                               edgecolors='gray',
                               linewidths=0.1,
                               zorder=lower,
                               marker=marker,
                               clip_on=False,
                               rasterized=rasterized)
                else:
                    ax.scatter(somapos[inds,
                                       0],
                               somapos[inds,
                                       2] - somapos[inds,
                                                    1] * np.sin(isometricangle),
                               s=3,
                               facecolors=colors[i],
                               edgecolors='gray',
                               linewidths=0.1,
                               zorder=lower,
                               marker=marker,
                               clip_on=False,
                               rasterized=rasterized)

    # DRAW UNITS
    pop = next(zip(*params.mapping_Yy))

    # plot a symbol in each location with a unit
    if plot_somas:
        if main_pops:
            colors = phlp.get_colors(np.unique(pop).size)
            # restructure: interleave excitatory/inhibitory cell-type tuples
            E, I = list(zip(*params.y_in_Y))
            pops_ = []
            if Y is None:
                for i in range(len(E)):
                    pops_.append(E[i])
                    pops_.append(I[i])
            else:
                # only the requested population Y
                ids = name_to_id_mapping[Y]
                if ids[1] == 0:
                    pops_.append(E[ids[0]])
                if ids[1] == 1:
                    pops_.append(I[ids[0]])

            for i, pops in enumerate(pops_):
                layer = np.unique(pop)[i]
                # marker by cell class: pyramidal/excitatory '^',
                # basket/inhibitory '*', spiny stellate 'o'
                if layer.rfind('p') >= 0 or layer.rfind('E') >= 0:
                    marker = '^'
                elif layer.rfind('b') >= 0 or layer.rfind('I') >= 0:
                    marker = '*'
                elif layer.rfind('ss') >= 0:
                    marker = 'o'
                else:
                    raise Exception

                # get the somapos, stacking all cell types in this population
                somapos = []
                for j, lname in enumerate(pops):
                    fname = glob.glob(
                        os.path.join(
                            params.populations_path,
                            '%s*somapos.gdf' %
                            lname))[0]
                    if j == 0:
                        somapos = np.loadtxt(fname).reshape((-1, 3))
                    else:
                        somapos = np.r_[
                            '0, 2', somapos, np.loadtxt(fname).reshape(
                                (-1, 3))]

                # thin out: plot only every 5th soma
                somapos = somapos[::5, :]

                if Y is None:
                    plot_pop_scatter(somapos, marker, colors, i)
                else:
                    plot_pop_scatter(
                        somapos, marker, colors, ids[0] * 2 + ids[1])
        else:
            colors = phlp.get_colors(len(pop))
            i = 0
            for layer, _, _, _ in params.y_zip_list:
                # assign symbol
                if layer.rfind('p') >= 0 or layer.rfind('E') >= 0:
                    marker = '^'
                elif layer.rfind('b') >= 0 or layer.rfind('I') >= 0:
                    marker = '*'
                elif layer.rfind('ss') >= 0:
                    marker = 'x'
                else:
                    raise Exception

                # get the somapos
                fname = glob.glob(
                    os.path.join(
                        params.populations_path,
                        '%s*somapos.gdf' %
                        layer))[0]
                somapos = np.loadtxt(fname).reshape((-1, 3))

                plot_pop_scatter(somapos, marker, colors, i)
                i += 1

    # plot morphologies in their appropriate locations
    if plot_morphos:
        if main_pops:
            colors = phlp.get_colors(np.unique(pop).size)
            # restructure
            E, I = list(zip(*params.y_in_Y))
            pops_ = []
            for i in range(len(E)):
                pops_.append(E[i])
                pops_.append(I[i])

            for i, pops in enumerate(pops_):
                layer = np.unique(pop)[i]

                # get the somapos and morphos
                somapos = []
                for j, lname in enumerate(pops):
                    fname = glob.glob(
                        os.path.join(
                            params.populations_path,
                            '%s*somapos.gdf' %
                            lname))[0]
                    if j == 0:
                        somapos = np.loadtxt(fname).reshape((-1, 3))
                    else:
                        somapos = np.r_[
                            '0, 2', somapos, np.loadtxt(fname).reshape(
                                (-1, 3))]

                # add num_units morphologies per population with a random
                # z-rotation
                if layer.rfind('p') >= 0 or layer.rfind(
                        'ss') >= 0 or layer.rfind('E') >= 0:
                    num_units = num_unitsE
                else:
                    num_units = num_unitsI
                # clamp to available soma count
                if num_units > somapos.shape[0]:
                    n = somapos.shape[0]
                else:
                    n = num_units

                # find some morphos for this population:
                morphos = []
                for fname in params.m_y:
                    if fname.rfind(layer) >= 0:
                        morphos.append(fname)

                # plot some units
                for j in range(n):
                    # NOTE: morphology picked at random among matches
                    cell = LFPy.Cell(
                        morphology=os.path.join(
                            params.PATH_m_y,
                            np.random.permutation(morphos)[0]),
                        nsegs_method='lambda_f',
                        lambda_f=10,
                        extracellular=False)
                    cell.set_pos(somapos[j, 0], somapos[j, 1], somapos[j, 2])
                    cell.set_rotation(z=np.random.rand() * np.pi * 2)

                    # set up a polycollection
                    zips = []
                    for x, z in cell.get_idx_polygons():
                        zips.append(
                            list(zip(x, z - somapos[j, 1] * np.sin(isometricangle))))

                    polycol = PolyCollection(zips,
                                             edgecolors=colors[i],
                                             facecolors=colors[i],
                                             linewidths=(0.5),
                                             zorder=somapos[j, 1],
                                             clip_on=clip_dendrites,
                                             rasterized=rasterized)

                    ax.add_collection(polycol)
                i += 1
        else:
            colors = phlp.get_colors(len(pop))
            i = 0
            for layer, morpho, depth, size in params.y_zip_list:
                # get the somapos
                fname = glob.glob(
                    os.path.join(
                        params.populations_path,
                        '%s*somapos.gdf' %
                        layer))[0]
                somapos = np.loadtxt(fname).reshape((-1, 3))

                # add num_units morphologies per population with a random
                # z-rotation
                if layer.rfind('p') >= 0 or layer.rfind(
                        'ss') >= 0 or layer.rfind('E') >= 0:
                    num_units = num_unitsE
                else:
                    num_units = num_unitsI
                if num_units > somapos.shape[0]:
                    n = somapos.shape[0]
                else:
                    n = num_units

                # plot some units
                for j in range(n):
                    cell = LFPy.Cell(
                        morphology=os.path.join(
                            params.PATH_m_y,
                            morpho),
                        nsegs_method='lambda_f',
                        lambda_f=10,
                        extracellular=False)
                    cell.set_pos(somapos[j, 0], somapos[j, 1], somapos[j, 2])
                    cell.set_rotation(z=np.random.rand() * np.pi * 2)

                    # set up a polycollection
                    zips = []
                    for x, z in cell.get_idx_polygons():
                        zips.append(
                            list(zip(x, z - somapos[j, 1] * np.sin(isometricangle))))

                    polycol = PolyCollection(zips,
                                             edgecolors=colors[i],
                                             facecolors=colors[i],
                                             linewidths=(0.5),
                                             zorder=somapos[j, 1],
                                             clip_on=clip_dendrites,
                                             rasterized=rasterized)
                    ax.add_collection(polycol)

                i += 1

    return axis
def plot_signal_sum(ax,
                    params,
                    fname='LFPsum.h5',
                    unit='mV',
                    scaling_factor=1.,
                    ylabels=True,
                    scalebar=True,
                    vlimround=None,
                    T=[800,
                       1000],
                    ylim=[-1500,
                          0],
                    color='k',
                    fancy=False,
                    label='',
                    transient=200,
                    clip_on=False,
                    rasterized=True,
                    **kwargs):
    '''
    on axes plot the summed LFP contributions, one trace per channel, offset
    vertically by each channel's depth and normalized by a power-of-two scale

    args:
    ::
        ax : matplotlib.axes.AxesSubplot object
        fname : str/np.ndarray, path to h5 file or ndim=2 numpy.ndarray
        unit : str, scalebar unit
        scaling_factor : float, scaling factor (e.g. to scale 10% data set up)
        ylabels : bool, show labels on y-axis
        scalebar : bool, show scalebar in plot
        vlimround : None/float, override autoscaling of data and scalebar
        T : list, [tstart, tstop], which timeinterval
        ylim : list of floats, see plt.gca().set_ylim
        color : str/colorspec tuple, color of shown lines
        fancy : bool, if True use per-channel colors instead of ``color``
        label : str, line labels
        transient : float, time (ms) discarded when computing the channel mean
        rasterized : bool, rasterize line plots if true
        kwargs : additional keyword arguments passed to ax.plot()

    returns:
    ::
        vlimround : float, scalebar scaling factor, i.e., to match up plots
    '''
    if isinstance(fname, str) and os.path.isfile(fname):
        f = h5py.File(fname, 'r')
        # load data
        data = f['data'][()]
        tvec = np.arange(data.shape[1]) * 1000. / f['srate'][()]
        # for mean subtraction
        datameanaxis1 = data[:, tvec >= transient].mean(axis=1)
        # close dataset
        f.close()
    elif isinstance(fname, np.ndarray) and fname.ndim == 2:
        data = fname
        tvec = np.arange(data.shape[1]) * params.dt_output
        datameanaxis1 = data[:, tvec >= transient].mean(axis=1)
    else:
        raise Exception(
            'type(fname)={} not str or numpy.ndarray'.format(
                type(fname)))

    # slice out the requested time window
    slica = (tvec <= T[1]) & (tvec >= T[0])
    data = data[:, slica]

    # subtract mean in each channel
    #dataT = data.T - data.mean(axis=1)
    dataT = data.T - datameanaxis1
    data = dataT.T

    # normalize
    data = data * scaling_factor

    zvec = np.r_[params.electrodeParams['z']]
    zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]]
    vlim = abs(data).max()
    # round the scale to the nearest power of two unless overridden
    if vlimround is None:
        vlimround = 2.**np.round(np.log2(vlim))
    else:
        pass

    yticklabels = []
    yticks = []

    if fancy:
        colors = phlp.get_colors(data.shape[0])
    else:
        colors = [color] * data.shape[0]

    for i, z in enumerate(params.electrodeParams['z']):
        # only the first trace carries the legend label
        if i == 0:
            ax.plot(tvec[slica], data[i] * 100 / vlimround + z,
                    color=colors[i], rasterized=rasterized, label=label,
                    clip_on=clip_on, **kwargs)
        else:
            ax.plot(tvec[slica], data[i] * 100 / vlimround + z,
                    color=colors[i], rasterized=rasterized, clip_on=clip_on,
                    **kwargs)
        yticklabels.append('ch. %i' % (i + 1))
        yticks.append(z)

    if scalebar:
        # vertical scalebar of height 100 plot units == vlimround data units
        ax.plot([tvec[slica][-1], tvec[slica][-1]],
                [-1300, -1400], lw=2, color='k', clip_on=False)
        ax.text(tvec[slica][-1] + np.diff(T) * 0.02, -1350,
                r'%g %s' % (vlimround, unit),
                color='k', rotation='vertical',
                va='center')

    ax.axis(ax.axis('tight'))
    ax.yaxis.set_ticks(yticks)
    if ylabels:
        ax.yaxis.set_ticklabels(yticklabels)
    else:
        ax.yaxis.set_ticklabels([])

    # hide top/right spines
    for loc, spine in ax.spines.items():
        if loc in ['right', 'top']:
            spine.set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_xlabel(r'$t$ (ms)', labelpad=0.1)
    ax.set_ylim(ylim)
    ax.set_xlim(T)

    return vlimround
def plotMorphologyTable(fig, params, rasterized=True):
    '''
    Draw a "table" figure: one reconstructed morphology per cell type laid
    out horizontally, with layer boundaries, and rows of annotations
    (population, sizes, occurrences, segment counts) above/below.

    args:
    ::
        fig : matplotlib.figure.Figure, figure to draw into
        params : parameter object (PATH_m_y, cellParams, layerBoundaries, ...)
        rasterized : bool, rasterize the morphology polygons

    returns:
    ::
        fig : the input figure, for chaining
    '''
    ax = fig.add_axes([0.075, 0.05, 0.925, 0.9])

    # using colors assosiated with each main postsyn population
    colors = phlp.get_colors(len(params.Y))

    layers = ['L1', 'L2/3', 'L4', 'L5', 'L6']

    # cell composition (percent of all cells per cell type)
    composition = params.N_y.astype(float) / params.N_y.sum() * 100

    # hard-coded morphology-type label per entry of params.y (order-coupled!)
    morphotypes = [
        'p23',
        'i23',
        'i23',
        'p4',
        'ss4',
        'ss4',
        'i4',
        'i4',
        'p5v1',
        'p5v2',
        'i5',
        'i5',
        'p6',
        'p5v1',
        'i5',
        'i5',
    ]

    y_zip_list = list(zip(params.y,
                          params.m_y,
                          params.depths,
                          params.N_y,
                          composition,
                          morphotypes))

    xpos = 300
    xvec = [xpos]
    COUNTER = 0
    COUNTER_J = 0
    prevpop = None
    totnsegs = []

    for layerind, morpho, depth, size, relsize, mtype in y_zip_list:
        fil = os.path.join(params.PATH_m_y, morpho)

        # wipe NEURON state before loading the next morphology
        neuron.h('forall delete_section()')
        cell = LFPy.Cell(fil,
                         # nsegs_method='lambda_f',
                         #lambda_f = 10,
                         pt3d=False,
                         **params.cellParams)
        cell.set_pos(xpos, 0, depth)

        upperbound = params.layerBoundaries[0, 0]

        totnsegs.append(cell.totnsegs)

        zips = []
        for x, z in cell.get_idx_polygons():
            zips.append(list(zip(x, z)))

        # advance color index whenever the postsynaptic population changes
        if COUNTER > 0 and prevpop != morpho.split('_')[0]:
            COUNTER_J += 1
        prevpop = morpho.split('_')[0]

        polycol = PolyCollection(zips,
                                 linewidths=0.5,
                                 edgecolors=colors[COUNTER_J],
                                 facecolors=colors[COUNTER_J],
                                 rasterized=rasterized)
        ax.add_collection(polycol)

        xpos += 300
        xvec = np.r_[xvec, xpos]
        COUNTER += 1

    xvec = xvec[:-1]

    # dotted layer-boundary lines
    ax.hlines(params.layerBoundaries[:, 0], 0, xpos - 100, linestyles='dotted')
    ax.hlines(params.layerBoundaries[-1, -1],
              0, xpos - 100, linestyles='dotted')
    ax.set_ylabel(r'depth ($\mu$m)')
    ax.set_yticks(np.r_[params.layerBoundaries[:, 0],
                        params.layerBoundaries[-1, -1]])
    ax.set_xticks([])

    for i, z in enumerate(params.layerBoundaries.mean(axis=1)):
        ax.text(-50, z, layers[i], verticalalignment='center')

    for loc, spine in ax.spines.items():
        spine.set_color('none')  # don't draw spine
    ax.yaxis.set_ticks_position('left')

    ax.axis(ax.axis('equal'))

    # plot annotations
    i = 0
    j = 0
    xpos = 150
    #prevcolor = None
    prevpop = None
    for layerind, morpho, depth, size, relsize, mtype in y_zip_list:
        pop = morpho.split('_')[0]

        ax.text(xpos + 30, 300, '{:.1f}%'.format(relsize), ha='left')

        # vertical separator at each population boundary
        if i > 0 and prevpop != pop:
            ax.vlines(xpos, -1800, 900,
                      clip_on=False)
            j += 1
        if j > 7:  # HACK: clamp to last population index
            j = 7
        bigsize = np.array(params.full_scale_num_neurons).flatten()[j]
        if prevpop != pop:
            ax.text(xpos + 30, 800, pop, ha='left', clip_on=False,)
            ax.text(xpos + 30, 700, bigsize, ha='left', clip_on=False)

        ax.text(xpos + 30, 100, size, ha='left', clip_on=False)
        ax.text(xpos + 30, 200,
                '{:.1f}%'.format(100 * float(size) / bigsize),
                ha='left')
        # ax.text(xpos+30, 400, morpho.split('_', 1)[1].split('.hoc')[0][:8],
        #         ha='left', clip_on=False)
        ax.text(xpos + 30, 400, '{}'.format(totnsegs[i]))
        ax.text(xpos + 30, 500, mtype,
                ha='left', clip_on=False)
        ax.text(xpos + 30, 600, layerind, ha='left', clip_on=False)

        prevpop = pop
        xpos += 300

        i += 1

    # row headers for the annotation rows
    ax.text(90, 800, r'Population $Y$:', ha='right', clip_on=False)
    ax.text(90, 700, r'Pop. size $N_Y$:', ha='right', clip_on=False)
    ax.text(90, 600, r'Cell type $y$:', ha='right', clip_on=False)
    ax.text(90, 500, r'Morphology $M_y$:', ha='right', clip_on=False)
    ax.text(90, 400, r'Segments $n_\mathrm{comp}$:', ha='right', clip_on=False)
    ax.text(90, 300, r'Occurrence $F_y$:', ha='right', clip_on=False)
    ax.text(90, 200, r'Rel. Occurr. $F_{yY}$:', ha='right', clip_on=False)
    ax.text(90, 100, r'Cell count $N_y$:', ha='right', clip_on=False)

    ax.axis(ax.axis('equal'))

    return fig
def getMeanInpCurrents(params, numunits=100,
                       filepattern=os.path.join('simulation_output_default',
                                                'population_input_spikes*')):
    '''return a dict with the per population mean and std synaptic current,
    averaging over numcells recorded units from each population in the
    network

    Returned currents are in unit of nA.

    NOTE(review): relies on module-level MPI globals SIZE, RANK and COMM;
    populations are processed round-robin across MPI ranks and results
    merged with allgather.
    '''
    # convolution kernels: exponential synapse kernel sampled on the sim grid
    x = np.arange(100) * params.dt
    kernel = np.exp(-x / params.model_params['tau_syn_ex'])

    # number of external inputs:
    K_bg = np.array(sum(params.K_bg, []))

    # compensate for DC CC connections if we're using that
    iDC = K_bg * params.dc_amplitude * 1E-3  # unit ????

    data = {}
    # loop over network-populations
    for i, Y in enumerate(params.Y):
        if i % SIZE == RANK:
            # file to open
            fname = glob.glob(filepattern + '*' + Y + '*')[0]
            print(fname)
            # read in read data and assess units, up to numunits
            rawdata = np.array(helpers.read_gdf(fname))
            units = np.unique(rawdata[:, 0])
            if numunits > units.size:
                numcells = units.size
            else:
                numcells = numunits
            units = units[:numcells]

            # churn through data and extract the input currents per cell,
            # stacking per-unit rows along a new leading axis
            for j, unit in enumerate(units):
                slc = rawdata[:, 0] == unit
                # just the spikes:
                if j == 0:
                    dataslc = rawdata[slc, 2:]
                else:
                    dataslc = np.r_['0,3', dataslc, rawdata[slc, 2:]]

            # fix the datatype, it may be object
            dataslc = dataslc.astype(float)

            #fill in data-structure; column 0 = excitatory, 1 = inhibitory
            data.update({
                Y: {
                    'E': np.convolve(dataslc[:, :, 0].mean(axis=0),
                                     kernel, 'same') * 1E-3 + float(iDC[i]),
                    'I': np.convolve(dataslc[:, :, 1].mean(axis=0),
                                     kernel, 'same') * 1E-3,
                    'tvec': rawdata[slc, 1],
                    'numunits': numunits,
                }
            })

    # merge the per-rank partial dicts into one
    data = COMM.allgather(data)

    return {k: v for d in data for k, v in list(d.items())}
def getMeanVoltages(params, numunits=100,
                    filepattern=os.path.join('simulation_output_default',
                                             'voltages')):
    '''return a dict with the per population mean and std membrane voltage,
    averaging over numcells recorded units from each population in the
    network

    NOTE(review): the original docstring was copy-pasted from
    getMeanInpCurrents and mentioned "synaptic current"/"nA"; this function
    reads the 'voltages' output files — presumably mV, confirm upstream.
    Relies on module-level MPI globals SIZE, RANK and COMM.
    '''
    data = {}
    # loop over network-populations, round-robin over MPI ranks
    for i, Y in enumerate(params.Y):
        if i % SIZE == RANK:
            # read in read data and assess units, up to numunits
            fname = glob.glob(filepattern + '*' + Y + '*')[0]
            print(fname)
            rawdata = np.array(helpers.read_gdf(fname))
            units = np.unique(rawdata[:, 0])
            if numunits > units.size:
                numcells = units.size
            else:
                numcells = numunits
            units = units[:numcells]

            # churn through data and extract the per cell voltages,
            # stacking per-unit rows along a new leading axis
            for j, unit in enumerate(units):
                slc = rawdata[:, 0] == unit
                # just the spikes:
                if j == 0:
                    dataslc = rawdata[slc, 2:]
                else:
                    dataslc = np.r_['0,3', dataslc, rawdata[slc, 2:]]

            # fix the datatype, it may be object
            dataslc = dataslc.astype(float)

            #fill in data-structure
            data.update({
                Y: {
                    'data': dataslc[:, :, 0].mean(axis=0),
                    'std': dataslc[:, :, 0].std(axis=0),
                    'sample': dataslc[0, :, 0],
                    'tvec': rawdata[slc, 1],
                    'numunits': numunits,
                }
            })

    # merge the per-rank partial dicts into one
    data = COMM.allgather(data)

    return {k: v for d in data for k, v in list(d.items())}
def plot_signal_sum_colorplot(ax,
                              params,
                              fname='LFPsum.h5',
                              unit='mV',
                              N=1,
                              ylabels=True,
                              T=[800,
                                 1000],
                              ylim=[-1500,
                                    0],
                              fancy=False,
                              colorbar=True,
                              cmap='spectral_r',
                              absmax=None,
                              transient=200,
                              rasterized=True,
                              scaling_factor=1.):
    '''
    on colorplot and as background plot the summed CSD contributions

    args:
    ::
        ax : matplotlib.axes.AxesSubplot object
        params : parameter object with electrodeParams
        fname : str, path to h5 file with 'data' and 'srate' datasets
        unit : str, colorbar label
        T : list, [tstart, tstop], which timeinterval
        ylims : list, set range of yaxis to scale with other plots
        fancy : bool,
        N : integer, set to number of LFP generators in order to get the normalized signal
        colorbar : bool, append a colorbar to the right of ax
        absmax : None/float, override symmetric color limits
        transient : float, time (ms) discarded when computing the channel mean
        scaling_factor : float, multiplicative scaling of the raw data

    returns:
    ::
        im : the QuadMesh returned by pcolormesh
    '''
    f = h5py.File(fname, 'r')
    data = f['data'][()] * scaling_factor
    tvec = np.arange(data.shape[1]) * 1000. / f['srate'][()]

    # for mean subtraction (mean over the post-transient period)
    datameanaxis1 = data[:, tvec >= transient].mean(axis=1)

    # slice
    slica = (tvec <= T[1]) & (tvec >= T[0])
    data = data[:, slica]

    # subtract mean
    #dataT = data.T - data.mean(axis=1)
    dataT = data.T - datameanaxis1
    data = dataT.T

    # normalize
    data = data / N

    zvec = params.electrodeParams['z']

    # NOTE(review): ``colors`` is computed but never used in this function
    if fancy:
        colors = phlp.get_colors(data.shape[0])
    else:
        colors = ['k'] * data.shape[0]

    # symmetric color limits around zero unless overridden
    if absmax is None:
        absmax = abs(np.array([data.max(), data.min()])).max()

    im = ax.pcolormesh(tvec[slica],
                       zvec,
                       data,
                       rasterized=rasterized,
                       vmax=absmax,
                       vmin=-absmax,
                       cmap=cmap,
                       shading='auto')
    ax.set_yticks(params.electrodeParams['z'])
    if ylabels:
        yticklabels = ['ch. %i' %
                       (i + 1) for i in np.arange(len(params.electrodeParams['z']))]
        ax.set_yticklabels(yticklabels)
    else:
        ax.set_yticklabels([])

    if colorbar:
        # colorbar
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.1)
        cbar = plt.colorbar(im, cax=cax)
        cbar.set_label(unit, labelpad=0.1)

    plt.axis('tight')
    ax.set_ylim(ylim)
    ax.set_xlim(T)

    f.close()

    return im
def calc_signal_power(
        params,
        fname,
        transient=200,
        Df=None,
        mlab=True,
        NFFT=1000,
        noverlap=0,
        window=plt.mlab.window_hanning):
    '''
    calculates power spectrum of sum signal for all channels

    args:
    ::
        params : parameter object with electrodeParams
        fname : str/np.ndarray, path to h5 file (with 'data' and 'srate')
            or a 2D array (then a sampling rate of 1000 Hz is assumed)
        transient : float, initial period (ms) discarded before the PSD
        Df : passed to helpers.powerspec when mlab is False
        mlab : bool, use plt.mlab.psd (True) or helpers.powerspec (False)
        NFFT, noverlap, window : forwarded to plt.mlab.psd

    returns:
    ::
        freqs : 1D array of frequencies
        PSD : 2D array, one PSD row per channel
    '''
    if isinstance(fname, str) and os.path.isfile(fname):
        # open file
        f = h5py.File(fname, 'r')
        data = f['data'][()]
        srate = f['srate'][()]
        tvec = np.arange(data.shape[1]) * 1000. / srate
        f.close()
    elif isinstance(fname, np.ndarray):
        data = fname
        srate = 1000.
        tvec = np.arange(data.shape[1]) * 1000. / srate
    else:
        raise Exception('{} not a file or array'.format(fname))

    # slice away the startup transient
    slica = (tvec >= transient)
    data = data[:, slica]

    # subtract mean
    dataT = data.T - data.mean(axis=1)
    data = dataT.T

    # extract PSD channel by channel
    PSD = []
    for i in np.arange(len(params.electrodeParams['z'])):
        if mlab:
            Pxx, freqs = plt.mlab.psd(
                data[i], NFFT=NFFT, Fs=srate, noverlap=noverlap, window=window)
        else:
            [freqs, Pxx] = helpers.powerspec([data[i, ]], tbin=1.,
                                             Df=Df, pointProcess=False)
            # keep only non-negative frequencies and normalize by the
            # squared number of samples
            mask = np.where(freqs >= 0.)
            freqs = freqs[mask]
            Pxx = Pxx.flatten()
            Pxx = Pxx[mask]
            Pxx = Pxx / tvec[tvec >= transient].size**2
        PSD += [Pxx.flatten()]
    PSD = np.array(PSD)

    return freqs, PSD
def plot_signal_power_colorplot(ax, params, fname, transient=200, Df=None,
                                mlab=True, NFFT=1000,
                                window=plt.mlab.window_hanning,
                                noverlap=0,
                                cmap=plt.cm.get_cmap('jet', 21),
                                vmin=None,
                                vmax=None):
    '''
    on axes plot the LFP power spectral density as a log-scaled colormap,
    channels along the y-axis and frequency along the (log) x-axis

    The whole signal duration is used.

    args:
    ::
        ax : matplotlib.axes.AxesSubplot object
        params : parameter object with electrodeParams
        fname : str/np.ndarray, forwarded to calc_signal_power
        transient, Df, mlab, NFFT, window, noverlap : see calc_signal_power
        cmap : colormap for pcolormesh
        vmin, vmax : color limits for the LogNorm

    returns:
    ::
        im : the QuadMesh returned by pcolormesh
    '''
    zvec = np.r_[params.electrodeParams['z']]

    # labels
    yticklabels = []
    yticks = []
    for i, kk in enumerate(params.electrodeParams['z']):
        yticklabels.append('ch. %i' % (i + 1))
        yticks.append(kk)

    freqs, PSD = calc_signal_power(params, fname=fname, transient=transient, Df=Df,
                                   mlab=mlab, NFFT=NFFT,
                                   window=window, noverlap=noverlap)

    # plot only at and above 1 Hz
    inds = freqs >= 1
    im = ax.pcolormesh(freqs[inds], zvec, PSD[:, inds],
                       rasterized=True,
                       norm=LogNorm(vmin=vmin, vmax=vmax),
                       cmap=cmap, shading='auto')
    ax.yaxis.set_ticks(yticks)
    ax.yaxis.set_ticklabels(yticklabels)
    ax.semilogx()
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_xlabel(r'$f$ (Hz)', labelpad=0.1)
    ax.axis(ax.axis('tight'))

    return im
def plotPowers(ax, params, popkeys, dataset, linestyles, linewidths,
               transient=200, SCALING_POSTFIX='', markerstyles=None,
               scaling_factor=1.):
    '''plot power (variance) as function of depth for total and separate
    contributors

    Plot variance of sum signal

    args:
    ::
        ax : matplotlib.axes.AxesSubplot object
        params : parameter object (populations_path, savefolder, ...)
        popkeys : list of population names, one file per population
        dataset : str, dataset name used to build the h5 file names
        linestyles, linewidths, markerstyles : per-population line styling
        transient : int, NOTE(review) used as a SAMPLE index into the data
            (``data[:, transient:]``), not milliseconds — confirm sampling
            rate assumption
        SCALING_POSTFIX : str, filename postfix
        scaling_factor : float, multiplicative scaling of the raw data
    '''
    colors = phlp.get_colors(len(popkeys))

    depth = params.electrodeParams['z']

    zpos = np.r_[params.layerBoundaries[:, 0],
                 params.layerBoundaries[-1, 1]]

    # per-population variance-vs-depth curves
    for i, layer in enumerate(popkeys):
        f = h5py.File(
            os.path.join(
                params.populations_path, '%s_population_%s' %
                (layer, dataset) + SCALING_POSTFIX + '.h5'), 'r')
        data = f['data'][()] * scaling_factor
        ax.semilogx(data[:, transient:].var(axis=1), depth,
                    color=colors[i],
                    ls=linestyles[i],
                    lw=linewidths[i],
                    marker=None if markerstyles is None else markerstyles[i],
                    markersize=2.5,
                    markerfacecolor=colors[i],
                    markeredgecolor=colors[i],
                    label=layer,
                    clip_on=True
                    )
        f.close()

    # variance of the summed signal on top, in black
    f = h5py.File(
        os.path.join(
            params.savefolder,
            '%s_sum' %
            dataset +
            SCALING_POSTFIX +
            '.h5'),
        'r')
    data = f['data'][()] * scaling_factor
    ax.plot(data[:, transient:].var(axis=1), depth,
            'k', label='SUM', lw=1.25, clip_on=False)
    f.close()

    ax.set_yticks(zpos)
    ax.set_yticklabels([])

    # ax.set_xscale('log')
    try:  # numticks arg only exists for latest matplotlib version
        ax.xaxis.set_major_locator(plt.LogLocator(
            base=10, subs=np.linspace(-10, 10, 2), numticks=6))
    except BaseException:
        ax.xaxis.set_major_locator(plt.LogLocator(
            base=10, subs=np.linspace(-10, 10, 2)))
    ax.xaxis.set_minor_locator(plt.LogLocator(base=10, subs=[1.]))

    ax.axis('tight')
def plotting_correlation(
        params,
        x0,
        x1,
        ax,
        lag=20.,
        scaling=None,
        normalize=True,
        color='k',
        unit=r'$cc=%.3f$',
        title='firing_rate vs LFP',
        scalebar=True,
        **kwargs):
    '''
    on axes plot the correlation between x0 and x1, one trace per electrode
    channel, vertically offset by channel depth

    args:
    ::
        x0 : first dataset; 1D (correlated against every channel of x1) or
            2D (correlated channel-wise against x1)
        x1 : second dataset - the LFP usually here
        ax : matplotlib.axes.AxesSubplot object
        lag : float, maximum lag (ms) shown on the x-axis
        scaling : None/float, override autoscaling of the traces/scalebar
        normalize : if True, signals are z-scored before applying np.correlate
        color : line color
        unit : unit for scalebar (%-format string receiving vlimround)
        title : text to be used as current axis object title
        scalebar : bool, draw the vertical scaling bar

    returns:
    ::
        (xcorr_all sliced/reversed over the lag window, vlimround)
    '''
    zvec = np.r_[params.electrodeParams['z']]
    zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]]

    xcorr_all = np.zeros(
        (params.electrodeParams['z'].size, x0.shape[-1]), dtype=float)

    if normalize:
        for i, z in enumerate(params.electrodeParams['z']):
            if x0.ndim == 1:
                x2 = x1[i, ]
                xcorr1 = np.correlate(helpers.normalize(x0),
                                      helpers.normalize(x2), 'same') / x0.size
            elif x0.ndim == 2:
                xcorr1 = np.correlate(helpers.normalize(
                    x0[i, ]), helpers.normalize(x1[i, ]), 'same') / x0.shape[-1]
            xcorr_all[i, :] = xcorr1
    else:
        for i, z in enumerate(params.electrodeParams['z']):
            if x0.ndim == 1:
                x2 = x1[i, ]
                xcorr1 = np.correlate(x0, x2, 'same')
            elif x0.ndim == 2:
                xcorr1 = np.correlate(x0[i, ], x1[i, ], 'same')
            xcorr_all[i, :] = xcorr1

    # Find limits for the plot (nearest power of two unless overridden)
    if scaling is None:
        vlim = abs(xcorr_all).max()
        vlimround = 2.**np.round(np.log2(vlim))
    else:
        vlimround = scaling

    yticklabels = []
    yticks = []

    # temporal slicing: indices of the +-lag window around zero lag
    lagvector = np.arange(-lag, lag + 1).astype(int)
    inds = lagvector + x0.shape[-1] // 2

    for i, z in enumerate(params.electrodeParams['z']):
        # traces are reversed over the lag window (inds[::-1])
        ax.plot(lagvector, xcorr_all[i, inds[::-1]] * 100. / vlimround + z,
                clip_on=True, rasterized=False, color=color, **kwargs)
        yticklabels.append('ch. %i' % (i + 1))
        yticks.append(z)

    phlp.remove_axis_junk(ax)

    ax.set_title(title, va='center')
    ax.set_xlabel(r'$\tau$ (ms)', labelpad=0.1)

    ax.set_xlim(-lag, lag)
    # NOTE(review): ``z`` here leaks out of the loop above, i.e. the lower
    # ylim is 100 below the deepest channel — appears intentional
    ax.set_ylim(z - 100, 100)

    axis = ax.axis()
    # NOTE(review): both branches of the conditional below yield 'k';
    # the analysis_params.bw switch is effectively a no-op here
    ax.vlines(
        0,
        axis[2],
        axis[3],
        'k' if analysis_params.bw else 'k',
        'dotted',
        lw=0.25)

    ax.yaxis.set_ticks(yticks)
    ax.yaxis.set_ticklabels(yticklabels)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')

    # Create a scaling bar
    if scalebar:
        ax.plot([lag, lag],
                [-1500, -1400], lw=2, color='k', clip_on=False)
        ax.text(lag * 1.04, -1450, unit % vlimround,
                rotation='vertical', va='center')

    return xcorr_all[:, inds[::-1]], vlimround
| espenhgn/hybridLFPy | examples/Hagen_et_al_2016_cercor/plot_methods.py | Python | gpl-3.0 | 64,229 | [
"NEURON"
] | 19f7912bb39df56169625e0e4cf105caa8cadcbfa8e8355f3ea29c1a117806e5 |
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import argument
from chainer.utils import type_check
class Gaussian(function_node.FunctionNode):

    """Gaussian sampling function.

    .. note::

        In forward calculation, this function takes a mean and the logarithm of
        a variance as inputs, and draws a sample from a Gaussian distribution
        accordingly.

    """

    def __init__(self, eps=None):
        # When ``eps`` is None, the noise is generated once during the first
        # forward pass and then cached on the instance and reused in
        # subsequent calls.
        self.eps = eps

    def check_type_forward(self, in_types):
        # Both inputs must be floating point with identical dtype and shape.
        type_check.argname(in_types, ('mean', 'ln_var'))
        m_type, v_type = in_types
        type_check.expect(
            m_type.dtype.kind == 'f',
            m_type.dtype == v_type.dtype,
            m_type.shape == v_type.shape,
        )

    def forward_cpu(self, inputs):
        # Retain ``ln_var`` (input index 1); it is needed in backward().
        self.retain_inputs((1,))
        mean, ln_var = inputs
        if self.eps is None:
            self.eps = (
                numpy.random.standard_normal(ln_var.shape)
                .astype(mean.dtype, copy=False)
            )

        # Reparameterization trick: sample = mean + exp(ln_var / 2) * eps.
        self.noise = numpy.exp(ln_var * mean.dtype.type(0.5)) * self.eps
        return utils.force_array(mean + self.noise),

    def forward_gpu(self, inputs):
        self.retain_inputs((1,))
        mean, ln_var = inputs
        if self.eps is None:
            if mean.dtype != numpy.float16:
                self.eps = cuda.cupy.random.standard_normal(
                    ln_var.shape, dtype=mean.dtype)
            else:
                # Draw samples in FP32 then cast them to FP16 because
                # cupy.random does not support FP16 currently.
                self.eps = cuda.cupy.random.standard_normal(
                    ln_var.shape, dtype=numpy.float32).astype(numpy.float16)

        # NOTE(review): this allocation is immediately replaced by the
        # elementwise kernel's output below and looks redundant.
        self.noise = cuda.cupy.empty_like(mean)
        self.noise = cuda.elementwise(
            'T v, T e', 'T noise',
            'noise = exp(v / 2) * e',
            'gaussian_forward'
        )(ln_var, self.eps)
        return mean + self.noise,

    def backward(self, indexes, grad_outputs):
        ln_var, = self.get_retained_inputs()
        gy, = grad_outputs

        ret = []
        if 0 in indexes:
            # d(out)/d(mean) = 1, so the gradient passes straight through.
            ret.append(gy)
        if 1 in indexes:
            # d(out)/d(ln_var) = 0.5 * exp(ln_var / 2) * eps.
            noise = chainer.functions.exp(ln_var * 0.5) * self.eps
            ret.append(gy * noise * 0.5)
        return ret
def gaussian(mean, ln_var, **kwargs):
    """gaussian(mean, ln_var, *, eps=None, return_eps=False)

    Gaussian sampling function.

    This function takes a mean :math:`\\mu` and the logarithm of a variance
    :math:`\\log(\\sigma^2)` as inputs and outputs a sample drawn from a
    Gaussian distribution :math:`N(\\mu, \\sigma)`.

    The inputs must have the same shape.

    Args:
        mean (~chainer.Variable):
            Input variable representing the mean :math:`\\mu`.
        ln_var (~chainer.Variable):
            Input variable representing the logarithm of a variance
            :math:`\\log(\\sigma^2)`.
        eps (`ndarray` or None):
            The eps value to be used.
            You do not have to specify this value, unless you need to make
            results deterministic.
            If ``eps`` is not specified or set to ``None``, an eps value will
            be generated randomly.
            The shape and dtype must be the same as ``ln_var`` and should be
            on the same device.
        return_eps (bool):
            If ``True``, the eps value used in this function is returned
            together with the output variable.
            The returned eps can later be reused by passing it to the ``eps``
            argument.

    Returns:
        ~chainer.Variable or tuple:
            When ``return_eps`` is ``False`` (default), returns the output
            variable with the shape of ``mean`` and/or ``ln_var``.
            When ``True``, returns the tuple of the output variable and eps
            (`ndarray`).
            The eps will be on the same device as the input (``ln_var``).

    """
    # ``eps`` and ``return_eps`` are keyword-only; parse_kwargs also rejects
    # any unexpected keyword arguments.
    eps = None
    return_eps = False
    if kwargs:
        eps, return_eps = argument.parse_kwargs(
            kwargs, ('eps', eps), ('return_eps', return_eps))

    func = Gaussian(eps)
    out = func.apply((mean, ln_var))[0]
    if return_eps:
        # func.eps is the noise actually used (generated in forward if the
        # caller passed eps=None).
        return out, func.eps
    return out
| rezoo/chainer | chainer/functions/noise/gaussian.py | Python | mit | 4,523 | [
"Gaussian"
] | 667e6a33a78c0ad3b72108f6a9bd7eed22e76b258555caffac866598f0b54376 |
"""
def getAllRelatedClasses(root,classfqn):
classobj = getTypeOf(root,classfqn)
rootClasses = _getRootClasses(classobj)
#print rootClasses
relatedClasses = [] + rootClasses
for rootClass in rootClasses:
relatedClasses += _getAllSubClasses(rootClass,root)
return relatedClasses
def _getRootClasses(klass):
if klass is None: # i.e. dont have base class in our ast
return None
if klass.getBaseClassNames() == []: # i.e. is a root class
return[klass]
else:
rootclasses = []
for base in klass.getBaseClassNames():
baseclass = getTypeOf(klass,base)
rootclass = _getRootClasses(baseclass)
if rootclass is None: # base class not in our ast
rootclass = [klass]
rootclasses+=rootclass
return rootclasses
def _getAllSubClasses(baseclass, root, subclasses = []):
class ClassVisitor:
def visitSource(self,node):
self.visit(node.fastparseroot)
def visitClass(self, node):
for basename in node.getBaseClassNames():
if basename.find(baseclass.name) != -1 and \
getTypeOf(node,basename) == baseclass:
subclasses.append(node)
_getAllSubClasses(node,root,subclasses)
for child in node.getChildNodes():
self.visit(child)
walk(root, ClassVisitor())
return subclasses
"""
| srusskih/SublimeBicycleRepair | bike/query/getAllRelatedClasses.py | Python | mit | 1,498 | [
"VisIt"
] | d1c4233dec086e430e9d647735cda888237e93f69a78101547eb688536880162 |
#!/usr/bin/python
#
# Copyright (C) 2014, Jaguar Land Rover
#
# This program is licensed under the terms and conditions of the
# Mozilla Public License, version 2.0. The full text of the
# Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
#
#
# Simple RVI service caller
#
import sys
from rvilib import RVI
import threading
import time
import getopt
def usage():
    """Print command-line usage for the RVI service caller and exit.

    Always terminates the process with exit status 255 so that shell
    callers can distinguish a usage error from a successful invocation.
    """
    print "Usage:", sys.argv[0], "[-n RVI-node] service key=val ..."
    print "  RVI-node     DNS name or IP of host running RVI. "
    print "               default: http://localhost:8801"
    print "  service      Service to invoke in RVI."
    print "  key=val      Named arguments to provide to service."
    print
    print "Example: ./callrvi.py -n http://rvi1.nginfotpdx.net:8801 \\"
    print "                      jlr.com/vin/aaron/4711/test/ping \\"
    print "                      arg1=val1 arg2=val2"
    sys.exit(255)
#
# Check that we have the correct arguments
#
opts, args= getopt.getopt(sys.argv[1:], "n:")
# Default RVI node endpoint; overridden by -n on the command line.
rvi_node = "http://localhost:8801"
for o, a in opts:
    if o == "-n":
        rvi_node = a
    else:
        usage()
# At least the service name must be given.
if len(args) < 1:
    usage()
# Construct a dictionary from the provided paths.
# NOTE(review): this initial assignment is dead code — `i` is immediately
# rebound by the for loop below.
i = 0
service = args[0]
rvi_args = {}
# Each remaining argument is a key=val pair forwarded to the service.
for i in args[1:]:
    print i
    [k, v] = i.split('=')
    rvi_args[k] = v
#
# Setup an outbound JSON-RPC connection to the backend RVI node
# Service Edge.
#
rvi = RVI(rvi_node)
print "RVI Node:    ", rvi_node
print "Service:     ", service
print "args:        ", rvi_args
#
# Send the message.
#
rvi.message(service, rvi_args)
| afan1/rvi_core | python/rvi_call.py | Python | mpl-2.0 | 1,624 | [
"Jaguar"
] | 117ef2b5174f9da775d9843fc16437c9b7558a4de161034343f9d9d27047e1b8 |
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytree for a lower triangular Cholesky factored covariance matrix."""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from ott.geometry import costs
from ott.geometry import matrix_square_root
from ott.tools.gaussian_mixture import linalg
@jax.tree_util.register_pytree_node_class
class ScaleTriL:
  """Pytree for a lower triangular Cholesky-factored covariance matrix.

  The covariance is stored as a flat vector of the non-zero entries of its
  lower-triangular Cholesky factor, with the diagonal kept in log space
  (see ``cholesky()``, which exponentiates it back). This makes all
  parameters unconstrained, which is convenient for gradient-based
  optimization.
  """

  def __init__(self, params: jnp.ndarray, size: int):
    # params: flattened lower-triangular entries, diagonal in log space.
    # size: dimension of the (size x size) covariance matrix.
    self._params = params
    self._size = size

  @classmethod
  def from_points_and_weights(
      cls,
      points: jnp.ndarray,
      weights: jnp.ndarray,
  ) -> Tuple[jnp.ndarray, 'ScaleTriL']:
    """Get a mean and a ScaleTriL from a set of points and weights."""
    mean, cov = linalg.get_mean_and_cov(points=points, weights=weights)
    return mean, cls.from_covariance(cov)

  @classmethod
  def from_random(
      cls,
      key: jnp.ndarray,
      n_dimensions: int,
      stdev: Optional[float] = 0.1,
      dtype: jnp.dtype = jnp.float32,
  ) -> 'ScaleTriL':
    """Construct a random ScaleTriL.

    Args:
      key: pseudo-random number generator key
      n_dimensions: number of dimensions
      stdev: desired standard deviation (around 0) for the log eigenvalues
      dtype: data type for the covariance matrix

    Returns:
      A ScaleTriL.
    """
    # generate a random orthogonal matrix
    key, subkey = jax.random.split(key)
    q = linalg.get_random_orthogonal(key=subkey, dim=n_dimensions, dtype=dtype)
    # generate random eigenvalues (positive by construction: stdev * exp(N(0,1)))
    eigs = stdev * jnp.exp(
        jax.random.normal(key=key, shape=(n_dimensions,), dtype=dtype))
    # random positive definite matrix: q diag(eigs) q^T
    # (eigs broadcast over rows of q scales the columns, i.e. q @ diag(eigs))
    sigma = jnp.matmul(
        jnp.expand_dims(eigs, -2) * q, jnp.transpose(q))
    # cholesky factorization
    chol = jnp.linalg.cholesky(sigma)
    # flatten: log the diagonal first so params are unconstrained
    m = linalg.apply_to_diag(chol, jnp.log)
    flat = linalg.tril_to_flat(m)
    return cls(params=flat, size=n_dimensions)

  @classmethod
  def from_cholesky(
      cls,
      cholesky: jnp.ndarray
  ) -> 'ScaleTriL':
    """Construct ScaleTriL from a Cholesky factor of a covariance matrix."""
    m = linalg.apply_to_diag(cholesky, jnp.log)
    flat = linalg.tril_to_flat(m)
    return cls(params=flat, size=cholesky.shape[-1])

  @classmethod
  def from_covariance(
      cls,
      covariance: jnp.ndarray,
  ) -> 'ScaleTriL':
    """Construct ScaleTriL from a covariance matrix."""
    cholesky = jnp.linalg.cholesky(covariance)
    return cls.from_cholesky(cholesky)

  @property
  def params(self) -> jnp.ndarray:
    """Internal representation (flat lower-triangle, log-diagonal)."""
    return self._params

  @property
  def size(self) -> int:
    """Size of the covariance matrix."""
    return self._size

  @property
  def dtype(self):
    """Data type of the covariance matrix."""
    return self._params.dtype

  def cholesky(self) -> jnp.ndarray:
    """Get a lower triangular Cholesky factor for the covariance matrix."""
    m = linalg.flat_to_tril(self._params, size=self._size)
    # undo the log transform applied to the diagonal at construction time
    return linalg.apply_to_diag(m, jnp.exp)

  def covariance(self) -> jnp.ndarray:
    """Get the covariance matrix (L L^T)."""
    cholesky = self.cholesky()
    return jnp.matmul(cholesky, jnp.transpose(cholesky))

  def covariance_sqrt(self) -> jnp.ndarray:
    """Get the (symmetric) square root of the covariance matrix."""
    return linalg.matrix_powers(self.covariance(), (0.5,))[0]

  def log_det_covariance(self) -> jnp.ndarray:
    """Get the log of the determinant of the covariance matrix."""
    # det(L L^T) = prod(diag(L))^2, hence 2 * sum(log(diag(L)))
    diag = jnp.diagonal(self.cholesky(), axis1=-2, axis2=-1)
    return 2. * jnp.sum(jnp.log(diag), axis=-1)

  def centered_to_z(self, x_centered: jnp.ndarray) -> jnp.ndarray:
    """Map centered points to standardized centered points (i.e. cov(z) = I)."""
    return linalg.invmatvectril(
        m=self.cholesky(), x=x_centered, lower=True)

  def z_to_centered(self, z: jnp.ndarray) -> jnp.ndarray:
    """Scale standardized points to points with the specified covariance."""
    return jnp.transpose(jnp.matmul(self.cholesky(), jnp.transpose(z)))

  def w2_dist(
      self,
      other: 'ScaleTriL') -> jnp.ndarray:
    r"""Wasserstein distance W_2^2 to another Gaussian with same mean.

    Args:
      other: Scale for the other Gaussian

    Returns:
      The W_2^2 distance
    """
    dimension = self.size
    def _flatten_cov(cov: jnp.ndarray) -> jnp.ndarray:
      # Pack into the [mean, flattened covariance] layout expected by the
      # Bures cost; the leading zeros stand in for the (shared) mean.
      cov = cov.reshape(cov.shape[:-2] + (dimension * dimension,))
      return jnp.concatenate([jnp.zeros(dimension), cov], axis=-1)
    x0 = _flatten_cov(self.covariance())
    x1 = _flatten_cov(other.covariance())
    cost_fn = costs.Bures(dimension=dimension)
    return (cost_fn.norm(x0) + cost_fn.norm(x1) +
            cost_fn.pairwise(x0, x1))[..., 0]

  def transport(
      self,
      dest_scale: 'ScaleTriL',
      points: jnp.ndarray
  ) -> jnp.ndarray:
    """Transport between 0-mean normal w/ current scale to one w/ dest_scale.

    Uses the closed-form optimal transport map between Gaussians:
    m = S0^{-1/2} (S0^{1/2} S1 S0^{1/2})^{1/2} S0^{-1/2}.

    Args:
      dest_scale: destination Scale
      points: points to transport

    Returns:
      Points transported to a Gaussian with the new scale.
    """
    sqrt0, sqrt0_inv = linalg.matrix_powers(self.covariance(), (0.5, -0.5))
    sigma1 = dest_scale.covariance()
    m = matrix_square_root.sqrtm_only(
        jnp.matmul(sqrt0, jnp.matmul(sigma1, sqrt0)))
    m = jnp.matmul(sqrt0_inv, jnp.matmul(m, sqrt0_inv))
    return jnp.transpose(jnp.matmul(m, jnp.transpose(points)))

  def tree_flatten(self):
    # Pytree protocol: params are traced children, size is static aux data.
    children = (self.params,)
    aux_data = {'size': self.size}
    return children, aux_data

  @classmethod
  def tree_unflatten(cls, aux_data, children):
    return cls(*children, **aux_data)

  def __repr__(self):
    class_name = type(self).__name__
    children, aux = self.tree_flatten()
    return '{}({})'.format(
        class_name, ', '.join([repr(c) for c in children] +
                              [f'{k}: {repr(v)}' for k, v in aux.items()]))

  def __hash__(self):
    # NOTE(review): tree_flatten returns (leaf_list, treedef); hashing a
    # tuple that contains a list (and jnp arrays) will typically raise
    # TypeError — confirm whether __hash__ is ever actually invoked.
    return jax.tree_util.tree_flatten(self).__hash__()

  def __eq__(self, other):
    return jax.tree_util.tree_flatten(self) == jax.tree_util.tree_flatten(other)
| google-research/ott | ott/tools/gaussian_mixture/scale_tril.py | Python | apache-2.0 | 6,657 | [
"Gaussian"
] | 92421b4cb264f26ba62d0d6fb5fb024c57cd4ce2085d0aaef13d57001a204269 |
#!/usr/bin/env python
"""A kernel that creates a new ASCII file with a given size and name.
"""
__author__ = "The ExTASY project <vivek.balasubramanian@rutgers.edu>"
__copyright__ = "Copyright 2015, http://www.extasy-project.org/"
__license__ = "MIT"
from copy import deepcopy
from radical.ensemblemd.exceptions import ArgumentError
from radical.ensemblemd.exceptions import NoKernelConfigurationError
from radical.ensemblemd.engine import get_engine
from radical.ensemblemd.kernel_plugins.kernel_base import KernelBase
# ------------------------------------------------------------------------------
_KERNEL_INFO = {
"name": "custom.gromacs",
"description": "Creates a new file of given size and fills it with random ASCII characters.",
"arguments": {"--grompp=":
{
"mandatory": True,
"description": "Input parameter filename"
},
"--topol=":
{
"mandatory": True,
"description": "Topology filename"
}
},
"machine_configs":
{
"*": {
"environment" : {"FOO": "bar"},
"pre_exec" : [],
"executable" : "python",
"uses_mpi" : True
},
"xsede.stampede":
{
"environment" : {},
"pre_exec" : ["module load TACC","module load intel/15.0.2","module load boost","module load cxx11","module load gromacs","module load python"],
"executable" : ["python"],
"uses_mpi" : True
},
"epsrc.archer":
{
"environment" : {},
"pre_exec" : ["module load packages-archer","export PATH=$PATH:/work/z01/shared/gromacs-5.1.2/bin","module load python-compute/2.7.6"],
"executable" : ["python"],
"uses_mpi" : True
},
}
}
# ------------------------------------------------------------------------------
#
class kernel_gromacs(KernelBase):
    """Ensemble-MD kernel that runs a GROMACS step through a run.py wrapper."""

    def __init__(self):
        """Initialize the kernel from the static _KERNEL_INFO description."""
        # Fixed: the docstring used to appear AFTER the super() call, where it
        # is a no-op string statement rather than a docstring.
        super(kernel_gromacs, self).__init__(_KERNEL_INFO)

    # --------------------------------------------------------------------------
    #
    @staticmethod
    def get_name():
        """Return the canonical kernel name ("custom.gromacs")."""
        return _KERNEL_INFO["name"]

    def _bind_to_resource(self, resource_key):
        """(PRIVATE) Implements parent class method.

        Resolves the machine configuration for ``resource_key`` (falling back
        to the generic "*" entry) and populates the executable, arguments,
        environment, MPI flag and pre-exec commands for this kernel.

        Raises:
            NoKernelConfigurationError: if neither the resource key nor a
                generic "*" entry exists in the kernel configuration.
        """
        if resource_key not in _KERNEL_INFO["machine_configs"]:
            if "*" in _KERNEL_INFO["machine_configs"]:
                # Fall-back to generic resource key
                resource_key = "*"
            else:
                raise NoKernelConfigurationError(kernel_name=_KERNEL_INFO["name"], resource_key=resource_key)
        cfg = _KERNEL_INFO["machine_configs"][resource_key]
        # change to pmemd.MPI when cores can be set
        arguments = ['run.py', '--mdp', '%s' % self.get_arg("--grompp="), '--gro', 'start.gro', '--top', '%s' % self.get_arg('--topol='), '--out', 'out.gro']
        self._executable = cfg["executable"]
        self._arguments = arguments
        self._environment = cfg["environment"]
        self._uses_mpi = cfg["uses_mpi"]
        self._pre_exec = cfg["pre_exec"]
# ------------------------------------------------------------------------------
| radical-cybertools/ExTASY | examples/grlsd-on-archer/kernel_defs/gromacs.py | Python | mit | 3,445 | [
"Gromacs"
] | 92f7c7136374cd0206ed59d45d921e8cede50a5b0ece98892e10cb15f71d7b2c |
# Copyright (c) 2013 Per Unneberg
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# NOTE: this test suite only verifies that the commands are formatted
# correctly. No actual spawning of subprocesses is done.
import os
import re
import unittest
import luigi
import logging
from luigi.mock import MockFile
import ratatosk.lib.files.fastq
import ratatosk.lib.tools.samtools
import ratatosk.lib.tools.picard
import ratatosk.lib.tools.gatk
import ratatosk.lib.tools.fastqc
import ratatosk.lib.utils.cutadapt
import ratatosk.lib.utils.misc
import ratatosk.lib.annotation.snpeff
import ratatosk.lib.annotation.annovar
import ratatosk.lib.align.bwa
from ratatosk.config import get_config, get_custom_config
File = MockFile  # shorthand used by the tests below
logging.basicConfig(level=logging.DEBUG)
# Common fixture names: one paired-end sample plus reference data under data/.
sample = "sample1"
sample2 = "sample2"
indir = "data"
read1_suffix = "_1"
read2_suffix = "_2"
fastq1 = os.path.join(indir, sample + read1_suffix + ".fastq.gz")
fastq2 = os.path.join(indir, sample + read2_suffix + ".fastq.gz")
ref = os.path.join(indir, "chr11.fa")
sai1 = os.path.join(indir, sample + read1_suffix + ".sai")
sai2 = os.path.join(indir, sample + read2_suffix + ".sai")
sam = os.path.join(indir, sample + ".sam")
bam = os.path.join(indir, sample + ".bam")
sortbam = os.path.join(indir, sample + ".sort.bam")
localconf = "pipeconf.yaml"
local_scheduler = '--local-scheduler'
# Detect a running ratatoskd central scheduler; if present, the
# --local-scheduler flag must not be passed to luigi.
process = os.popen("ps x -o pid,args | grep ratatoskd | grep -v grep").read() #sometimes have to use grep -v grep
if process:
    local_scheduler = None
def _luigi_args(args):
    """Return *args* with '--local-scheduler' prepended when no central
    ratatoskd scheduler was detected at import time; otherwise unchanged."""
    return [local_scheduler] + args if local_scheduler else args
def _prune_luigi_tmp(args):
"""Remove luigi tmp string from file names"""
return [re.sub(r'-luigi-tmp-[0-9]+(\.gz)?', '', x) for x in args]
def setUpModule():
    """Load the ratatosk default config plus the local pipeconf.yaml into the
    module-level config singletons used by all test cases."""
    global cnf, custom_cnf
    cnf = get_config()
    cnf.clear()
    cnf.add_config_path(os.path.join(os.path.dirname(__file__), os.pardir, "config", "ratatosk.yaml"))
    cnf.add_config_path(localconf)
    custom_cnf = get_custom_config()
    custom_cnf.clear()
    custom_cnf.add_config_path(localconf)
    custom_cnf.reload()
def tearDownModule():
    """Reset the config singletons so later test modules start clean."""
    cnf.clear()
    custom_cnf.clear()
class TestSamtoolsWrappers(unittest.TestCase):
    """Verify command-line formatting of the samtools task wrappers.

    No subprocesses are spawned; only the generated arglists are checked.
    """
    def test_samtools_view(self):
        task = ratatosk.lib.tools.samtools.SamToBam(target=bam)
        self.assertEqual(['samtools', 'view', '-bSh', 'data/sample1.sam', '>', 'data/sample1.bam'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_sortbam(self):
        task = ratatosk.lib.tools.samtools.SortBam(target=sortbam)
        self.assertEqual(['samtools', 'sort', 'data/sample1.bam', 'data/sample1.sort'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
class TestMiscWrappers(unittest.TestCase):
    """Verify command-line formatting of misc wrappers (linking, cutadapt,
    fastqc, resyncMates); test_fastqln additionally runs a real luigi task."""
    def setUp(self):
        # Create an empty gzip-named fastq fixture under ./indir
        self.fastqfile = os.path.relpath(os.path.join(os.path.dirname(__file__), 'indir', 'file.fastq.gz'), os.curdir)
        MockFile._file_contents = {}
        os.makedirs('indir')
        with open(self.fastqfile, 'w') as fh:
            fh.write("")
    def tearDown(self):
        if os.path.exists(self.fastqfile):
            os.unlink(self.fastqfile)
            os.rmdir(os.path.dirname(self.fastqfile))
    def test_luigihelp(self):
        # luigi exits after printing help; the exception is expected
        try:
            luigi.run(['-h'], main_task_cls=ratatosk.lib.files.fastq.FastqFileLink)
        except:
            pass
    def test_fastqln(self):
        outfile = os.path.join(os.path.dirname(self.fastqfile), os.pardir, 'file.fastq.gz')
        luigi.run(_luigi_args(['--use-long-names', '--target', outfile, '--outdir', os.path.dirname(outfile), '--indir', os.path.dirname(self.fastqfile), '--config-file', os.path.relpath(os.path.join(os.path.dirname(__file__), os.pardir, "config", "ratatosk.yaml"), os.curdir)]), main_task_cls=ratatosk.lib.files.fastq.FastqFileLink)
        self.assertTrue(os.path.exists(outfile))
        self.assertTrue(os.path.exists(os.readlink(outfile)))
        os.unlink(outfile)
    def test_cutadapt(self):
        task = ratatosk.lib.utils.cutadapt.Cutadapt(target=fastq1.replace(".fastq.gz", ".trimmed.fastq.gz"), read1_suffix="_1")
        # Needed in order to override pipeconf.yaml. This is a bug;
        # setting it in class instantiation should override config
        # file settings
        task._parent_cls=[ratatosk.lib.utils.cutadapt.InputFastqFile]
        self.assertEqual(['cutadapt', '-a', 'AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC', 'data/sample1_1.fastq.gz', '-o', 'data/sample1_1.trimmed.fastq.gz', '>', 'data/sample1_1.trimmed.fastq.cutadapt_metrics'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    # NB: If fastqc is actually run this test will fail since
    # DefaultShellJobRunner._fix_paths won't return any tmp_files
    # since the output directory exists. This shouldn't affect the
    # behaviour of fastqc job tasks though
    def test_fastqc(self):
        task = ratatosk.lib.tools.fastqc.FastQC(target='data/sample1_1_fastqc')
        self.assertEqual(['fastqc', '-o', 'data/sample1_1_fastqc', 'data/sample1_1.fastq.gz'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_resyncmates_after_trim(self):
        task = ratatosk.lib.utils.misc.ResyncMates(target=[fastq1.replace(".fastq.gz", ".trimmed.sync.fastq.gz"),
                                                           fastq2.replace(".fastq.gz", ".trimmed.sync.fastq.gz")],
                                                   parent_task=('ratatosk.lib.utils.cutadapt.Cutadapt','ratatosk.lib.utils.cutadapt.Cutadapt',),
                                                   executable="resyncMates.pl")
        self.assertEqual(['resyncMates.pl', '-i', 'data/sample1_1.trimmed.fastq.gz', '-j', 'data/sample1_2.trimmed.fastq.gz', '-o', 'data/sample1_1.trimmed.sync.fastq.gz', '-p', 'data/sample1_2.trimmed.sync.fastq.gz'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
class TestBwaWrappers(unittest.TestCase):
    """Verify command-line formatting of the bwa aln/sampe/index wrappers."""
    def test_bwaaln(self):
        task = ratatosk.lib.align.bwa.Aln(target=sai1)
        self.assertEqual(['bwa', 'aln', '-t 1', 'data/chr11.fa', 'data/sample1_1.fastq.gz', '>', 'data/sample1_1.sai'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_bwasampe(self):
        task = ratatosk.lib.align.bwa.Sampe(target=sam, add_label=(read1_suffix, read2_suffix))
        self.assertEqual(
            ['bwa', 'sampe', '-r', '"@RG\tID:data/sample1\tSM:data/sample1\tPL:Illumina"', 'data/chr11.fa', 'data/sample1_1.sai', 'data/sample1_2.sai', 'data/sample1_1.fastq.gz', 'data/sample1_2.fastq.gz', '>', 'data/sample1.sam'],
            _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
            )
    def test_bwaindex(self):
        task = ratatosk.lib.align.bwa.Index(target=ref + ".bwt")
        self.assertEqual(['bwa', 'index', 'data/chr11.fa'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
# Temporary target generator
def merge_bam_generator(task):
    """Dummy target generator: returns a fixed pair of sorted-bam paths so
    MergeSamFiles input formatting can be checked without real files."""
    return ["data/sample{0}.sort.bam".format(n) for n in (1, 2)]
@unittest.skipIf((os.getenv("PICARD_HOME") is None or os.getenv("PICARD_HOME") == ""), "No Environment PICARD_HOME set; skipping")
class TestPicardWrappers(unittest.TestCase):
    """Verify command-line formatting of the picard task wrappers.

    Requires PICARD_HOME to point at a picard jar directory; no jars are run.
    """
    def _path(self, exe):
        # Absolute path of a picard jar under $PICARD_HOME
        return os.path.join(os.environ["PICARD_HOME"], exe)
    def test_picard_sortbam(self):
        task = ratatosk.lib.tools.picard.SortSam(target=sortbam)
        self.assertEqual(['java', '-Xmx2g', '-jar', self._path('SortSam.jar'), 'SO=coordinate MAX_RECORDS_IN_RAM=750000', 'VALIDATION_STRINGENCY=SILENT', 'INPUT=', 'data/sample1.bam', 'OUTPUT=', 'data/sample1.sort.bam'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_picard_create_sequence_dictionary(self):
        task = ratatosk.lib.tools.picard.CreateSequenceDictionary(target="data/chr11.dict")
        self.assertEqual(['java', '-Xmx2g', '-jar', self._path('CreateSequenceDictionary.jar'), 'VALIDATION_STRINGENCY=SILENT', 'REFERENCE=', 'data/chr11.fa', 'OUTPUT=', 'data/chr11.dict'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_picard_alignmentmetrics(self):
        task = ratatosk.lib.tools.picard.AlignmentMetrics(target=sortbam.replace(".bam", ".align_metrics"), options=['REFERENCE_SEQUENCE={}'.format(ref)])
        self.assertEqual(['java', '-Xmx2g', '-jar', self._path('CollectAlignmentSummaryMetrics.jar'), 'REFERENCE_SEQUENCE=data/chr11.fa', 'VALIDATION_STRINGENCY=SILENT', 'INPUT=', 'data/sample1.sort.bam', 'OUTPUT=', 'data/sample1.sort.align_metrics'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_picard_insertmetrics(self):
        task = ratatosk.lib.tools.picard.InsertMetrics(target=sortbam.replace(".bam", ".insert_metrics"), options=['REFERENCE_SEQUENCE={}'.format(ref)])
        self.assertEqual(['java', '-Xmx2g', '-jar', self._path('CollectInsertSizeMetrics.jar'), 'REFERENCE_SEQUENCE=data/chr11.fa', 'VALIDATION_STRINGENCY=SILENT', 'INPUT=', 'data/sample1.sort.bam', 'OUTPUT=', 'data/sample1.sort.insert_metrics', 'HISTOGRAM_FILE=', 'data/sample1.sort.insert_hist'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_picard_dupmetrics(self):
        task = ratatosk.lib.tools.picard.DuplicationMetrics(target=sortbam.replace(".bam", ".dup.bam"))
        self.assertEqual(['java', '-Xmx2g', '-jar', self._path('MarkDuplicates.jar'), 'VALIDATION_STRINGENCY=SILENT', 'INPUT=', 'data/sample1.sort.bam', 'OUTPUT=', 'data/sample1.sort.dup.bam', 'METRICS_FILE=', 'data/sample1.sort.dup_metrics'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_picard_hsmetrics(self):
        task = ratatosk.lib.tools.picard.HsMetrics(target=sortbam.replace(".bam", ".hs_metrics"))
        self.assertEqual(['java', '-Xmx2g', '-jar', self._path('CalculateHsMetrics.jar'), 'VALIDATION_STRINGENCY=SILENT', 'INPUT=', 'data/sample1.sort.bam', 'OUTPUT=', 'data/sample1.sort.hs_metrics', 'BAIT_INTERVALS=', 'data/chr11_baits.interval_list', 'TARGET_INTERVALS=', 'data/chr11_targets.interval_list'],
                         _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
    def test_picard_metrics(self):
        # PicardMetrics is a meta-task; check its requires() list instead of an arglist
        task = ratatosk.lib.tools.picard.PicardMetrics(target=sortbam.replace(".bam", ""))
        metrics = [ratatosk.lib.tools.picard.InsertMetrics(target=sortbam.replace(".bam", ".insert_metrics")),
                   ratatosk.lib.tools.picard.HsMetrics(target=sortbam.replace(".bam", ".hs_metrics")),
                   ratatosk.lib.tools.picard.AlignmentMetrics(target=sortbam.replace(".bam", ".align_metrics"))]
        self.assertEqual(task.requires(), metrics)
    # NOTE: if no target_generator exists will be impossible to check
    # formatting of input. Here create dummy file names
    def test_merge_sam_files(self):
        mergebam = "data/sample.sort.merge.bam"
        task = ratatosk.lib.tools.picard.MergeSamFiles(target=mergebam, target_generator_handler='test.test_wrapper.merge_bam_generator')
        self.assertEqual(sorted(['java', '-Xmx2g', '-jar', self._path('MergeSamFiles.jar'), 'SO=coordinate TMP_DIR=./tmp', 'VALIDATION_STRINGENCY=SILENT', 'OUTPUT=', 'data/sample.sort.merge.bam', 'INPUT=', 'data/sample1.sort.bam', 'INPUT=', 'data/sample2.sort.bam']),
                         sorted(_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])))
def gatk_vcf_generator(task):
    """Dummy target generator: returns two fixed vcf names for exercising
    GATK CombineVariants input formatting."""
    return ["vcf{0}.vcf".format(n) for n in (1, 2)]
@unittest.skipIf((os.getenv("GATK_HOME") is None or os.getenv("GATK_HOME") == ""), "No environment GATK_HOME set; skipping")
class TestGATKWrappers(unittest.TestCase):
def setUp(self):
self.mergebam = os.path.join(indir, "sample.sort.merge.bam")
self.gatk = os.path.join(os.environ["GATK_HOME"], 'GenomeAnalysisTK.jar')
def test_realigner_target_creator(self):
task = ratatosk.lib.tools.gatk.RealignerTargetCreator(target=self.mergebam.replace(".bam", ".intervals"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T RealignerTargetCreator', '-nt 1', '', '-I', 'data/sample.sort.merge.bam', '-o', 'data/sample.sort.merge.intervals', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_indelrealigner(self):
task = ratatosk.lib.tools.gatk.IndelRealigner(target=self.mergebam.replace(".bam", ".realign.bam"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T IndelRealigner', '', '-I', 'data/sample.sort.merge.bam', '-o', 'data/sample.sort.merge.realign.bam', '--targetIntervals', 'data/sample.sort.merge.intervals', '-known data/sample.sort.merge.vcf', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_base_recalibrator(self):
task = ratatosk.lib.tools.gatk.BaseRecalibrator(target=self.mergebam.replace(".bam", ".realign.recal_data.grp"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T BaseRecalibrator', '-I', 'data/sample.sort.merge.realign.bam', '-o', 'data/sample.sort.merge.realign.recal_data.grp', '-R', 'data/chr11.fa', ' -knownSites knownSites1.vcf -knownSites knownSites2.vcf'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_printreads(self):
task = ratatosk.lib.tools.gatk.PrintReads(target=self.mergebam.replace(".bam", ".realign.recal.bam"))
self.assertEqual(
['java', '-Xmx2g', '-jar', self.gatk, '-T PrintReads', '-I', 'data/sample.sort.merge.realign.bam', '-o', 'data/sample.sort.merge.realign.recal.bam', '-BQSR', 'data/sample.sort.merge.realign.recal_data.grp', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_clipreads(self):
task = ratatosk.lib.tools.gatk.ClipReads(target=self.mergebam.replace(".bam", ".realign.recal.clip.bam"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T ClipReads', '--cyclesToTrim 1-5 --clipRepresentation WRITE_NS', '-I', 'data/sample.sort.merge.realign.recal.bam', '-o', 'data/sample.sort.merge.realign.recal.clip.bam', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_unifiedgenotyper(self):
task = ratatosk.lib.tools.gatk.UnifiedGenotyper(target=self.mergebam.replace(".bam", ".realign.recal.clip.vcf"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T UnifiedGenotyper', '-stand_call_conf 30.0 -stand_emit_conf 10.0 --downsample_to_coverage 30 --output_mode EMIT_VARIANTS_ONLY -glm BOTH', '-nt 1', '--dbsnp', 'data/dbsnp132_chr11.vcf', '-I', 'data/sample.sort.merge.realign.recal.clip.bam', '-o', 'data/sample.sort.merge.realign.recal.clip.vcf', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_unifiedgenotyper_alleles(self):
task = ratatosk.lib.tools.gatk.UnifiedGenotyperAlleles(target=self.mergebam.replace(".bam", ".realign.recal.clip-genotype.vcf"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T UnifiedGenotyper', '-stand_call_conf 30.0 -stand_emit_conf 10.0 --downsample_to_coverage 30 --output_mode EMIT_ALL_SITES -glm BOTH', '-nt 1', '--dbsnp', 'data/dbsnp132_chr11.vcf', '--genotyping_mode', 'GENOTYPE_GIVEN_ALLELES', '-I', 'data/sample.sort.merge.realign.recal.clip.bam', '-o', 'data/sample.sort.merge.realign.recal.clip-genotype.vcf', '-R', 'data/chr11.fa','--alleles', 'data/sample.sort.merge.realign.recal.clip.vcf'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_variantfiltration(self):
task = ratatosk.lib.tools.gatk.VariantFiltration(target=self.mergebam.replace(".bam", ".realign.recal.clip.filtered.vcf"),
options=['--clusterWindowSize 10 --clusterSize 3 --filterExpression "MQ0 >= 4 && ((MQ0 / (1.0 * DP)) > 0.1)" --filterName "HARD_TO_VALIDATE" --filterExpression "DP < 10" --filterName "LowCoverage" --filterExpression "QUAL < 30.0" --filterName "VeryLowQual" --filterExpression "QUAL > 30.0 && QUAL < 50.0" --filterName "LowQual" --filterExpression "QD < 1.5" --filterName "LowQD"', '--variant', 'data/sample.sort.merge.realign.recal.clip.vcf'])
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T VariantFiltration','--clusterWindowSize 10 --clusterSize 3 --filterExpression "MQ0 >= 4 && ((MQ0 / (1.0 * DP)) > 0.1)" --filterName "HARD_TO_VALIDATE" --filterExpression "DP < 10" --filterName "LowCoverage" --filterExpression "QUAL < 30.0" --filterName "VeryLowQual" --filterExpression "QUAL > 30.0 && QUAL < 50.0" --filterName "LowQual" --filterExpression "QD < 1.5" --filterName "LowQD"', '--variant', 'data/sample.sort.merge.realign.recal.clip.vcf', '--variant', 'data/sample.sort.merge.realign.recal.clip.vcf', '--out', 'data/sample.sort.merge.realign.recal.clip.filtered.vcf', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_varianteval(self):
task = ratatosk.lib.tools.gatk.VariantEval(target=self.mergebam.replace(".bam", ".realign.recal.clip.filtered.eval_metrics"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T VariantEval', '-ST Filter -l INFO --doNotUseAllStandardModules --evalModule CompOverlap --evalModule CountVariants --evalModule GenotypeConcordance --evalModule TiTvVariantEvaluator --evalModule ValidationReport --stratificationModule Filter', '--dbsnp', 'data/dbsnp132_chr11.vcf', '--eval', 'data/sample.sort.merge.realign.recal.clip.filtered.vcf', '-o', 'data/sample.sort.merge.realign.recal.clip.filtered.eval_metrics', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_variant_annotator(self):
task = ratatosk.lib.tools.gatk.VariantAnnotator(target=self.mergebam.replace(".bam", "-gatkann.vcf"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T VariantAnnotator', '', '--variant', 'data/sample.sort.merge.vcf', '--out', 'data/sample.sort.merge-gatkann.vcf', '-R', 'data/chr11.fa', '-A', 'BaseQualityRankSumTest', '-A', 'DepthOfCoverage', '-A', 'FisherStrand', '-A', 'GCContent', '-A', 'HaplotypeScore', '-A', 'HomopolymerRun', '-A', 'MappingQualityRankSumTest', '-A', 'MappingQualityZero', '-A', 'QualByDepth', '-A', 'ReadPosRankSumTest', '-A', 'RMSMappingQuality'], _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_GATK_snpeff_variant_annotator(self):
task = ratatosk.lib.tools.gatk.VariantSnpEffAnnotator(target=self.mergebam.replace(".bam", "-annotated.vcf"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T VariantAnnotator', '', '--variant', 'data/sample.sort.merge.vcf', '--out', 'data/sample.sort.merge-annotated.vcf', '--snpEffFile', 'data/sample.sort.merge-effects.vcf', '-R', 'data/chr11.fa', '-A', 'SnpEff'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
# def test_combine_variants(self):
# task = ratatosk.lib.tools.gatk.CombineVariants(target=self.mergebam.replace(".bam", "-variants-combined.vcf"), ref='data/chr11.fa',
# target_generator_handler="test.test_wrapper.gatk_vcf_generator")
# self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T CombineVariants', '-V', 'vcf1.vcf', '-V', 'vcf2.vcf', '-o', 'data/sample.sort.merge-variants-combined.vcf', '-R', 'data/chr11.fa'],
# _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_combine_variants(self):
task = ratatosk.lib.tools.gatk.CombineSplitVariants(target=self.mergebam.replace(".bam", "-variants-combined.vcf"), ref='data/chr11.fa')
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T CombineVariants', '-V', 'data/sample.sort.merge-variants-combined-split/sample.sort.merge-variants-combined-chr11.vcf', '-o', 'data/sample.sort.merge-variants-combined.vcf', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_select_variants(self):
task = ratatosk.lib.tools.gatk.SelectVariants(target=self.mergebam.replace(".bam", "-snp-all.vcf"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T SelectVariants', '--selectTypeToInclude', 'SNP', '--selectTypeToInclude', 'INDEL', '--selectTypeToInclude', 'MIXED', '--selectTypeToInclude', 'MNP', '--selectTypeToInclude', 'SYMBOLIC', '--selectTypeToInclude', 'NO_VARIATION', '--variant', 'data/sample.sort.merge-snp.vcf', '--out', 'data/sample.sort.merge-snp-all.vcf', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_select_snp_variants(self):
task = ratatosk.lib.tools.gatk.SelectSnpVariants(target=self.mergebam.replace(".bam", "-snp.vcf"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T SelectVariants', '--selectTypeToInclude', 'SNP', '--variant', 'data/sample.sort.merge.vcf', '--out', 'data/sample.sort.merge-snp.vcf', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_select_indel_variants(self):
task = ratatosk.lib.tools.gatk.SelectIndelVariants(target=self.mergebam.replace(".bam", "-indel.vcf"))
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T SelectVariants', '--selectTypeToInclude', 'INDEL', '--selectTypeToInclude', 'MIXED', '--selectTypeToInclude', 'MNP', '--selectTypeToInclude', 'SYMBOLIC', '--selectTypeToInclude', 'NO_VARIATION', '--variant', 'data/sample.sort.merge.vcf', '--out', 'data/sample.sort.merge-indel.vcf', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_variant_recalibrator(self):
"""Test variant recalibation. Note that commands that require
training data will not work; only JEXL filtering is
applicable"""
task = ratatosk.lib.tools.gatk.VariantRecalibrator(target=self.mergebam.replace(".bam", ".tranches"), ref="data/chr11.fa",
options=["-an", "QD", "-resource:hapmap,VCF,known=false,training=true,truth=true,prior=15.0", "data/hapmap_3.3.vcf"])
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T VariantRecalibrator', '-an', 'QD', '-resource:hapmap,VCF,known=false,training=true,truth=true,prior=15.0', 'data/hapmap_3.3.vcf', '--input', 'data/sample.sort.merge.vcf', '--tranches_file', 'data/sample.sort.merge.tranches', '--mode', 'BOTH', '--recal_file', 'data/sample.sort.merge.recal', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_variant_snp_recalibrator(self):
task = ratatosk.lib.tools.gatk.VariantSnpRecalibrator(target=self.mergebam.replace(".bam", ".tranches"),
train_hapmap="data/hapmap_3.3.vcf",
ref="data/chr11.fa", dbsnp="data/dbsnp132_chr11.vcf")
self.assertEqual(['java', '-Xmx2g', '-jar', self.gatk, '-T VariantRecalibrator', '-an', 'QD', '-an', 'HaplotypeScore', '-an', 'MQRankSum', '-an', 'ReadPosRankSum', '-an', 'FS', '-an', 'MQ', '-an', 'DP', '-resource:hapmap,VCF,known=false,training=true,truth=true,prior=15.0', 'data/hapmap_3.3.vcf', '-resource:dbsnp,VCF,known=true,training=false,truth=false,prior=8.0', 'data/dbsnp132_chr11.vcf', '--input', 'data/sample.sort.merge.vcf', '--tranches_file', 'data/sample.sort.merge.tranches', '--mode', 'SNP', '--recal_file', 'data/sample.sort.merge.recal', '-R', 'data/chr11.fa'],
_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
def test_variant_snp_recalibrator_exome(self):
    """Modified settings for exome. Should contain --maxGaussians"""
    task = ratatosk.lib.tools.gatk.VariantSnpRecalibratorExome(
        target=self.mergebam.replace(".bam", ".tranches"),
        train_hapmap="data/hapmap_3.3.vcf",
        ref="data/chr11.fa", dbsnp="data/dbsnp132_chr11.vcf")
    arglist = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
    expected = ['java', '-Xmx2g', '-jar', self.gatk, '-T VariantRecalibrator',
                '-an', 'QD', '-an', 'HaplotypeScore', '-an', 'MQRankSum',
                '-an', 'ReadPosRankSum', '-an', 'FS', '-an', 'MQ',
                '--maxGaussians', '4', '--percentBadVariants', '0.05',
                '-resource:hapmap,VCF,known=false,training=true,truth=true,prior=15.0',
                'data/hapmap_3.3.vcf',
                '-resource:dbsnp,VCF,known=true,training=false,truth=false,prior=8.0',
                'data/dbsnp132_chr11.vcf',
                '--input', 'data/sample.sort.merge.vcf',
                '--tranches_file', 'data/sample.sort.merge.tranches',
                '--mode', 'SNP',
                '--recal_file', 'data/sample.sort.merge.recal',
                '-R', 'data/chr11.fa']
    self.assertEqual(expected, arglist)
    self.assertIn('--maxGaussians', arglist)
def test_variant_indel_recalibrator(self):
    """Check the generated VariantRecalibrator command line in INDEL mode."""
    task = ratatosk.lib.tools.gatk.VariantIndelRecalibrator(
        target=self.mergebam.replace(".bam", ".tranches"),
        train_indels="data/Mills_Devine_2hit.indels.vcf",
        ref="data/chr11.fa")
    observed = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
    expected = ['java', '-Xmx2g', '-jar', self.gatk, '-T VariantRecalibrator',
                '-an', 'QD', '-an', 'FS', '-an', 'HaplotypeScore',
                '-an', 'ReadPosRankSum',
                '-resource:mills,VCF,known=true,training=true,truth=true,prior=12.0',
                'data/Mills_Devine_2hit.indels.vcf',
                '--input', 'data/sample.sort.merge.vcf',
                '--tranches_file', 'data/sample.sort.merge.tranches',
                '--mode', 'INDEL',
                '--recal_file', 'data/sample.sort.merge.recal',
                '-R', 'data/chr11.fa']
    self.assertEqual(expected, observed)
# TODO: need to test the command on real data
def test_apply_recalibration(self):
task = ratatosk.lib.tools.gatk.ApplyRecalibration(target=self.mergebam.replace(".bam","-filter.vcf"), ref="data/chr11.fa")
print _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
print " ".join(_prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
# TODO: test case for multiple parent classes
# Need to set bam and vcf input
def test_readbackedphasing(self):
task = ratatosk.lib.tools.gatk.ReadBackedPhasing(target=self.mergebam.replace(".bam", "-indel-filter.vcf"))
print _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
@unittest.skipIf((os.getenv("SNPEFF_HOME") is None or os.getenv("SNPEFF_HOME") == ""), "No environment SNPEFF_HOME set; skipping")
class TestSnpEffWrappers(unittest.TestCase):
    """Command-line generation tests for the snpEff annotation wrapper."""

    @classmethod
    def setUpClass(self):
        snpeff_home = os.environ["SNPEFF_HOME"]
        self.bam = os.path.join(indir, "sample.sort.bam")
        self.snpeff = os.path.join(snpeff_home, 'snpEff.jar')
        self.config = os.path.join(snpeff_home, 'snpEff.config')

    def test_snpeff(self):
        """Default vcf output format."""
        task = ratatosk.lib.annotation.snpeff.snpEff(target=self.bam.replace(".bam", "-effects.vcf"))
        observed = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
        expected = ['java', '-Xmx2g', '-jar', self.snpeff, 'eff', '-1',
                    '-i', 'vcf', '-o', 'vcf', '-c', self.config,
                    'GRCh37.64', 'data/sample.sort.vcf', '>',
                    'data/sample.sort-effects.vcf']
        self.assertEqual(expected, observed)

    def test_snpeff_txt(self):
        """Text output format selected via the suffix parameter."""
        task = ratatosk.lib.annotation.snpeff.snpEff(target=self.bam.replace(".bam", "-effects.txt"), suffix=('.txt',))
        observed = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
        expected = ['java', '-Xmx2g', '-jar', self.snpeff, 'eff', '-1',
                    '-i', 'vcf', '-o', 'txt', '-c', self.config,
                    'GRCh37.64', 'data/sample.sort.vcf', '>',
                    'data/sample.sort-effects.txt']
        self.assertEqual(expected, observed)
@unittest.skipIf((os.getenv("ANNOVAR_HOME") is None or os.getenv("ANNOVAR_HOME") == ""), "No environment ANNOVAR_HOME set; skipping")
class TestAnnovarWrappers(unittest.TestCase):
    """Command-line generation tests for the annovar annotation wrappers."""

    @classmethod
    def setUpClass(self):
        self.bam = os.path.join(indir, "sample.sort.bam")

    def _path(self, exe):
        """Absolute path of an executable inside ANNOVAR_HOME."""
        return os.path.join(os.environ["ANNOVAR_HOME"], exe)

    def test_convert_annovar(self):
        task = ratatosk.lib.annotation.annovar.Convert2Annovar(target=self.bam.replace(".bam", "-avinput.txt"))
        observed = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
        expected = [self._path('convert2annovar.pl'), '-format vcf4',
                    'data/sample.sort.vcf', '--outfile',
                    'data/sample.sort-avinput.txt']
        self.assertEqual(expected, observed)

    def test_summarize_annovar(self):
        task = ratatosk.lib.annotation.annovar.SummarizeAnnovar(target=self.bam.replace(".bam", "-avinput.txt.log"))
        observed = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
        expected = [self._path('summarize_annovar.pl'), '-remove',
                    '-buildver hg19', '-verdbsnp 132', '-ver1000g 1000g2011may',
                    'data/sample.sort-avinput.txt', self._path('humandb')]
        self.assertEqual(expected, observed)
class TestVcfToolsWrappers(unittest.TestCase):
    # Placeholder fixture: only sets up the shared bam path; no test
    # methods have been written for the vcftools wrappers yet.
    @classmethod
    def setUpClass(self):
        self.bam = os.path.join(indir, "sample.sort.bam")
# Temporary target generator
def vcf_generator(task):
    """Return a fixed pair of vcf targets; *task* is ignored."""
    return ["vcf%d.vcf.gz" % idx for idx in (1, 2)]
class TestHtslibWrappers(unittest.TestCase):
    """Command-line generation tests for the htslib vcf wrappers."""

    @classmethod
    def setUpClass(self):
        # Imported here so the module is only required when this suite runs.
        import ratatosk.lib.variation.htslib
        self.bam = os.path.join(indir, "sample.sort.bam")

    def test_vcf_merge(self):
        task = ratatosk.lib.variation.htslib.VcfMerge(target="out.vcfmerge.vcf.gz", target_generator_handler='test.test_wrapper.vcf_generator')
        observed = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
        expected = ['vcf', 'merge', 'vcf1.vcf.gz', 'vcf2.vcf.gz', '>',
                    'out.vcfmerge.vcf.gz']
        self.assertEqual(expected, observed)
class TestTabixWrappers(unittest.TestCase):
    """Command-line generation tests for the bgzip/tabix wrappers."""

    @classmethod
    def setUpClass(self):
        # Imported here so the module is only required when this suite runs.
        import ratatosk.lib.variation.tabix
        self.bam = os.path.join(indir, "sample.sort.bam")

    def test_bgzip(self):
        task = ratatosk.lib.variation.tabix.Bgzip(target=self.bam.replace(".bam", ".vcf.gz"))
        observed = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
        self.assertEqual(['bgzip', '-f', 'data/sample.sort.vcf'], observed)

    def test_bgunzip(self):
        """Test bgunzip via three different function calls (Bgzip currently not working)"""
        # task = ratatosk.lib.variation.tabix.Bgzip(target=self.bam.replace(".bam", ".vcf"), options=["-d"], suffix=".vcf")
        # self.assertEqual(['bgzip', '-d', 'data/sample.sort.vcf.gz'], _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))
        expected = ['bgzip', '-d', 'data/sample.sort.vcf.gz']
        # With and without an explicit "-d" option the command is identical.
        for kwargs in ({}, {"options": ["-d"]}):
            task = ratatosk.lib.variation.tabix.BgUnzip(target=self.bam.replace(".bam", ".vcf"), **kwargs)
            self.assertEqual(expected, _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0]))

    def test_tabix(self):
        task = ratatosk.lib.variation.tabix.Tabix(target=self.bam.replace(".bam", ".vcf.gz.tbi"))
        observed = _prune_luigi_tmp(task.job_runner()._make_arglist(task)[0])
        self.assertEqual(['tabix', 'data/sample.sort.vcf.gz'], observed)
| percyfal/ratatosk | test/test_wrapper.py | Python | apache-2.0 | 32,311 | [
"BWA"
] | f97990ee22442ce8e0370e7666ecf2eb39b50400f5f17b09ce2c2fe6b770060b |
import cv2
import numpy as np
import os
import math
from scipy import ndimage
class vp():
    """A single vanishing-point hypothesis."""

    def __init__(self, pos, score, numLines):
        # Supporting line segments are attached later (see read_vpdata).
        self.lines = None
        self.pos = pos            # position of the vanishing point
        self.score = score        # detector score
        self.numLines = numLines  # number of supporting line segments
class VP_imgLines():
    """Per-VP rasterized line image and its diffused (blurred) support map."""

    def __init__(self, imgLines, imgLinesPosMap):
        self.imgLinesPosMap = imgLinesPosMap  # diffused spatial support
        self.imgLines = imgLines              # binary line raster
class Plane():
    """A plane hypothesis defined by a vanishing line and its support map."""

    def __init__(self, vline, imgPlaneProb, sourceVP):
        self.vline = vline                # vanishing line of the plane
        self.imgPlaneProb = imgPlaneProb  # per-pixel support map
        self.sourceVP = sourceVP          # indices of the generating VPs
        # Total support is the sum of the per-pixel map.
        self.score = imgPlaneProb.sum()
        self.rotPar = [0, 0]              # rectification rotation, one per VP
        self.dispVec = None
        self.numDispVec = None
def vLineFromTwoVP(vp1, vp2):
    """Vanishing line through two vanishing points (homogeneous 3-vectors).

    The line is the (approximate) null vector of the stacked points,
    i.e. the right singular vector with the smallest singular value,
    normalized so its third component is 1.
    """
    stacked = np.vstack([vp1, vp2])
    _, _, vh = np.linalg.svd(stacked)
    # Last row of Vh spans the null space of the stacked matrix.
    line = vh[-1, :]
    return line / line[2]
def gaussian_kernel(ksize, sigma):
    """Return a normalized 2-D Gaussian kernel.

    :param ksize: (rows, cols) kernel size; even sizes are supported by
        shifting the Gaussian centre half a pixel off the integer grid
    :param sigma: standard deviation of the Gaussian
    :return: ``ksize[0] x ksize[1]`` float64 array whose entries sum to 1
    """
    # Fix: the np.float alias was removed in NumPy 1.24; use np.float64.
    kernel = np.zeros((ksize[0], ksize[1]), dtype=np.float64)
    halfsizeX = ksize[0] // 2
    halfsizeY = ksize[1] // 2
    sigma2 = 2 * sigma * sigma
    n_halfsizeX = halfsizeX
    n_halfsizeY = halfsizeY
    half_one_x = 0
    half_one_y = 0
    # Even sizes have no centre pixel: mirror around the two middle
    # pixels with a half-pixel offset instead.
    if ksize[0] % 2 == 0:
        n_halfsizeX -= 1
        half_one_x = 0.5
    if ksize[1] % 2 == 0:
        n_halfsizeY -= 1
        half_one_y = 0.5
    # Fill one quadrant and mirror the value into the other three.
    for i in range(n_halfsizeX + 1):
        for j in range(n_halfsizeY + 1):
            value = math.exp(-((i + half_one_x) ** 2 + (j + half_one_y) ** 2) / sigma2)
            kernel[halfsizeX + i][halfsizeY + j] = value
            kernel[halfsizeX + i][n_halfsizeY - j] = value
            kernel[n_halfsizeX - i][halfsizeY + j] = value
            kernel[n_halfsizeX - i][n_halfsizeY - j] = value
    # Normalize so the kernel integrates to 1.
    kernel = kernel / kernel.sum()
    return kernel
def detect_plane_from_vp(vpData, img, mask, option):
    '''
    Estimate plane hypotheses and their posterior probability maps
    from detected vanishing points.

    :param vpData: VP container produced by read_vpdata (numVP, vp list)
    :param img: H x W x 3 image (asserted below)
    :param mask: hole mask; non-zero appears to mark the hole region,
        since posteriors are propagated from the nearest zero pixel
        via the distance transform below — TODO confirm against caller
    :param option: parameter object; fields used: filterSize, filterSigma,
        numFilterIter, fpPlaneProb, probConst
    :return: modelPlane with per-VP support maps, plane list, numPlane,
        postProbHole (posterior outside the hole) and postProb
        (posterior propagated into the hole, with a uniform floor)
    '''
    assert (img.shape[-1] == 3)
    height, width, channel = img.shape
    # Separable wide Gaussian: a 1 x filterSize row filter and its transpose.
    HfilterX = gaussian_kernel([1, option.filterSize], option.filterSigma)
    HfilterY = HfilterX.T
    modelPlane = type("modelPlane", (), {"vp": []})
    # first estimate the spatial support of each VP by diffusing
    # its corresponding line segments using a wide Gaussian kernel
    for i in range(vpData.numVP):
        imgLines = np.zeros((height, width))
        for line in vpData.vp[i].lines:
            # Rasterize the segment (x1, y1) - (x2, y2) with value 255.
            cv2.line(imgLines, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])), 255, 1)
        #cv2.imshow("tmp", imgLines)
        #cv2.waitKey()
        imgLines = imgLines.astype(np.double) / 255
        imgLinesPosMap = imgLines.copy()
        # Repeated separable filtering widens the diffusion of the support.
        for k in range(option.numFilterIter):
            imgLinesPosMap = cv2.filter2D(imgLinesPosMap, -1, HfilterX, borderType=cv2.BORDER_REPLICATE)
        for k in range(option.numFilterIter):
            imgLinesPosMap = cv2.filter2D(imgLinesPosMap, -1, HfilterY, borderType=cv2.BORDER_REPLICATE)
        # Save results
        modelPlane.vp.append(VP_imgLines(imgLines, imgLinesPosMap))
    # Estimate plane support and plane parameters
    numPlane = vpData.numVP * (vpData.numVP - 1) // 2
    # Initialize plane data
    modelPlane.plane = []
    # A pair of vanishing points forms a plane hypothesis; its support is
    # the product of the two VP support maps.
    for i in range(vpData.numVP - 1):
        for j in range(i + 1, vpData.numVP):
            modelPlane.plane.append(Plane(vLineFromTwoVP(vpData.vp[i].pos, vpData.vp[j].pos),
                                          modelPlane.vp[i].imgLinesPosMap * modelPlane.vp[j].imgLinesPosMap,
                                          [i, j]))
    # Estimate a rectification rotation parameter per plane and source VP.
    for i in range(numPlane):
        for vpInd in [0, 1]:
            linesCurr = np.array(vpData.vp[modelPlane.plane[i].sourceVP[vpInd]].lines)
            # Keep only segments whose 5th field is non-zero
            # (presumably a length/validity flag — confirm in line format).
            invalidLieInd = linesCurr[:, 4] == 0
            linesCurr = linesCurr[invalidLieInd == 0, :]
            numLines = linesCurr.shape[0]
            vLineCurr = modelPlane.plane[i].vline
            # Rectified homography: maps the plane's vanishing line to the
            # line at infinity (identity with the third row replaced).
            H = np.eye(3)
            H[2, :] = vLineCurr
            # Segment endpoints in homogeneous coordinates, one column each.
            linesStart = np.hstack([linesCurr[:, :2], np.ones((numLines, 1))]).T
            linesEnd = np.hstack([linesCurr[:, 2:4], np.ones((numLines, 1))]).T
            linesStartRect = H.dot(linesStart)
            linesStartRect = linesStartRect / np.vstack([linesStartRect[2, :],
                                                         linesStartRect[2, :],
                                                         linesStartRect[2, :]])
            linesEndRect = H.dot(linesEnd)
            linesEndRect = linesEndRect / np.vstack([linesEndRect[2, :],
                                                     linesEndRect[2, :],
                                                     linesEndRect[2, :]])
            # Orientation of each rectified segment, with a sign convention
            # fixed by the y-order of the endpoints.
            linesVec = linesStartRect[:2, :] - linesEndRect[:2, :]
            linesSign = linesEndRect[1, :] > linesStartRect[1, :]
            linesSign = 2 * linesSign - 1
            linesLength = np.sqrt(np.sum(linesVec ** 2, axis=0))
            linesCos = linesSign * linesVec[0, :] / linesLength
            theta = np.arccos(linesCos)
            thetaAvg = np.mean(theta)
            # Iteratively fold angles more than pi/2 from the mean back into
            # the same half circle, then re-average (5 fixed iterations).
            for iter in range(5):
                thetadiff = theta - thetaAvg
                indLargeTheat = thetadiff > math.pi / 2
                theta[indLargeTheat] = math.pi - theta[indLargeTheat]
                indSmallTheta = thetadiff < -math.pi / 2
                theta[indSmallTheta] = math.pi + theta[indSmallTheta]
                thetaAvg = np.mean(theta)
            modelPlane.plane[i].rotPar[vpInd] = thetaAvg
    # add ad fronto-parallel plane (constant support fpPlaneProb everywhere)
    modelPlane.plane.append(Plane(np.array([0, 0, 1]), option.fpPlaneProb*np.ones((height, width)), 0))
    numPlane += 1
    modelPlane.numPlane = numPlane
    # compute posterior prob: normalize plane supports per pixel.
    planeProb = np.zeros((height, width, numPlane))
    for i in range(numPlane):
        planeProb[:, :, i] = modelPlane.plane[i].imgPlaneProb
    planeProbSum = np.sum(planeProb, axis=2)
    planeProb = planeProb / planeProbSum[..., None]
    modelPlane.postProbHole = planeProb
    # For each pixel where mask != 0, inds gives the nearest pixel with
    # mask == 0 (its row/col index pair).
    edt, inds = ndimage.distance_transform_edt(1-(mask == 0), return_indices=True)
    # NOTE(review): maskInt is computed but not used below — the border
    # zeroing appears to have no effect here.
    maskInt = mask.copy()
    maskInt[0, :] = 0
    maskInt[-1, :] = 0
    maskInt[:, 0] = 0
    maskInt[:, -1] = 0
    # propagate posterior prob into the hole region by copying each hole
    # pixel's posterior from its nearest non-hole pixel.
    for i in range(numPlane):
        planeProbCh = planeProb[:, :, i]
        planeProb[:, :, i] = planeProbCh[inds[0, :, :], inds[1, :, :]].copy()
        #cv2.imshow("postProb", planeProb[:, :, i])
        #cv2.waitKey()
    # Re-normalize, then mix in a uniform floor probConst per plane.
    planeProbSum = np.sum(planeProb, axis=2)
    planeProb = planeProb / planeProbSum[..., None]
    planeProbSum = 1 + numPlane*option.probConst
    planeProb = (planeProb + option.probConst) / planeProbSum
    modelPlane.postProb = planeProb.copy()
    #print(modelPlane.postProb.shape)
    #cv2.imshow("postProb", modelPlane.postProb)
    #cv2.waitKey()
    return modelPlane
def read_vpdata(fileName):
    """Parse a vpdetection output file into a VPData container.

    File layout: one header line, then one record per vanishing point
    (``x y z score numLines``) terminated by a blank line plus one more
    discarded line, then for each VP a count line followed by that many
    line-segment records.

    :param fileName: vp filename
    :return: VPData with ``numVP`` and a ``vp`` list
    """
    data = type("VPData", (), {"numVP": 0, "vp": []})
    with open(fileName, 'r') as fh:
        fh.readline()  # skip header line
        while True:
            row = fh.readline()
            if row == "\n":
                # Blank separator reached: discard the following line too.
                fh.readline()
                break
            fields = list(map(float, row.split()))
            data.vp.append(vp(np.array(fields[:3]), fields[3], int(fields[4])))
            data.numVP += 1
        remaining = fh.readlines()
    cursor = 0
    for i in range(data.numVP):
        count = data.vp[i].numLines
        # Each VP's segment block starts with its own count line.
        assert (int(remaining[cursor]) == count)
        segment_rows = remaining[cursor + 1:cursor + count + 1]
        data.vp[i].lines = [list(map(float, row.split())) for row in segment_rows]
        assert (len(data.vp[i].lines) == count)
        cursor += count + 1
    return data
def extract_plane(image_name, img, mask, option):
    """Extract a plane model from an image.

    Runs the external VP detector if its cached output is missing, then
    builds plane hypotheses from the detected vanishing points.

    :param image_name: image file name inside the ``data`` directory
    :param img: input image
    :param mask: hole mask
    :param option: parameter object forwarded to detect_plane_from_vp
    :return: modelPlane
    """
    cacheDir = 'cache/vpdetection'
    vpFileName = image_name[:-4] + '-vanishingpoints.txt'
    vpTxtPath = os.path.join(cacheDir, 'text', vpFileName)
    if not os.path.exists(vpTxtPath):
        # Cache miss: invoke the external detector to produce the file.
        vpDetectCMD = 'vpdetection.exe -indir data -infile ' + image_name + ' -outdir ' + cacheDir
        print("Using CMD: ", vpDetectCMD)
        os.system(vpDetectCMD)
    # Load the vanishing points and their supporting line segments.
    vpData = read_vpdata(vpTxtPath)
    return detect_plane_from_vp(vpData, img, mask, option)
"Gaussian"
] | 058fef62eca3b41864300b469480b0fb71edcbe3ceae649d080144457b4a4696 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.