| text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (1 class) | license (15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.47284
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/mediaplayerremove.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class mediaplayerremove(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(mediaplayerremove, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_90165367 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
_v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
write(u'''</e2state>
\t<e2statetext>''')
_v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
write(u'''</e2statetext>
</e2simplexmlresult>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_90165367
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_mediaplayerremove= 'respond'
## END CLASS DEFINITION
if not hasattr(mediaplayerremove, '_initCheetahAttributes'):
templateAPIClass = getattr(mediaplayerremove, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(mediaplayerremove)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=mediaplayerremove()).run()
| MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/web/mediaplayerremove.py | Python | gpl-2.0 | 5,283 | ["VisIt"] | fa837d2c999aeca314dfe8f513995183d96730deef30dd149d7dbcb1bf7ae4bb |
#!/usr/bin/env python
##############################################################################################
#
#
# CMIP6_hybrid_regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You can find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2018-10-22 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/OXBUDS/0.5x0.5/cmip6_hybrid/v2/CMIP6_hybrid_combined_iso-butane_1960-2020_v2_greg.nc'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='i-C4H10'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='iso-butane surface emissions'
ocube.standard_name='tendency_of_atmosphere_mass_content_of_butane_due_to_emission'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='CMIP6_hybrid_combined_iso-butane_1960-2020_v2_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of iso-butane from 1960 to 2020.'
ocube.attributes['File_version']='CMIP6_hybrid_v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Hoesly et al., Geosci. Mod. Dev., 2018; Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010; Helmig et al., Atmos. Environ., 2014.'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian')
ocube.coord(axis='t').points=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5,
228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5,
624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989,
1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5,
1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5,
1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993,
2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5,
2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662,
2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5,
3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333,
3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5,
3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5,
4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337,
4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5,
4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007,
5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5,
5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5,
5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011,
6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5,
6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680,
6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5,
7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5,
7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5,
7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5,
8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355,
8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5,
8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025,
9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5,
9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5,
9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028,
10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5,
10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5,
10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912,
10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216,
11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5,
11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794,
11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099,
12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5,
12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5,
12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5,
13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316,
13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620,
13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925,
13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5,
14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5,
14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5,
14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081,
15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5,
15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599,
15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872,
15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5,
16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391,
16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5,
16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907,
16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5,
17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425,
17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5,
17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943,
17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217,
18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5,
18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733,
18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5,
19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5,
19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5,
19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769,
19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043,
20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5,
20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5,
20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5,
20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078,
21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351,
21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595,
21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869,
21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5,
22143.5, 22174, 22204.5, 22235, 22265.5])
# make z-direction.
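# add a single model_level_number coordinate, promote it to a dimension, then transpose so the level axis sits second (time, z, lat, lon)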
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
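# the forecast_reference_time points below repeat the same mid-month day offsets used for the time coordinate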
frt=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5,
228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5,
624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989,
1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5,
1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5,
1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993,
2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5,
2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662,
2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5,
3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333,
3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5,
3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5,
4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337,
4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5,
4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007,
5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5,
5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5,
5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011,
6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5,
6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680,
6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5,
7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5,
7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5,
7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5,
8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355,
8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5,
8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025,
9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5,
9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5,
9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028,
10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5,
10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5,
10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912,
10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216,
11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5,
11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794,
11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099,
12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5,
12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5,
12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5,
13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316,
13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620,
13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925,
13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5,
14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5,
14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5,
14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081,
15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5,
15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599,
15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872,
15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5,
16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391,
16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5,
16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907,
16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5,
17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425,
17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5,
17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943,
17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217,
18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5,
18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733,
18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5,
19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5,
19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5,
19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769,
19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043,
20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5,
20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5,
20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5,
20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078,
21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351,
21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595,
21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869,
21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5,
22143.5, 22174, 22204.5, 22235, 22265.5],dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_iC4H10.nc'
# don't want time to be an unlimited (concatenatable) dimension, as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
| acsis-project/emissions | emissions/python/CMIP6_hybrid/CMIP6_hybrid_regrid_iC4H10_emissions_n96e_greg.py | Python | gpl-3.0 | 18,931 | ["NetCDF"] | ba86fad1f88b25f628a587103a11b3800ba2c59c9526c3edfed8129c0eab77d4 |
from sqlalchemy import extract, select
from sqlalchemy import sql
from sqlalchemy.databases import sybase
from sqlalchemy.testing import assert_raises_message, \
fixtures, AssertsCompiledSQL
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = sybase.dialect()
def test_extract(self):
t = sql.table('t', sql.column('col1'))
mapping = {
'day': 'day',
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond',
'millisecond': 'millisecond',
'year': 'year',
}
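# each extract() field should compile to the corresponding Sybase DATEPART() specifier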
for field, subst in list(mapping.items()):
self.assert_compile(
select([extract(field, t.c.col1)]),
'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % subst)
def test_offset_not_supported(self):
stmt = select([1]).offset(10)
assert_raises_message(
NotImplementedError,
"Sybase ASE does not support OFFSET",
stmt.compile, dialect=self.__dialect__
)
| robin900/sqlalchemy | test/dialect/test_sybase.py | Python | mit | 1,066 | ["ASE"] | a09fd09ab9f52e6e7e8f5dfa2955dfd6fda28af68137f900685ba76e28cb3fa6 |
class Person():
def __init__(self, name, phoneNum, carrier):
self.name = name
self.phoneNum = phoneNum
self.carrier = carrier
self.smsEmail = None
self.currentAssignment = None
self.makeSMSEmail()
def getName(self):
return self.name
def getPhoneNum(self):
return self.phoneNum
def getCarrier(self):
return self.carrier
def getSMSEmail(self):
return self.smsEmail
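# map the person's carrier to its email-to-SMS gateway address (e.g. number@vtext.com for Verizon)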
def makeSMSEmail(self):
if self.carrier == 'sprint':
self.smsEmail = self.phoneNum+'@messaging.sprintpcs.com'
elif self.carrier == 'tmobile':
self.smsEmail = self.phoneNum+'@tmomail.net'
elif self.carrier == 'att':
self.smsEmail = self.phoneNum+'@txt.att.net'
elif self.carrier == 'verizon':
self.smsEmail = self.phoneNum+'@vtext.com'
elif self.carrier == 'boostmobile':
self.smsEmail = self.phoneNum+'@sms.myboostmobile.com'
### TODO : Improve code by running threads to send all texts at (about) the same time
### TESTING
me = Person("Izzy Gomez", "7023088493", "sprint")
# print me.getName(), me.getPhoneNum(), me.getCarrier(), me.getSMSEmail() # Testing
keisuke = Person("Keisuke Hatanaka", "6194951433", "verizon")
angel = Person("Angel Diaz", "3058014751", "tmobile")
johnstevens = Person("John Stevens", "6145811243", "verizon")
jonatan = Person("Jonatan Yucra", "7174916770", "att")
brian = Person("Brian Saavedra", "7082005751", "boostmobile")
edwin = Person("Edwin Africano", "4015273181", "tmobile")
alex = Person("Alex Aguilar", "3232130199", "verizon")
phillip = Person("Phillip Graham", "2103803991", "att")
import smtplib
import thread
import time # Not used yet
def login():
global server
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login("tdc.house.manager@gmail.com", "minerva1847")
print "Successful Login!"
def logout():
server.quit()
print "Successful Logout!"
"""
TODO Function Explanation
@param person,
@param message,
"""
def sendMessage(person, message):
try:
server.sendmail("tdc.house.manager@gmail.com",\
person.getSMSEmail(),\
message)
print "\tSuccesfully sent message to "+person.getName()+"!"
except Exception, e:
print "\t***Error sending message to "+person.getName()+":\n\t", e
login()
for i in range(20):
# thread.start_new_thread( sendMessage, (me, "Test: "+str(i)))
sendMessage(me, "Test: "+str(i))
time.sleep(10)
logout()
| izzygomez/RemPy | brothers.py | Python | mit | 2,349 | ["Brian"] | 9189c42005b7704648b237f60175785216886ffe5a707779362415e21bff0359 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# PMDA
# Copyright (c) 2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
from __future__ import absolute_import
import numpy as np
import pytest
import MDAnalysis as mda
from MDAnalysisTests.datafiles import DCD, PSF
import joblib
import dask
from pmda import parallel
from pmda.util import make_balanced_slices
def test_timing():
io = np.arange(5)
compute = np.arange(5) + 1
total = 5
universe = np.arange(2)
prepare = 3
conclude = 6
wait = 12
io_block = np.sum(io)
compute_block = np.sum(compute)
timing = parallel.Timing(io, compute, total,
universe, prepare, conclude, wait,
io_block, compute_block,)
np.testing.assert_equal(timing.io, io)
np.testing.assert_equal(timing.compute, compute)
np.testing.assert_equal(timing.total, total)
np.testing.assert_equal(timing.universe, universe)
np.testing.assert_equal(timing.cumulate_time, np.sum(io) + np.sum(compute))
np.testing.assert_equal(timing.prepare, prepare)
np.testing.assert_equal(timing.conclude, conclude)
np.testing.assert_equal(timing.wait, wait)
np.testing.assert_equal(timing.io_block, io_block)
np.testing.assert_equal(timing.compute_block, compute_block)
class NoneAnalysis(parallel.ParallelAnalysisBase):
def __init__(self, atomgroup):
universe = atomgroup.universe
super(NoneAnalysis, self).__init__(universe, (atomgroup, ))
def _prepare(self):
pass
def _conclude(self):
self.res = np.concatenate(self._results)
def _single_frame(self, ts, atomgroups):
return ts.frame
@pytest.fixture
def analysis():
u = mda.Universe(PSF, DCD)
ana = NoneAnalysis(u.atoms)
return ana
@pytest.mark.parametrize('n_jobs', (1, 2))
def test_all_frames(analysis, n_jobs):
analysis.run(n_jobs=n_jobs)
u = mda.Universe(analysis._top, analysis._traj)
assert len(analysis.res) == u.trajectory.n_frames
@pytest.mark.parametrize('n_jobs', (1, 2))
def test_sub_frames(analysis, n_jobs):
analysis.run(start=10, stop=50, step=10, n_jobs=n_jobs)
np.testing.assert_almost_equal(analysis.res, [10, 20, 30, 40])
@pytest.mark.parametrize('n_jobs', (1, 2, 3))
def test_no_frames(analysis, n_jobs):
u = mda.Universe(analysis._top, analysis._traj)
n_frames = u.trajectory.n_frames
with pytest.warns(UserWarning):
analysis.run(start=n_frames, stop=n_frames+1, n_jobs=n_jobs)
assert len(analysis.res) == 0
np.testing.assert_equal(analysis.res, [])
np.testing.assert_equal(analysis.timing.compute, [])
np.testing.assert_equal(analysis.timing.io, [])
np.testing.assert_equal(analysis.timing.io_block, [0])
np.testing.assert_equal(analysis.timing.compute_block, [0])
np.testing.assert_equal(analysis.timing.wait, [0])
assert analysis.timing.universe == 0
def test_scheduler(analysis, scheduler):
analysis.run()
def test_nframes_less_nblocks_warning(analysis):
u = mda.Universe(analysis._top, analysis._traj)
n_frames = u.trajectory.n_frames
with pytest.raises(ValueError):
analysis.run(stop=2, n_blocks=4, n_jobs=2)
@pytest.mark.parametrize('n_blocks', np.arange(1, 11))
def test_nblocks(analysis, n_blocks):
analysis.run(n_blocks=n_blocks)
assert len(analysis._results) == n_blocks
def test_guess_nblocks(analysis):
with dask.config.set(scheduler='processes'):
analysis.run(n_jobs=-1)
assert len(analysis._results) == joblib.cpu_count()
@pytest.mark.parametrize('n_blocks', np.arange(1, 11))
def test_blocks(analysis, n_blocks):
analysis.run(n_blocks=n_blocks)
u = mda.Universe(analysis._top, analysis._traj)
n_frames = u.trajectory.n_frames
start, stop, step = u.trajectory.check_slice_indices(
None, None, None)
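# rebuild the frame ranges the analysis is expected to have used for its blocks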
slices = make_balanced_slices(n_frames, n_blocks, start, stop, step)
blocks = [
range(bslice.start, bslice.stop, bslice.step) for bslice in slices
]
assert analysis._blocks == blocks
def test_attrlock():
u = mda.Universe(PSF, DCD)
pab = parallel.ParallelAnalysisBase(u, (u.atoms,))
# Should initially be allowed to set attributes
pab.thing1 = 24
assert pab.thing1 == 24
# Apply lock
with pab.readonly_attributes():
# Reading should still work
assert pab.thing1 == 24
# Setting should fail
with pytest.raises(AttributeError):
pab.thing2 = 100
# Outside of lock context setting should again work
pab.thing2 = 100
assert pab.thing2 == 100
def test_reduce():
res = []
u = mda.Universe(PSF, DCD)
ana = NoneAnalysis(u.atoms)
res = ana._reduce(res, [1])
res = ana._reduce(res, [1])
# Should see res become a list with 2 elements.
assert res == [[1], [1]]
| MDAnalysis/pmda | pmda/test/test_parallel.py | Python | gpl-2.0 | 5,084 | ["MDAnalysis"] | a9a7bfbe9c1605c7a3f2e9bdc09b9495a5e7043dbfc7cb42cc06e5dd31be3141 |
import sys
import socket
import Messages
import pickle
import threading
import Queue
import sha
import struct
class _Protocol(object):
NAIVE = 0
LOCKSTEP = 1
AS = 2
PROTOCOL = _Protocol.NAIVE
if len(sys.argv) > 3:
protocol = sys.argv[3]
if protocol == "naive":
pass
elif protocol == "lockstep":
PROTOCOL = _Protocol.LOCKSTEP
elif protocol == "as":
PROTOCOL = _Protocol.AS
else:
print("protocol not recognized")
sys.exit(1)
#code from http://stackoverflow.com/questions/12435211/python-threading-timer-repeat-function-every-n-seconds
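# _setInterval(interval) runs the decorated function every 'interval' seconds on a daemon thread; the returned threading.Event stops the loop when set()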
def _setInterval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop(): # executed in another thread
while not stopped.wait(interval): # until stopped
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True # stop if the program exits
t.start()
return stopped
return wrapper
return decorator
comm_array = []
_peers = []
_me = Messages.Peer("127.0.0.1", 0)
_avatars = []
_spotlights = []
_throttle = None
_in_messages = Queue.Queue()
_out_messages = Queue.Queue()
network_fps = 30.0
network_frame = 1.0/network_fps
def find_peers(server_name, n):
for p in range(n-1):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("Created listen socket")
sock.bind(('', 0))
print("Listen socket bound")
sock.listen(5)
comm_array.append(sock)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((server_name, Messages.MATCHMAKING_PORT))
print("Connecting to matchmaking server")
sock.send(pickle.dumps(Messages.MatchmakingConfiguration(int(n), [s.getsockname()[1] for s in comm_array])))
print("Waiting for players...")
messages = pickle.loads(sock.recv(4096))
print("Found players!")
for message in messages:
if isinstance(message, Messages.Spotlights):
_spotlights.extend(message.spotlights)
elif isinstance(message, Messages.MatchmakingError):
print("Matchmaking Error")
sys.exit(1)
elif isinstance(message, Messages.MatchmakingAccept):
print("Connecting to 1 peer")
server_sock = [s for s in comm_array if s.getsockname()[1] == message.port][0]
comm_array.remove(server_sock)
conn, addr = server_sock.accept()
_peers.append(Messages.Peer(addr, conn))
#conn.send(pickle.dumps(Messages.PeerConnected()))
print("Connected to %s on port %s" % (addr, conn.getsockname()[1]))
server_sock.close()
elif isinstance(message, Messages.MatchmakingPeers):
print("Connecting to %s peer(s)" % len(message.peers))
for addr in message.peers:
comm_array.pop().close()
peer_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
peer_sock.connect(addr)
_peers.append(Messages.Peer(addr, peer_sock))
#message = pickle.loads(peer_sock.recv(4096))
#if not isinstance(message, Messages.PeerConnected):
#print("Expected PeerConnected message; got %s" % str(message))
print("Connected to %s on port %s" % (addr, peer_sock.getsockname()[1]))
else:
print("Protocol breach: %s" % str(message))
sys.exit(1)
#for peer in _peers:
#peer.sock.setblocking(0)
print("Connected to %s peer(s)" % (n-1))
def get_spotlights():
return _spotlights
def register_local_avatar(avatar):
_me.avatar = avatar
if (PROTOCOL == _Protocol.NAIVE):
def register_remote_avatars(avatars):
for i in range(len(_peers)):
_peers[i].avatar = avatars[i]
@_setInterval(network_frame)
def blast():
messages = []
while not _out_messages.empty():
message = _out_messages.get()
messages.append(message)
s = pickle.dumps(messages)
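# prefix the pickled payload with its 4-byte big-endian length so the receiver knows exactly how many bytes make up one frame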
raw_size = struct.pack("!L", len(s))
for peer in _peers:
peer.sock.send(raw_size)
peer.sock.send(s)
def in_loop(peer):
data = ""
while peer.active:
while len(data) < 4:
data += peer.sock.recv(4)
raw_size, data = data[:4], data[4:]
size = struct.unpack("!L", raw_size)[0]
while len(data) < size:
data += peer.sock.recv(4096)
message, data = data[:size], data[size:]
_in_messages.put((peer, message))
_blaster = blast()
for peer in _peers:
_throttle = threading.Thread(target=in_loop, args=(peer,))
_throttle.daemon = True
_throttle.start()
def send(message):
_out_messages.put(message)
def recv():
result = []
while not _in_messages.empty():
peer, messages = _in_messages.get()
try:
messages = pickle.loads(messages)
except EOFError:
print("Peer quit the game")
peer.active = False
for message in messages:
if isinstance(message, Messages.SwordSwing):
_me.avatar.recv(message)
else:
peer.avatar.recv(message)
elif (PROTOCOL == _Protocol.LOCKSTEP):
hashes = Queue.Queue(len(_peers))
def register_remote_avatars(avatars):
for i in range(len(_peers)):
_peers[i].avatar = avatars[i]
@_setInterval(network_frame)
def blast():
messages = []
while not _out_messages.empty():
message = _out_messages.get()
messages.append(message)
s = pickle.dumps(messages)
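# lockstep: broadcast the SHA-1 digest of this frame's messages first, wait for a digest from every peer, and only then send the pickled payload itself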
h = sha.new(s).digest()
for peer in _peers:
peer.sock.send(h)
ct = 0
while ct < len(_peers):
hashes.get()
ct += 1
raw_size = struct.pack("!L", len(s))
for peer in _peers:
peer.sock.send(raw_size)
peer.sock.send(s)
def in_loop(peer):
data = ""
while peer.active:
while len(data) < 20:
data += peer.sock.recv(20)
h, data = data[:20], data[20:]
hashes.put(h)
while len(data) < 4:
data += peer.sock.recv(4)
raw_size, data = data[:4], data[4:]
size = struct.unpack("!L", raw_size)[0]
while len(data) < size:
data += peer.sock.recv(4096)
message, data = data[:size], data[size:]
_in_messages.put((peer, message, h))
_blaster = blast()
for peer in _peers:
_throttle = threading.Thread(target=in_loop, args=(peer,))
_throttle.daemon = True
_throttle.start()
def send(message):
_out_messages.put(message)
def recv():
result = []
while not _in_messages.empty():
peer, s, h = _in_messages.get()
check = sha.new(s).digest()
print("Received hash: %s" % h)
print("Plaintext => %s" % check)
if not check == h:
print("CHEATER! (%s : %s)" % (check, h))
messages = []
try:
messages = pickle.loads(s)
except EOFError:
print("Peer quit the game")
peer.active = False
for message in messages:
if isinstance(message, Messages.SwordSwing):
_me.avatar.recv(message)
else:
peer.avatar.recv(message)
else:
hashes = Queue.Queue(len(_peers))
def register_remote_avatars(avatars):
for i in range(len(_peers)):
_peers[i].avatar = avatars[i]
@_setInterval(network_frame)
def blast():
messages = []
while not _out_messages.empty():
message = _out_messages.get()
messages.append(message)
s = pickle.dumps(messages)
h = sha.new(s).digest()
for peer in _peers:
peer.sock.send(h)
ct = 0
while ct < len(_peers):
hashes.get()
ct += 1
raw_size = struct.pack("!L", len(s))
for peer in _peers:
peer.sock.send(raw_size)
peer.sock.send(s)
def in_loop(peer):
data = ""
while peer.active:
while len(data) < 20:
data += peer.sock.recv(20)
h, data = data[:20], data[20:]
hashes.put(h)
while len(data) < 4:
data += peer.sock.recv(4)
raw_size, data = data[:4], data[4:]
size = struct.unpack("!L", raw_size)[0]
while len(data) < size:
data += peer.sock.recv(4096)
message, data = data[:size], data[size:]
_in_messages.put((peer, message, h))
_blaster = blast()
for peer in _peers:
_throttle = threading.Thread(target=in_loop, args=(peer,))
_throttle.daemon = True
_throttle.start()
def send(message):
sending = True
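# only broadcast a NinjaMove while the ninja stands in a spotlight; if the move would carry it into a spotlight, send an absolute NinjaPosition instead; moves made entirely in the dark stay local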
if isinstance(message, Messages.NinjaMove):
lit = False
x = _me.avatar.x
y = _me.avatar.y
if message.orientation == Messages.Orientation.Horizontal:
x += message.magnitude
elif message.orientation == Messages.Orientation.Vertical:
y += message.magnitude
for spotlight in _spotlights:
if not (_me.avatar.x < spotlight[0]-1
or _me.avatar.x > spotlight[0]+1
or _me.avatar.y < spotlight[1]-1
or _me.avatar.y > spotlight[1]+1):
lit = True
break
if not (x < spotlight[0]-1
or x > spotlight[0]+1
or y < spotlight[1]-1
or y > spotlight[1]+1):
message = Messages.NinjaPosition(x, y)
lit = True
break
sending = lit
if sending:
_out_messages.put(message)
def recv():
result = []
while not _in_messages.empty():
peer, s, h = _in_messages.get()
check = sha.new(s).digest()
if not check == h:
print("CHEATER! (%s : %s)" % (check, h))
messages = []
try:
messages = pickle.loads(s)
except EOFError:
print("Peer quit the game")
peer.active = False
for message in messages:
if isinstance(message, Messages.SwordSwing):
_me.avatar.recv(message)
else:
peer.avatar.recv(message)
| jiminychris/shiny-ninja | ShinyNinja/Networking/Client.py | Python | mit | 11,444 | ["BLAST"] | 087a233b7a8a2c291b2f0d76d8d692f71922f5c4ae8bc062fa6feb0655e1e853 |
# Copyright 2014 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""defines few unrelated algorithms, that works on declarations"""
def declaration_path(decl, with_defaults=True):
"""
returns a list of parent declaration names
:param decl: declaration for which declaration path should be calculated
:type decl: :class:`declaration_t`
:rtype: [names], where first item contains top parent name and last item
contains the `decl` name
"""
if not decl:
return []
if not decl.cache.declaration_path:
result = [decl.name]
parent = decl.parent
while parent:
if parent.cache.declaration_path:
result.reverse()
decl.cache.declaration_path = parent.cache.declaration_path + \
result
return decl.cache.declaration_path
else:
result.append(parent.name)
parent = parent.parent
result.reverse()
decl.cache.declaration_path = result
return result
else:
return decl.cache.declaration_path
def partial_declaration_path(decl):
"""
returns a list of parent declaration names, omitting template arguments that
have default values
:param decl: declaration for which declaration path should be calculated
:type decl: :class:`declaration_t`
:rtype: [names], where first item contains top parent name and last item
contains the `decl` name
"""
# TODO:
# If parent declaration cache already has declaration_path, reuse it for
# calculation.
if not decl:
return []
if not decl.cache.partial_declaration_path:
result = [decl.partial_name]
parent = decl.parent
while parent:
if parent.cache.partial_declaration_path:
result.reverse()
decl.cache.partial_declaration_path \
= parent.cache.partial_declaration_path + result
return decl.cache.partial_declaration_path
else:
result.append(parent.partial_name)
parent = parent.parent
result.reverse()
decl.cache.partial_declaration_path = result
return result
else:
return decl.cache.partial_declaration_path
def get_named_parent(decl):
"""
returns a reference to a named parent declaration
:param decl: the child declaration
:type decl: :class:`declaration_t`
:rtype: reference to :class:`declaration_t` or None if not found
"""
if not decl:
return None
parent = decl.parent
while parent and (not parent.name or parent.name == '::'):
parent = parent.parent
return parent
def full_name_from_declaration_path(dpath):
# Here I have lack of knowledge:
# TODO: "What is the full name of declaration declared in unnamed
# namespace?"
result = [_f for _f in dpath if _f]
result = result[0] + '::'.join(result[1:])
return result
def full_name(decl, with_defaults=True):
"""
returns declaration full qualified name
If `decl` belongs to anonymous namespace or class, the function will return
C++ illegal qualified name.
:param decl: :class:`declaration_t`
:type decl: :class:`declaration_t`
:rtype: full name of declarations.
"""
if None is decl:
raise RuntimeError("Unable to generate full name for None object!")
if with_defaults:
if not decl.cache.full_name:
decl.cache.full_name = full_name_from_declaration_path(
declaration_path(decl))
return decl.cache.full_name
else:
if not decl.cache.full_partial_name:
decl.cache.full_partial_name = full_name_from_declaration_path(
partial_declaration_path(decl))
return decl.cache.full_partial_name
def make_flatten(decl_or_decls):
"""
converts the tree representation of declarations into a flat one.
:param decl_or_decls: reference to list of declaration's or single
declaration
:type decl_or_decls: :class:`declaration_t` or [ :class:`declaration_t` ]
:rtype: [ all internal declarations ]
"""
import pygccxml.declarations # prevent cyclic import
def proceed_single(decl):
answer = [decl]
if not isinstance(decl, pygccxml.declarations.scopedef_t):
return answer
for elem in decl.declarations:
if isinstance(elem, pygccxml.declarations.scopedef_t):
answer.extend(proceed_single(elem))
else:
answer.append(elem)
return answer
decls = []
if isinstance(decl_or_decls, list):
decls.extend(decl_or_decls)
else:
decls.append(decl_or_decls)
answer = []
for decl in decls:
answer.extend(proceed_single(decl))
return answer
def __make_flatten_generator(decl_or_decls):
"""
converts the tree representation of declarations into a flat one.
:param decl_or_decls: reference to list of declaration's or single
declaration
:type decl_or_decls: :class:`declaration_t` or [ :class:`declaration_t` ]
:rtype: [ all internal declarations ]
"""
import pygccxml.declarations
def proceed_single(decl):
yield decl
if not isinstance(decl, pygccxml.declarations.scopedef_t):
return
for internal in decl.declarations:
if isinstance(internal, pygccxml.declarations.scopedef_t):
for internal_internal in proceed_single(internal):
yield internal_internal
else:
yield internal
if isinstance(decl_or_decls, list):
for creator in decl_or_decls:
for internal in proceed_single(creator):
yield internal
else:
for internal in proceed_single(decl_or_decls):
yield internal
def get_global_namespace(decls):
import pygccxml.declarations
found = [decl for decl in make_flatten(decls) if decl.name == '::'
and isinstance(decl, pygccxml.declarations.namespace_t)]
if len(found) == 1:
return found[0]
raise RuntimeError("Unable to find global namespace.")
class match_declaration_t:
"""
helper class for different search algorithms.
This class helps the developer match a declaration by:
- declaration type, for example :class:`class_t` or
:class:`operator_t`.
- declaration name
- declaration full name
- reference to parent declaration
"""
def __init__(self, type=None, name=None, fullname=None, parent=None):
self.type = type
self.name = name
self.fullname = fullname
self.parent = parent
def does_match_exist(self, inst):
"""
returns True if inst do match one of specified criteria
:param inst: declaration instance
:type inst: :class:`declaration_t`
:rtype: bool
"""
answer = True
if None != self.type:
answer &= isinstance(inst, self.type)
if None != self.name:
answer &= inst.name == self.name
if None != self.parent:
answer &= self.parent is inst.parent
if None != self.fullname:
if inst.name:
answer &= self.fullname == full_name(inst)
else:
answer = False
return answer
def __call__(self, inst):
"""
.. code-block:: python
return self.does_match_exist(inst)
"""
return self.does_match_exist(inst)
def find_all_declarations(
declarations,
type=None,
name=None,
parent=None,
recursive=True,
fullname=None):
"""
returns a list of all declarations that match the criteria defined by the
developer
For more information about arguments see :class:`match_declaration_t`
class.
:rtype: [ matched declarations ]
"""
decls = []
if recursive:
decls = make_flatten(declarations)
else:
decls = declarations
return list(
filter(
match_declaration_t(
type,
name,
fullname,
parent),
decls))
def find_declaration(
declarations,
type=None,
name=None,
parent=None,
recursive=True,
fullname=None):
"""
returns a single declaration that matches the criteria defined by the developer.
If more than one declaration was found, None will be returned.
For more information about arguments see :class:`match_declaration_t`
class.
:rtype: matched declaration :class:`declaration_t` or None
"""
decl = find_all_declarations(
declarations,
type=type,
name=name,
parent=parent,
recursive=recursive,
fullname=fullname)
if len(decl) == 1:
return decl[0]
def find_first_declaration(
declarations,
type=None,
name=None,
parent=None,
recursive=True,
fullname=None):
"""
returns the first declaration that matches the criteria defined by the developer
For more information about arguments see :class:`match_declaration_t`
class.
:rtype: matched declaration :class:`declaration_t` or None
"""
matcher = match_declaration_t(type, name, fullname, parent)
if recursive:
decls = make_flatten(declarations)
else:
decls = declarations
for decl in decls:
if matcher(decl):
return decl
return None
def declaration_files(decl_or_decls):
"""
returns set of files
Every declaration is declared in some file. This function returns a set that
contains the file names of all the given declarations.
:param decl_or_decls: reference to list of declaration's or single
declaration
:type decl_or_decls: :class:`declaration_t` or [:class:`declaration_t`]
:rtype: set(declaration file names)
"""
files = set()
decls = make_flatten(decl_or_decls)
for decl in decls:
if decl.location:
files.add(decl.location.file_name)
return files
class visit_function_has_not_been_found_t(RuntimeError):
"""
Exception that is raised, from :func:`apply_visitor`, when a visitor could
not be applied.
"""
def __init__(self, visitor, decl_inst):
RuntimeError.__init__(self)
self.__msg = (
"Unable to find visit function. Visitor class: %s. " +
"Declaration instance class: %s'") \
% (visitor.__class__.__name__, decl_inst.__class__.__name__)
def __str__(self):
return self.__msg
def apply_visitor(visitor, decl_inst):
"""
applies a visitor on declaration instance
:param visitor: instance
:type visitor: :class:`type_visitor_t` or :class:`decl_visitor_t`
"""
fname = 'visit_' + \
decl_inst.__class__.__name__[:-2] # removing '_t' from class name
if not hasattr(visitor, fname):
raise visit_function_has_not_been_found_t(visitor, decl_inst)
return getattr(visitor, fname)()
| iMichka/mini-iw | pygccxml/declarations/algorithm.py | Python | apache-2.0 | 11,319 | ["VisIt"] | 8e2861be99e6c3167ede93ce717172f756e398d34ab65a9baf5fd1677931cbe4 |
## INFO ########################################################################
## ##
## plastey ##
## ======= ##
## ##
## Oculus Rift + Leap Motion + Python 3 + C + Blender + Arch Linux ##
## Version: 0.2.2.112 (20150514) ##
## File: app.py ##
## ##
## For more information about the project, visit ##
## <http://plastey.kibu.hu>. ##
## Copyright (C) 2015 Peter Varo, Kitchen Budapest ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from os.path import join
from select import select
from threading import Thread
from math import radians
from datetime import datetime
from subprocess import Popen, PIPE
from queue import Queue, Empty
from os import makedirs, listdir
from pickle import dump, HIGHEST_PROTOCOL
from sys import path as sys_path, stderr, stdin
# Import leap modules
sys_path.insert(0, '/usr/lib/Leap')
import Leap
# Import oculus modules
import oculus
# Import blender modules
import bge
from mathutils import Vector, Matrix, Euler, Quaternion
# Import user modules
from hud import Text
from hand import Hands
from surface import Surface
from callback import CallbackManager
from utils import save_to_file, load_from_file
# Import global level constants
from const import (INT_TEMP_SAVE_FILE,
INT_AUTO_SAVE_FILE,
INT_TEXT_INTERVAL,
INT_AUTO_SAVE_INTERVAL,
INT_TEMPORARY_FOLDER,
INT_PERMANENT_FOLDER,
INT_AUTO_SAVE_FOLDER,
INT_TEMP_SAVE_FOLDER,
WINDOW_FULL_SCREEN,
WINDOW_DISPLAY_X,
WINDOW_DISPLAY_Y,
WINDOW_RESOLUTION_X,
WINDOW_RESOLUTION_Y,
APP_RUNNING,
APP_ESCAPED,
OBJ_PROTOTYPE_FINGER,
OBJ_PROTOTYPE_SURFACE,
OBJ_PROTOTYPE_VERTEX_ALL,
OBJ_ARMATURE_CONTROL,
OBJ_ARMATURE,
OBJ_GEOMETRY,
OBJ_GLOBAL,
OBJ_DOT,
OBJ_TEXT_FIRST,
OBJ_TEXT_OTHER,
OBJ_HUD_SCENE,
PROP_TEXT_TIMER,
COLOR_GEOMETRY_DARK,
LEAP_MULTIPLIER,
RIFT_MULTIPLIER,
RIFT_POSITION_SHIFT_Y,
RIFT_POSITION_SHIFT_Z,
RIFT_ORIENTATION_SHIFT,
COMM_IS_PAIRED,
COMM_DEVICE_NAME,
COMM_THIS_HOST,
COMM_THIS_PORT,
COMM_OTHER_HOST,
COMM_OTHER_PORT,
COMM_IS_MASTER,
COMM_RUNNING,
COMM_RESTART)
# Import conditional user modules
if COMM_IS_PAIRED:
if COMM_IS_MASTER:
from communication import sizeof_pow2, Server as Connection
else:
from communication import sizeof_pow2, Client as Connection
# Conditional module level constant
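# buffer sized by sizeof_pow2 to hold two pickled (int, float, float, float) tuples -- presumably one state tuple per hand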
BUFFER_SIZE = sizeof_pow2([(int(), float(), float(), float()),
(int(), float(), float(), float())])
# TODO: make build-script work :)
# Import cutils modules => versioning
#import build
# Module level constants
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
MOUNTED_ON_HEAD = 0
MOUNTED_ON_DESK = 1
# Local references of blender constants
S_KEY = bge.events.SKEY
R_KEY = bge.events.RKEY
L_KEY = bge.events.LKEY
HOME_KEY = bge.events.HOMEKEY
SPACE_KEY = bge.events.SPACEKEY
ESCAPE_KEY = bge.events.ESCKEY
BACK_SPACE_KEY = bge.events.BACKSPACEKEY
JUST_ACTIVATED = bge.logic.KX_INPUT_JUST_ACTIVATED
#------------------------------------------------------------------------------#
class RestartApplication(Exception): pass
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
class EscapeApplication(Exception): pass
#------------------------------------------------------------------------------#
class Application(CallbackManager):
# NOTE: local->global: http://blenderartists.org/forum/archive/index.php/t-180690.html
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
@property
def text(self):
return self._text
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
@property
def hands(self):
return self._hands
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
@property
def surface(self):
return self._surface
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
@property
def vertex_origo(self):
return self._vertex_origo
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, mounted_on_desk, *args, **kwargs):
super().__init__(*args, **kwargs)
# Remove current exit key (make it available for customised setting)
# HACK: There is no 'NONE_KEY' defined, so setting the escape key to 0
# could cause undefined behaviour, as it is undocumented. During
# the tests, this setting did not activate any of the keys, so it
# is a working work-around. (At least on Arch Linux)
bge.logic.setExitKey(0)
# Place the window
window_command = ['sleep 1']
window_command.append('wmctrl -r :ACTIVE: '
'-e 0,{},{},{},{}'.format(WINDOW_DISPLAY_X,
WINDOW_DISPLAY_Y,
WINDOW_RESOLUTION_X,
WINDOW_RESOLUTION_Y))
if WINDOW_FULL_SCREEN:
window_command.append('wmctrl -r :ACTIVE: -b add,fullscreen')
Popen(args = ' && '.join(window_command),
shell = True,
stdin = PIPE,
stderr = PIPE,
universal_newlines=True)
# Create folder structures if they don't exists yet
makedirs(INT_TEMPORARY_FOLDER, exist_ok=True)
makedirs(INT_PERMANENT_FOLDER, exist_ok=True)
makedirs(INT_TEMP_SAVE_FOLDER, exist_ok=True)
makedirs(INT_AUTO_SAVE_FOLDER, exist_ok=True)
## Start input-daemon
#self._lines_queue = Queue()
#def get_input():
# print('start')
# for line in iter(stdin.readline, ''):
# print('try')
# self._lines_queue.put(line)
# print('stop')
# stdin.close()
#Thread(name = 'inputd',
# target = get_input).start()
#self._should_restart = False
#self._should_shut_down = False
try:
# Create connection
self._connection = Connection(this_host=COMM_THIS_HOST,
this_port=COMM_THIS_PORT,
buffer_size=BUFFER_SIZE,
device=COMM_DEVICE_NAME)
self._connection.connect(other_host=COMM_OTHER_HOST,
other_port=COMM_OTHER_PORT)
# If connection is not imported
except NameError:
pass
# Create a new instance of the leap-motion controller
self._leap_controller = leap_controller = Leap.Controller()
# Create a new instance of the oculus-rift controller
self._rift_controller = oculus.OculusRiftDK2(head_factor =RIFT_MULTIPLIER,
head_shift_y=RIFT_POSITION_SHIFT_Y,
head_shift_z=RIFT_POSITION_SHIFT_Z)
# Enable HMD optimisation
if MOUNTED_ON_HEAD:
leap_controller.set_policy(Leap.Controller.POLICY_OPTIMIZE_HMD)
## Enable circle gesture
#leap_controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE)
## Configure circle gesture
#leap_controller.config.set("Gesture.Circle.MinRadius", 100.0)
#leap_controller.config.set("Gesture.Circle.MinArc", radians(359))
## Configure swipe gesture
#leap_controller.config.set("Gesture.Swipe.MinLength", 200.0)
#leap_controller.config.set("Gesture.Swipe.MinVelocity", 750)
#leap_controller.config.save()
# Create a reference to the blender scene
self._blender_scene = blender_scene = bge.logic.getCurrentScene()
bge.logic.addScene(OBJ_HUD_SCENE, 1)
# HUD scene has to be set up
self._preprocess = True
# Make references to blender objects
self._camera = blender_scene.active_camera
self._origo = blender_scene.objects[OBJ_GLOBAL]
# Create hands
self._hands = Hands(self._prototype_creator(OBJ_PROTOTYPE_FINGER))
# Create surface blender object from prototype and
# store its reference inside a Surface instance
# HACK: Surface arguments should be prototype_creator methods instead of
# actual objects, but right now, prototyping the surface object
# with its armature and all of its bones does not copy correctly..
# or something like that..
self._surface = Surface(blender_scene.objects[OBJ_PROTOTYPE_SURFACE],
blender_scene.objects[OBJ_PROTOTYPE_VERTEX_ALL],
COLOR_GEOMETRY_DARK)
# TODO: fake casted shadow with negative lamp:
# https://www.youtube.com/watch?v=iJUlqwKEdVQ
# HACK: yuck.. this is getting out of hands now :(:(:(
self._vertex_origo = blender_scene.objects[OBJ_PROTOTYPE_VERTEX_ALL]
# EXPERIMENTAL
self._armature_control = blender_scene.objects[OBJ_ARMATURE_CONTROL]
self._armature = blender_scene.objects[OBJ_ARMATURE]
self._geometry = blender_scene.objects[OBJ_GEOMETRY]
# EXPERIMENTAL
# Set position setter
# If DESK
if mounted_on_desk:
self._positioner = self._positioner_on_desk
self._selector = self._select_right_hand_on_desk
# If HEAD
else:
self._positioner = self._positioner_on_head
self._selector = self._select_right_hand_on_head
# Last time saved
self._auto_save_time = self._origo[PROP_TEXT_TIMER]
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def reset_view(self):
orientation = Matrix(((1.0, 0.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 0.0, 1.0)))
self._armature_control.worldOrientation = orientation
self._armature.worldOrientation = orientation
self._vertex_origo.worldScale = 1, 1, 1
self._surface.update()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def save(self):
# Save created mesh
file_path = INT_TEMP_SAVE_FILE.format(datetime.now())
save_to_file(path=file_path, data=self._surface.serialise())
print('[OKAY] file has been saved to:', file_path)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def load(self):
try:
file_path = join(INT_TEMP_SAVE_FOLDER,
next(reversed(sorted(listdir(INT_TEMP_SAVE_FOLDER)))))
self._surface.deserialise(load_from_file(file_path))
print('[OKAY] file has been loaded from:', file_path)
except StopIteration:
print('[FAIL] there is no file in:', INT_TEMP_SAVE_FOLDER)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def auto_save(self):
current_time = self._origo[PROP_TEXT_TIMER]
if self._auto_save_time + INT_AUTO_SAVE_INTERVAL <= current_time:
# Update last-time checked value
self._auto_save_time = current_time
# Save created mesh
save_to_file(path=INT_AUTO_SAVE_FILE,
data=self._surface.serialise())
print('[OKAY] file has been auto-saved to:', INT_AUTO_SAVE_FILE)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def recover_from_auto_save(self):
self._auto_save_time = self._origo[PROP_TEXT_TIMER]
self._surface.deserialise(load_from_file(INT_AUTO_SAVE_FILE))
print('[OKAY] file has been recovered from:', INT_AUTO_SAVE_FILE)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __call__(self):
# HACK: The ugliest hack I've ever done...
if self._preprocess:
try:
# Set HUD scene
self._blender_overlay_scene = bge.logic.getSceneList()[1]
# Stop preprocessing loops
self._preprocess = False
# Set text and its details
text_1st_obj = self._blender_overlay_scene.objects[OBJ_TEXT_FIRST]
text_nth_obj = self._blender_overlay_scene.objects[OBJ_TEXT_OTHER]
text_1st_obj.resolution = text_nth_obj.resolution = 10
# Create HUD messaging system
self._text = Text(text_first_object=text_1st_obj,
text_other_object=text_nth_obj,
time_getter=lambda: self._origo[PROP_TEXT_TIMER],
interval=INT_TEXT_INTERVAL)
except IndexError:
return
# Try to create backup
self.auto_save()
#try:
# print('input:', self._lines_queue.get_nowait())
#except Empty:
# pass
# Set states
self.set_states(restart=COMM_RUNNING,
escape =APP_RUNNING)
# Handle keyboard events: escape => quit, space bar => restart, etc.
if bge.logic.keyboard.events[ESCAPE_KEY] == JUST_ACTIVATED:
self.set_states(escape=APP_ESCAPED)
elif bge.logic.keyboard.events[SPACE_KEY] == JUST_ACTIVATED:
self.set_states(restart=COMM_RESTART)
elif bge.logic.keyboard.events[R_KEY] == JUST_ACTIVATED:
self.recover_from_auto_save()
elif bge.logic.keyboard.events[S_KEY] == JUST_ACTIVATED:
self.save()
elif bge.logic.keyboard.events[L_KEY] == JUST_ACTIVATED:
self.load()
elif bge.logic.keyboard.events[HOME_KEY] == JUST_ACTIVATED:
self.reset_view()
elif bge.logic.keyboard.events[BACK_SPACE_KEY] == JUST_ACTIVATED:
self._text.clear()
# Get current values of oculus-rift
rift_frame = self._rift_controller.frame()
# Get current values of the leap-motion
leap_frame = self._leap_controller.frame()
# Set camera position and orientation
self._camera.worldPosition = rift_frame.position
self._camera.worldOrientation = \
RIFT_ORIENTATION_SHIFT*Quaternion(rift_frame.orientation)
# If leap was unable to get a proper frame
if not leap_frame.is_valid:
return print('(leap) Invalid frame', file=stderr)
# Update messaging system
self._text.update()
# If leap was able to get the frame set finger positions
selector = self._selector
positioner = self._positioner
try:
self.execute_all_callbacks()
#circle_cw = circle_ccw = False
#for gesture in leap_frame.gestures():
# if (gesture.type is Leap.Gesture.TYPE_CIRCLE and
# gesture.is_valid and
# gesture.state is Leap.Gesture.STATE_STOP):
# circle = Leap.CircleGesture(gesture)
# if (circle.pointable.direction.angle_to(circle.normal) <= Leap.PI/2):
# circle_ccw=True
# else:
# circle_cw=True
#self._hands.set_states(circle_cw=circle_cw,
# circle_ccw=circle_ccw)
for leap_hand in leap_frame.hands:
hand = selector(leap_hand.is_right)
# TODO: do I still need to set the states of these?
hand.set_states(hand=hand,
#circle_cw=circle_cw,
#circle_ccw=circle_ccw,
leap_hand=leap_hand)
for finger in leap_hand.fingers:
# TODO: positioner(*finger.tip_position) => leaking memory and never returns
hand.finger_by_leap(finger.type()).position = positioner(finger.tip_position)
hand.execute_all_callbacks()
self._hands.execute_all_callbacks()
except EscapeApplication:
self._clean_up()
bge.logic.endGame()
except RestartApplication:
self._clean_up()
bge.logic.restartGame()
# TODO: use `leap_frame.images` as background
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _select_right_hand_on_head(self, is_left):
return self._hands.left if is_left else self._hands.right
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _select_right_hand_on_desk(self, is_right):
return self._hands.right if is_right else self._hands.left
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _positioner_on_head(self, position):
# The USB cable is on the right side and
# the indicator light is on the top
return (position[0] * -LEAP_MULTIPLIER,
position[1] * LEAP_MULTIPLIER - 10,
position[2] * -LEAP_MULTIPLIER + 10)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _positioner_on_desk(self, position):
# The USB cable is on the right side and
# the indicator light is at the back
return (position[0] * LEAP_MULTIPLIER,
position[2] * -LEAP_MULTIPLIER,
position[1] * LEAP_MULTIPLIER -10)#-25)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _prototype_creator(self, prototype):
def creator(**preferences):
object = self._blender_scene.addObject(prototype, OBJ_GLOBAL)
for preference, value in preferences.items():
setattr(object, preference, value)
return object
return creator
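# Illustrative use of the factory above (the attribute names are assumptions,
# not part of this project's API):
#   make_finger = self._prototype_creator(OBJ_PROTOTYPE_FINGER)
#   finger = make_finger(worldScale=(1, 1, 1), visible=True)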
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _clean_up(self):
# Close connection if app is paired
try:
self._connection.stop()
except AttributeError:
pass
# Save created mesh
self.save()
| kitchenbudapest/vr | app.py | Python | gpl-3.0 | 20,905 | [
"VisIt"
] | a21719180d39b1877063715278a4380a087cff01fd7a37792b3875ebec2c7f1c |
"""
TODO
"""
import sys
import logging
import numpy
import numpy as np
from theano import function, shared
from theano import tensor as TT
import theano
sharedX = lambda X, name : \
shared(numpy.asarray(X, dtype=theano.config.floatX), name=name)
def kinetic_energy(vel):
"""
Returns the kinetic energy associated with the given velocity and mass of 1.
Parameters
----------
vel: theano matrix
Symbolic matrix whose rows are velocity vectors.
Returns
-------
return: theano vector
Vector whose i-th entry is the kinetic energy associated with vel[i].
"""
return 0.5 * (vel**2).sum(axis=1)
def hamiltonian(pos, vel, energy_fn):
"""
Returns the Hamiltonian (sum of potential and kinetic energy) for the given
velocity and position.
Parameters
----------
pos: theano matrix
Symbolic matrix whose rows are position vectors.
vel: theano matrix
Symbolic matrix whose rows are velocity vectors.
energy_fn: python function
Python function, operating on symbolic theano variables, used to compute
the potential energy at a given position.
Returns
-------
return: theano vector
Vector whose i-th entry is the Hamiltonian at position pos[i] and
velocity vel[i].
"""
# assuming mass is 1
return energy_fn(pos) + kinetic_energy(vel)
def metropolis_hastings_accept(energy_prev, energy_next, s_rng):
"""
Performs a Metropolis-Hastings accept-reject move.
Parameters
----------
energy_prev: theano vector
Symbolic theano tensor which contains the energy associated with the
configuration at time-step t.
energy_next: theano vector
Symbolic theano tensor which contains the energy associated with the
proposed configuration at time-step t+1.
s_rng: theano.tensor.shared_randomstreams.RandomStreams
Theano shared random stream object used to generate the random number
used in proposal.
Returns
-------
return: theano vector (boolean)
Symbolic vector whose i-th entry is True if the move for row i is accepted, False otherwise.
"""
ediff = energy_prev - energy_next
return (TT.exp(ediff) - s_rng.uniform(size=energy_prev.shape)) >= 0
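# Note: "exp(E_prev - E_next) >= u" with u ~ Uniform(0, 1) is the standard
# Metropolis rule, i.e. accept with probability min(1, exp(-(E_next - E_prev))).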
def simulate_dynamics(initial_pos, initial_vel, stepsize, n_steps, energy_fn):
"""
Return final (position, velocity) obtained after an `n_steps` leapfrog
updates, using Hamiltonian dynamics.
Parameters
----------
initial_pos: shared theano matrix
Initial position at which to start the simulation
initial_vel: shared theano matrix
Initial velocity of particles
stepsize: shared theano scalar
Scalar value controlling amount by which to move
energy_fn: python function
Python function, operating on symbolic theano variables, used to compute
the potential energy at a given position.
Returns
-------
rval1: theano matrix
Final positions obtained after simulation
rval2: theano matrix
Final velocity obtained after simulation
"""
def leapfrog(pos, vel, step):
"""
Inside loop of Scan. Performs one step of leapfrog update, using
Hamiltonian dynamics.
Parameters
----------
pos: theano matrix
in leapfrog update equations, represents pos(t), position at time t
vel: theano matrix
in leapfrog update equations, represents vel(t - stepsize/2),
velocity at time (t - stepsize/2)
step: theano scalar
scalar value controlling amount by which to move
Returns
-------
rval1: [theano matrix, theano matrix]
Symbolic theano matrices for new position pos(t + stepsize), and
velocity vel(t + stepsize/2)
rval2: dictionary
Dictionary of updates for the Scan Op
"""
# from pos(t) and vel(t-stepsize/2), compute vel(t+stepsize/2)
dE_dpos = TT.grad(energy_fn(pos).sum(), pos)
new_vel = vel - step * dE_dpos
# from vel(t+stepsize/2) compute pos(t+stepsize)
new_pos = pos + step * new_vel
return [new_pos, new_vel],{}
# compute velocity at time-step: t + stepsize/2
initial_energy = energy_fn(initial_pos)
dE_dpos = TT.grad(initial_energy.sum(), initial_pos)
vel_half_step = initial_vel - 0.5*stepsize*dE_dpos
# compute position at time-step: t + stepsize
pos_full_step = initial_pos + stepsize * vel_half_step
# perform leapfrog updates: the scan op is used to repeatedly compute
# vel(t + (m-1/2)*stepsize) and pos(t + m*stepsize) for m in [2,n_steps].
(final_pos, final_vel), scan_updates = theano.scan(leapfrog,
outputs_info=[
dict(initial=pos_full_step, return_steps=1),
dict(initial=vel_half_step, return_steps=1),
],
non_sequences=[stepsize],
n_steps=n_steps-1)
# The last velocity returned by scan is vel(t + (n_steps-1/2)*stepsize)
# We therefore perform one more half-step to return vel(t + n_steps*stepsize)
energy = energy_fn(final_pos)
final_vel = final_vel - 0.5 * stepsize * TT.grad(energy.sum(), final_pos)
# return new proposal state
return final_pos, final_vel
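# The (final_pos, final_vel) pair returned here is the proposal that
# hmc_move() below accepts or rejects via metropolis_hastings_accept().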
def hmc_move(s_rng, positions, energy_fn, stepsize, n_steps):
"""
This function performs one-step of Hybrid Monte-Carlo sampling. We start by
sampling a random velocity from a univariate Gaussian distribution, perform
`n_steps` leap-frog updates using Hamiltonian dynamics and accept-reject
using Metropolis-Hastings.
Parameters
----------
s_rng: theano shared random stream
Symbolic random number generator used to draw random velocity and
perform accept-reject move.
positions: shared theano matrix
Symbolic matrix whose rows are position vectors.
energy_fn: python function
Python function, operating on symbolic theano variables, used to compute
the potential energy at a given position.
stepsize: shared theano scalar
Shared variable containing the stepsize to use for `n_steps` of HMC
simulation steps.
n_steps: integer
Number of HMC steps to perform before proposing a new position.
Returns
-------
rval1: boolean
True if move is accepted, False otherwise
rval2: theano matrix
Matrix whose rows contain the proposed "new position"
"""
# sample random velocity
initial_vel = s_rng.normal(size=positions.shape)
# perform simulation of particles subject to Hamiltonian dynamics
final_pos, final_vel = simulate_dynamics(
initial_pos = positions,
initial_vel = initial_vel,
stepsize = stepsize,
n_steps = n_steps,
energy_fn = energy_fn)
# accept/reject the proposed move based on the joint distribution
accept = metropolis_hastings_accept(
energy_prev = hamiltonian(positions, initial_vel, energy_fn),
energy_next = hamiltonian(final_pos, final_vel, energy_fn),
s_rng=s_rng)
return accept, final_pos
def hmc_updates(positions, stepsize, avg_acceptance_rate, final_pos, accept,
target_acceptance_rate, stepsize_inc, stepsize_dec,
stepsize_min, stepsize_max, avg_acceptance_slowness):
"""
This function is executed after `n_steps` of HMC sampling (`hmc_move`
function). It creates the updates dictionary used by the `simulate`
function. It takes care of updating: the position (if the move is accepted),
the stepsize (to track a given target acceptance rate) and the average
acceptance rate (computed as a moving average).
Parameters
----------
positions: shared variable, theano matrix
Shared theano matrix whose rows contain the old position
stepsize: shared variable, theano scalar
Shared theano scalar containing current step size
avg_acceptance_rate: shared variable, theano scalar
Shared theano scalar containing the current average acceptance rate
final_pos: shared variable, theano matrix
Shared theano matrix whose rows contain the new position
accept: theano scalar
Boolean-type variable representing whether or not the proposed HMC move
should be accepted or not.
target_acceptance_rate: float
The stepsize is modified in order to track this target acceptance rate.
stepsize_inc: float
Amount by which to increment stepsize when acceptance rate is too high.
stepsize_dec: float
Amount by which to decrement stepsize when acceptance rate is too low.
stepsize_min: float
Lower-bound on `stepsize`.
stepsize_max: float
Upper-bound on `stepsize`.
avg_acceptance_slowness: float
Average acceptance rate is computed as an exponential moving average.
(1-avg_acceptance_slowness) is the weight given to the newest
observation.
Returns
-------
rval1: dictionary-like
A dictionary of updates to be used by the `HMC_Sampler.simulate`
function. The updates target the position, stepsize and average
acceptance rate.
"""
## POSITION UPDATES ##
# broadcast `accept` scalar to tensor with the same dimensions as final_pos.
accept_matrix = accept.dimshuffle(0, *(('x',)*(final_pos.ndim-1)))
# if accept is True, update to `final_pos` else stay put
new_positions = TT.switch(accept_matrix, final_pos, positions)
## STEPSIZE UPDATES ##
# if acceptance rate is too low, our sampler is too "noisy" and we reduce
# the stepsize. If it is too high, our sampler is too conservative, we can
# get away with a larger stepsize (resulting in better mixing).
_new_stepsize = TT.switch(avg_acceptance_rate > target_acceptance_rate,
stepsize * stepsize_inc, stepsize * stepsize_dec)
# maintain stepsize in [stepsize_min, stepsize_max]
new_stepsize = TT.clip(_new_stepsize, stepsize_min, stepsize_max)
## ACCEPT RATE UPDATES ##
# perform exponential moving average
new_acceptance_rate = TT.add(
avg_acceptance_slowness * avg_acceptance_rate,
(1.0 - avg_acceptance_slowness) * accept.mean())
return [(positions, new_positions),
(stepsize, new_stepsize),
(avg_acceptance_rate, new_acceptance_rate)]
class HMC_sampler(object):
"""
Convenience wrapper for performing Hybrid Monte Carlo (HMC). It creates the
symbolic graph for performing an HMC simulation (using `hmc_move` and
`hmc_updates`). The graph is then compiled into the `simulate` function, a
theano function which runs the simulation and updates the required shared
variables.
Users should interface with the sampler through the `draw` function, which
advances the Markov chain and returns the current sample by calling
`simulate` and `get_position` in sequence.
The hyper-parameters are the same as those used by Marc'Aurelio's
'train_mcRBM.py' file (available on his personal home page).
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@classmethod
def new_from_shared_positions(cls, shared_positions, energy_fn,
initial_stepsize=0.01, target_acceptance_rate=.9, n_steps=20,
stepsize_dec = 0.98,
stepsize_min = 0.001,
stepsize_max = 0.25,
stepsize_inc = 1.02,
avg_acceptance_slowness = 0.9, # used in geometric avg. 1.0 would be not moving at all
seed=12345):
"""
:param shared_positions: theano ndarray shared var with many particle [initial] positions
:param energy_fn:
callable such that energy_fn(positions)
returns theano vector of energies.
The len of this vector is the batchsize.
The sum of this energy vector must be differentiable (with theano.tensor.grad) with
respect to the positions for HMC sampling to work.
"""
batchsize = shared_positions.shape[0]
# allocate shared variables
stepsize = sharedX(initial_stepsize, 'hmc_stepsize')
avg_acceptance_rate = sharedX(target_acceptance_rate, 'avg_acceptance_rate')
s_rng = TT.shared_randomstreams.RandomStreams(seed)
# define graph for an `n_steps` HMC simulation
accept, final_pos = hmc_move(
s_rng,
shared_positions,
energy_fn,
stepsize,
n_steps)
# define the dictionary of updates, to apply on every `simulate` call
simulate_updates = hmc_updates(
shared_positions,
stepsize,
avg_acceptance_rate,
final_pos=final_pos,
accept=accept,
stepsize_min=stepsize_min,
stepsize_max=stepsize_max,
stepsize_inc=stepsize_inc,
stepsize_dec=stepsize_dec,
target_acceptance_rate=target_acceptance_rate,
avg_acceptance_slowness=avg_acceptance_slowness)
# compile theano function
simulate = function([], [], updates=simulate_updates)
# create HMC_sampler object with the following attributes ...
return cls(
positions=shared_positions,
stepsize=stepsize,
stepsize_min=stepsize_min,
stepsize_max=stepsize_max,
avg_acceptance_rate=avg_acceptance_rate,
target_acceptance_rate=target_acceptance_rate,
s_rng=s_rng,
_updates=simulate_updates,
simulate=simulate)
def draw(self, **kwargs):
"""
Returns a new position obtained after `n_steps` of HMC simulation.
Parameters
----------
kwargs: dictionary
The `kwargs` dictionary is passed to the shared variable
(self.positions) `get_value()` function. For example, to avoid
copying the shared variable value, consider passing `borrow=True`.
Returns
-------
rval: numpy matrix
Numpy matrix with dimensions similar to `initial_position`.
"""
self.simulate()
return self.positions.get_value(**kwargs)
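# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The isotropic
# Gaussian energy, array shapes and sample counts below are illustrative
# assumptions; the sampler API used is the one defined above.
if __name__ == '__main__':
    rng = numpy.random.RandomState(123)
    start = shared(numpy.asarray(rng.randn(3, 5), dtype=theano.config.floatX))
    # Potential energy of a zero-mean, unit-variance Gaussian; one value per row.
    gaussian_energy = lambda pos: 0.5 * (pos ** 2).sum(axis=1)
    sampler = HMC_sampler.new_from_shared_positions(start, gaussian_energy)
    samples = numpy.array([sampler.draw() for _ in range(50)])
    print('sample mean (should be near 0): %f' % samples.mean())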
| NoSRPKU/GradD | codes/mcrbm/hmc.py | Python | mit | 14,485 | [
"Gaussian"
] | 70f5616ff5ee7bf5c7a22f1c03acffea1383449e190a05b613029c8f7e1d7ddc |
import re
from blaze import resource, DataFrame
import pandas as pd
from snakemakelib.odo.pandas import annotate_by_uri
@resource.register('.+htseq.counts')
@annotate_by_uri
def resource_fastqc_summary(uri, **kwargs):
with open(uri):
data = pd.read_csv(uri, sep="\t", header=None, names=["FBgn", "count"], index_col=["FBgn"])
return DataFrame(data)
| Oliver-Lab/snakemakelib-oliver | snakemakelib_oliver/odo/htseq.py | Python | mit | 367 | [
"HTSeq"
] | 120475076b9fdf7516bd6c5444ea51619c66cbc309494d41c7b22ff1eba1793b |
r"""
I/O utils (:mod:`skbio.io.util`)
================================
.. currentmodule:: skbio.io.util
This module provides utility functions to deal with files and I/O in
general.
Functions
---------
.. autosummary::
:toctree: generated/
open
open_file
open_files
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import io
from contextlib2 import contextmanager, ExitStack
from skbio.io import IOSourceError
from skbio.io._iosources import get_io_sources, get_compression_handler
from skbio.io._fileobject import (
is_binary_file, SaneTextIOWrapper, CompressedBufferedReader,
CompressedBufferedWriter)
from skbio.util._decorator import stable
_d = dict(mode='r', encoding=None, errors=None, newline=None,
compression='auto', compresslevel=9)
def _resolve(file, mode=_d['mode'], encoding=_d['encoding'],
errors=_d['errors'], newline=_d['newline'],
compression=_d['compression'], compresslevel=_d['compresslevel']):
arguments = locals().copy()
if mode not in {'r', 'w'}:
raise ValueError("Unsupported mode: %r, use 'r' or 'w'" % mode)
newfile = None
source = None
for source_handler in get_io_sources():
source = source_handler(file, arguments)
if mode == 'r' and source.can_read():
newfile = source.get_reader()
break
elif mode == 'w' and source.can_write():
newfile = source.get_writer()
break
if newfile is None:
raise IOSourceError(
"Could not open source: %r (mode: %r)" % (file, mode))
return newfile, source, is_binary_file(newfile)
@stable(as_of="0.4.0")
def open(file, mode=_d['mode'], encoding=_d['encoding'], errors=_d['errors'],
newline=_d['newline'], compression=_d['compression'],
compresslevel=_d['compresslevel']):
r"""Convert input into a filehandle.
Supported inputs:
+----------------------------+----------+-----------+-------------+
| type | can read | can write | source type |
+============================+==========+===========+=============+
| file path | True | True | Binary |
+----------------------------+----------+-----------+-------------+
| URL | True | False | Binary |
+----------------------------+----------+-----------+-------------+
| ``[u"lines list\n"]`` | True | True | Text |
+----------------------------+----------+-----------+-------------+
| :class:`io.StringIO` | True | True | Text |
+----------------------------+----------+-----------+-------------+
| :class:`io.BytesIO` | True | True | Binary |
+----------------------------+----------+-----------+-------------+
| :class:`io.TextIOWrapper` | True | True | Text |
+----------------------------+----------+-----------+-------------+
| :class:`io.BufferedReader` | True | False | Binary |
+----------------------------+----------+-----------+-------------+
| :class:`io.BufferedWriter` | False | True | Binary |
+----------------------------+----------+-----------+-------------+
| :class:`io.BufferedRandom` | True | True | Binary |
+----------------------------+----------+-----------+-------------+
.. note:: Filehandles opened with ``open`` in Python 2 are **not**
supported. Use ``io.open`` if you need to pass a filehandle.
.. note:: When reading a list of unicode (str) lines, the input for
`newline` is used to determine the number of lines in the resulting file
handle, not the number of elements in the list. This is to allow
composition with ``file.readlines()``.
Parameters
----------
file : filepath, url, filehandle, list
The input to convert to a filehandle.
mode : {'r', 'w'}, optional
Whether to return a readable or writable file. Conversely, this does
not imply that the returned file will be unwritable or unreadable.
To get a binary filehandle, set `encoding` to 'binary'.
encoding : str, optional
The encoding scheme to use for the file. If set to 'binary', no bytes
will be translated. Otherwise this matches the behavior of
:func:`io.open`.
errors : str, optional
Specifies how encoding and decoding errors are to be handled. This has
no effect when `encoding` is binary (as there can be no errors).
Otherwise this matches the behavior of :func:`io.open`.
newline : {None, "", '\\n', '\\r\\n', '\\r'}, optional
Matches the behavior of :func:`io.open`.
compression : {'auto', 'gzip', 'bz2', None}, optional
Will compress or decompress `file` depending on `mode`. If 'auto' then
determining the compression of the file will be attempted and the
result will be transparently decompressed. 'auto' will do nothing
when writing. Other legal values will use their respective compression
schemes. `compression` cannot be used with a text source.
compresslevel : int (0-9 inclusive), optional
The level of compression to use, will be passed to the appropriate
compression handler. This is only used when writing.
Returns
-------
filehandle : io.TextIOBase or io.BufferedReader/Writer
When `encoding='binary'` an :class:`io.BufferedReader` or
:class:`io.BufferedWriter` will be returned depending on `mode`.
Otherwise an implementation of :class:`io.TextIOBase` will be returned.
.. note:: Any underlying resources needed to create `filehandle` are
managed transparently. If `file` was closeable, garbage collection
of `filehandle` will not close `file`. Calling `close` on
`filehandle` will close `file`. Conversely calling `close` on `file`
will cause `filehandle` to reflect a closed state. **This does not
mean that a `flush` has occurred for `filehandle`; there may still
have been data in its buffer! Additionally, resources may not have
been cleaned up properly, so ALWAYS call `close` on `filehandle` and
NOT on `file`.**
"""
arguments = locals().copy()
del arguments['file']
file, _, is_binary_file = _resolve(file, **arguments)
return _munge_file(file, is_binary_file, arguments)
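# Illustrative calls (file names are assumptions):
#   fh = open('seqs.fna.gz')                      # text handle, transparently decompressed
#   bh = open('seqs.fna.gz', encoding='binary')   # decompressed bytes (io.BufferedReader)
#   out = open('out.fna.bz2', mode='w', compression='bz2')   # writable, bz2-compressed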
def _munge_file(file, is_binary_file, arguments):
mode = arguments.get('mode', _d['mode'])
encoding = arguments.get('encoding', _d['encoding'])
errors = arguments.get('errors', _d['errors'])
newline = arguments.get('newline', _d['newline'])
compression = arguments.get('compression', _d['compression'])
is_output_binary = encoding == 'binary'
newfile = file
compression_handler = get_compression_handler(compression)
if is_output_binary and newline is not _d['newline']:
raise ValueError("Cannot use `newline` with binary encoding.")
if compression is not None and not compression_handler:
raise ValueError("Unsupported compression: %r" % compression)
if is_binary_file:
if compression:
c = compression_handler(newfile, arguments)
if mode == 'w':
newfile = CompressedBufferedWriter(file, c.get_writer(),
streamable=c.streamable)
else:
newfile = CompressedBufferedReader(file, c.get_reader())
if not is_output_binary:
newfile = SaneTextIOWrapper(newfile, encoding=encoding,
errors=errors, newline=newline)
else:
if compression is not None and compression != 'auto':
raise ValueError("Cannot use compression with that source.")
if is_output_binary:
raise ValueError("Source is not a binary source")
return newfile
@contextmanager
def _resolve_file(file, **kwargs):
file, source, is_binary_file = _resolve(file, **kwargs)
try:
yield file, source, is_binary_file
finally:
if source.closeable:
file.close()
@contextmanager
@stable(as_of="0.4.0")
def open_file(file, **kwargs):
r"""Context manager for :func:`skbio.io.util.open`.
The signature matches :func:`open`. This context manager will not close
filehandles that it did not create itself.
Examples
--------
Here our input isn't a filehandle and so `f` will get closed.
>>> with open_file(['a\n']) as f:
... f.read()
...
'a\n'
>>> f.closed
True
Here we provide an open file and so `f` will not get closed and neither
will `file`.
>>> file = io.BytesIO(b'BZh91AY&SY\x03\x89\x0c\xa6\x00\x00\x01\xc1\x00\x00'
... b'\x108\x00 \x00!\x9ah3M\x1c\xb7\x8b\xb9"\x9c(H\x01'
... b'\xc4\x86S\x00')
>>> with open_file(file) as f:
... f.read()
...
'a\nb\nc\n'
>>> f.closed
False
>>> file.closed
False
"""
with _resolve_file(file, **kwargs) as (file, source, is_binary_file):
newfile = _munge_file(file, is_binary_file, source.options)
try:
yield newfile
finally:
# As soon as we leave the above context manager file will be closed
# It is important to realize that because we are closing an inner
# buffer, the outer buffer will reflect that state, but it won't
# get flushed as the inner buffer is oblivious to the outer
# buffer's existence.
if not newfile.closed:
newfile.flush()
_flush_compressor(newfile)
def _flush_compressor(file):
if isinstance(file, io.TextIOBase) and hasattr(file, 'buffer'):
file = file.buffer
if isinstance(file, CompressedBufferedWriter) and not file.streamable:
# Some formats like BZ2 compress the entire file, and so they will
# only flush once they have been closed. These kinds of files do not
# close their underlying buffer, but only testing can prove that...
file.raw.close()
@contextmanager
@stable(as_of="0.4.0")
def open_files(files, **kwargs):
"""A plural form of :func:`open_file`."""
with ExitStack() as stack:
yield [stack.enter_context(open_file(f, **kwargs)) for f in files]
| xguse/scikit-bio | skbio/io/util.py | Python | bsd-3-clause | 10,848 | [
"scikit-bio"
] | fea9c5aff72bcf43b11bf155a37d9535630510f6b7248a93418f6e5035c667ea |
"""Contains AssetManager, AssetLoader, and Asset parent classes"""
# assets.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import os
import threading
import copy
from Queue import PriorityQueue
import sys
import traceback
from mpf.system.config import CaseInsensitiveDict
class AssetLoader(threading.Thread):
"""Base class for the Asset Loader with runs as a separate thread and
actually loads the assets from disk.
Args:
queue: A reference to the asset loader ``Queue`` which holds assets
waiting to be loaded.
"""
def __init__(self, queue):
threading.Thread.__init__(self)
self.log = logging.getLogger('Asset Loader')
self.queue = queue
def run(self):
"""Run loop for the loader thread."""
try:
while True:
asset = self.queue.get()
if not asset[1].loaded:
self.log.debug("Loading Asset: %s. Callback: %s", asset[1],
asset[2])
asset[1].do_load(asset[2])
self.log.debug("Asset Finished Loading: %s. Remaining: %s",
asset[1], self.queue.qsize())
# If the asset is already loaded and we don't need to load it
# again, we still need to call the callback.
elif asset[2]:
self.log.debug("Calling callback for asset %s since it's "
"already loaded. Callback: %s", asset[1],
asset[2])
asset[2]()
# AssetManager.remove_asset_to_load()
# If the asset is already loaded, just ignore it and move on.
# I thought about trying to make sure that an asset isn't
# in the queue before it gets added. But since this is separate
# threads that would require all sorts of work. It's actually
# more efficient to add it to the queue anyway and then just
# skip it if it's already loaded by the time the loader gets to
# it.
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
msg = ''.join(line for line in lines)
self.exception_queue.put(msg)
class AssetManager(object):
"""Base class for an Asset Manager.
Args:
machine: The main ``MachineController`` object.
config_section: String of the name of the section in the config file
for the asset settings that this Asset Manager will manage. e.g.
'image'.
path_string: The setting in the paths section of the config file that
specifies what path these asset files are in. e.g. 'images'.
asset_class: A class object of the base class for the assets that this
Asset Manager will manage. e.g. Image.
asset_attribute: The string name that you want to refer to this asset
collection as. e.g. a value of 'images' means that assets will be
accessible via ``self.machine.images``.
file_extensions: A tuple of strings of valid file extensions that files
for this asset will use. e.g. ``('png', 'jpg', 'jpeg', 'bmp')``
There will be one Asset Manager for each different type of asset. (e.g. one
for images, one for movies, one for sounds, etc.)
All asset managers share a single loader thread.
"""
total_assets = 0
loader_queue = PriorityQueue()
loader_thread = AssetLoader(loader_queue)
loader_thread.daemon = True
loader_thread.start()
def __init__(self, machine, config_section, path_string, asset_class,
asset_attribute, file_extensions):
self.log = logging.getLogger(config_section + ' Asset Manager')
self.log.debug("Initializing...")
self.machine = machine
self.loader_thread.exception_queue = self.machine.crash_queue
self.max_memory = None
self.registered_assets = set()
self.path_string = path_string
self.config_section = config_section
self.asset_class = asset_class
self.file_extensions = file_extensions
self.machine.asset_managers[config_section] = self
if not hasattr(self.machine, asset_attribute):
setattr(self.machine, asset_attribute, CaseInsensitiveDict())
self.asset_list = getattr(self.machine, asset_attribute)
self.machine.mode_controller.register_load_method(self.load_assets,
self.config_section,
load_key='preload',
priority=asset_class.load_priority)
self.machine.mode_controller.register_start_method(self.load_assets,
self.config_section,
load_key='mode_start',
priority=asset_class.load_priority)
# register & load systemwide assets
self.machine.events.add_handler('init_phase_4',
self.register_and_load_machine_assets,
priority=self.asset_class.load_priority)
self.defaults = self.setup_defaults(self.machine.config)
def process_assets_from_disk(self, config, path=None):
"""Looks at a path and finds all the assets in the folder.
Looks in a subfolder based on the asset's path string.
Crawls subfolders too. The first subfolder it finds is used for the
asset's default config section.
If an asset has a related entry in the config file, it will create
the asset with that config. Otherwise it uses the default
Args:
config: A dictionary which contains a list of asset names with
settings that will be used for the specific asset. (Note this
is not needed for all assets; any asset file that is not in the
config dictionary will be set up with the asset_defaults settings
of the folder it was found in.)
path: A full system path to the root folder that will be searched
for assets. This should *not* include the asset-specific path
string. If omitted, only the machine's root folder will be
searched.
"""
if not path:
path = self.machine.machine_path
if not config:
config = dict()
root_path = os.path.join(path, self.path_string)
self.log.debug("Processing assets from base folder: %s", root_path)
for path, _, files in os.walk(root_path, followlinks=True):
valid_files = [f for f in files if f.endswith(self.file_extensions)]
for file_name in valid_files:
folder = os.path.basename(path)
name = os.path.splitext(file_name)[0].lower()
full_file_path = os.path.join(path, file_name)
if folder == self.path_string or folder not in self.defaults:
default_string = 'default'
else:
default_string = folder
#print "------"
#print "path:", path
#print "full_path", full_file_path
#print "file:", file_name
#print "name:", name
#print "folder:", folder
#print "default settings name:", default_string
#print "default settings:", self.defaults[default_string]
built_up_config = copy.deepcopy(self.defaults[default_string])
for k, v in config.iteritems():
if ('file' in v and v['file'] == file_name) or name == k:
if name != k:
name = k
#print "NEW NAME:", name
built_up_config.update(config[k])
break
built_up_config['file'] = full_file_path
config[name] = built_up_config
self.log.debug("Registering Asset: %s, File: %s, Default Group:"
" %s, Final Config: %s", name, file_name,
default_string, built_up_config)
return config
def register_and_load_machine_assets(self):
"""Called on MPF boot to register any assets found in the machine-wide
configuration files. (i.e. any assets not specified in mode config
files.)
If an asset is set with the load type of 'preload', this method will
also load the asset file into memory.
"""
self.log.debug("Registering machine-wide %s", self.config_section)
if self.config_section in self.machine.config:
config = self.machine.config[self.config_section]
else:
config = None
self.machine.config[self.config_section] = self.register_assets(
config=config)
self.log.debug("Loading machine-wide 'preload' %s", self.config_section)
# Load preload systemwide assets
self.load_assets(self.machine.config[self.config_section],
load_key='preload')
def setup_defaults(self, config):
"""Processed the ``asset_defaults`` section of the machine config
files.
"""
default_config_dict = dict()
if 'asset_defaults' in config and config['asset_defaults']:
if (self.config_section in config['asset_defaults'] and
config['asset_defaults'][self.config_section]):
this_config = config['asset_defaults'][self.config_section]
# set the default
default_config_dict['default'] = this_config.pop('default')
for default_section_name in this_config:
# first get a copy of the default for this section
default_config_dict[default_section_name] = (
copy.deepcopy(default_config_dict['default']))
# then merge in this section's specific settings
default_config_dict[default_section_name].update(
this_config[default_section_name])
return default_config_dict
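# Illustrative (assumed) machine config consumed by the loop above for an
# 'images' asset manager:
#
#   asset_defaults:
#     images:
#       default:
#         load: preload
#       mode_images:
#         load: mode_start
#
# which yields {'default': {'load': 'preload'},
#               'mode_images': {'load': 'mode_start'}}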
def register_assets(self, config, mode_path=None):
"""Scans a config dictionary and registers any asset entries it finds.
Args:
config: A dictionary of asset entries. This dictionary needs to
be "localized" to just the section for this particular
asset type. e.g. if you're loading "Images" the keys of this
dictionary should be image_1, image_2, etc., not "Images".
mode_path: The full path to the base folder that will be
searched for the asset file on disk. This folder should
*not* include the asset-specific folder. If omitted, the
base machine folder will be searched.
Note that this method merely registers the assets so they can be
referenced in MPF. It does not actually load the asset files into
memory.
"""
# config here is already localized
config = self.process_assets_from_disk(config=config, path=mode_path)
for asset in config:
if not os.path.isfile(config[asset]['file']):
config[asset]['file'] = self.locate_asset_file(
file_name=config[asset]['file'],
path=mode_path)
self.register_asset(asset=asset.lower(),
config=config[asset])
return config
def load_assets(self, config, mode=None, load_key=None, callback=None,
**kwargs):
"""Loads the assets from a config dictionary.
Args:
config: Dictionary that holds the assets to load.
mode: Not used. Included here since this method is registered as a
mode start handler.
load_key: String name of the load key which specifies which assets
should be loaded.
callback: Callback method which is called by each asset once it's
loaded.
**kwargs: Not used. Included to allow this method to be used as an
event handler.
The assets must already be registered in order for this method to work.
"""
# actually loads assets from a config file. Assumes that they've
# already been registered.
asset_set = set()
for asset in config:
if self.asset_list[asset].config['load'] == load_key:
self.asset_list[asset].load(callback=callback)
asset_set.add(self.asset_list[asset])
return self.unload_assets, asset_set
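# Returning (self.unload_assets, asset_set) lets the caller (presumably the
# mode controller that registered load_assets as a start method) unload
# exactly these assets again when the mode stops.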
def register_asset(self, asset, config):
"""Registers an asset with the Asset Manager.
Args:
asset: String name of the asset to register.
config: Dictionary which contains settings for this asset.
Registering an asset is what makes it available to be used in the game.
Note that registering an asset is separate from loading an asset. All
assets will be registered on MPF boot, but they can be loaded and
unloaded as needed to save on memory.
"""
#file_name = self.locate_asset_file(config['file'], path)
#
## get the defaults based on the path name
#this_config = copy.deepcopy(self.defaults[default_config_name])
#this_config.update(config)
self.asset_list[asset] = self.asset_class(self.machine, config,
config['file'], self)
def unload_assets(self, asset_set):
"""Unloads assets from memory.
Args:
asset_set: A set (or any iterable) of Asset objects which will be
unloaded.
Unloading an asset does not de-register it. It's still available to be
used, but it's just unloaded from memory to save on memory.
"""
for asset in asset_set:
self.log.debug("Unloading asset: %s", asset.file_name)
asset.unload()
def load_asset(self, asset, callback, priority=10):
"""Loads an asset into memory.
Args:
asset: The Asset object to load.
callback: The callback that will be called once the asset has been
loaded by the loader thread.
priority: The relative loading priority of the asset. If there's a
queue of assets waiting to be loaded, this load request will be
inserted into the queue in a position based on its priority.
"""
self.loader_queue.put((-priority, asset, callback))
# the priority is negated so the PriorityQueue pops the highest-priority asset first
self.log.debug("Adding %s to loader queue at priority %s. New queue "
"size: %s", asset, priority, self.loader_queue.qsize())
AssetManager.total_assets += 1
def locate_asset_file(self, file_name, path=None):
"""Takes a file name and a root path and returns a link to the absolute
path of the file
Args:
file_name: String of the file name
path: root of the path to check (without the specific asset path
string)
Returns: String of the full path (path + file name) of the asset.
Note this method will add the path string between the path you pass and
the file. Also if it can't find the file in the path you pass, it will
look for the file in the machine root plus the path string location.
"""
if path:
path_list = [path]
else:
path_list = list()
path_list.append(self.machine.machine_path)
for path in path_list:
full_path = os.path.join(path, self.path_string, file_name)
if os.path.isfile(full_path):
return full_path
self.log.critical("Could not locate asset file '%s'. Quitting...",
file_name)
raise Exception()
class Asset(object):
load_priority = 100
"""Specifies the priority order that assets will be loaded in. (Higher
numbers load first.) This is useful because some assets are built on
others, so we need a way to ensure that certain asset classes are loaded
first. (e.g. images, sounds, and videos need to be loaded before shows.).
"""
def __init__(self, machine, config, file_name, asset_manager):
self.machine = machine
self.config = config
self.file_name = file_name
self.asset_manager = asset_manager
self.loaded = False
self._initialize_asset()
def load(self, callback=None):
self.asset_manager.load_asset(self, callback)
def do_load(self, callback):
pass
def unload(self):
self._unload()
self.loaded = False
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| spierepf/mpf | mpf/system/assets.py | Python | mit | 18,843 | [
"Brian"
] | ce983609ad8991eac65a6a5f02c96f8524a8fe26e07ebac707c093c8471dccc9 |
"""
The B{0install list-feeds} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _
from zeroinstall.cmd import UsageError
from zeroinstall.injector import model
syntax = "URI"
def add_options(parser):
pass
def handle(config, options, args):
"""@type args: [str]"""
if len(args) != 1: raise UsageError()
uri = model.canonical_iface_uri(args[0])
iface = config.iface_cache.get_interface(uri)
if iface.extra_feeds:
for f in iface.extra_feeds:
print(f.uri)
else:
print(_("(no feeds)"))
| linuxmidhun/0install | zeroinstall/cmd/list_feeds.py | Python | lgpl-2.1 | 645 | [
"VisIt"
] | 168b2dc3fc54f2f3e5f00e80ad7222e6cae9122d9b74d67f9bacb68df98e6491 |
'''
A reporter class to prepare reports and network accounting plots.
Supports: packet loss rate (standard and magnified),
one-way delay, jitter, jitter over one-way delay
'''
__RCSID__ = "$Id: $"
from DIRAC import S_OK
from DIRAC.AccountingSystem.Client.Types.Network import Network
from DIRAC.AccountingSystem.private.Plotters.BaseReporter import BaseReporter
import numpy as np
class NetworkPlotter( BaseReporter ):
_typeName = "Network"
_typeKeyFields = [ dF[0] for dF in Network().definitionKeyFields ]
_reportPacketLossRateName = "Packet loss rate"
def _reportPacketLossRate( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, 100 - SUM(%s)/SUM(%s), 100",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength', 'PacketLossRate', 'entriesInBucket' ]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{ 'convertToGranularity' : 'average' }
)
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
return S_OK( { 'data' : dataDict, 'granularity' : granularity } )
def _plotPacketLossRate( self, reportRequest, plotInfo, filename ):
# prepare custom scale (10,20,...,100)
scale_data = dict( zip( range( 0, 101 ), range( 100, -1, -1 ) ) )
scale_ticks = range( 0, 101, 10 )
metadata = { 'title' : 'Packet loss rate by %s' % reportRequest[ 'grouping' ] ,
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'span' : plotInfo[ 'granularity' ],
'graph_size' : 'large',
'reverse_labels' : True,
'scale_data' : scale_data,
'scale_ticks' : scale_ticks }
return self._generateQualityPlot( filename, plotInfo[ 'data' ], metadata )
_reportMagnifiedPacketLossRateName = "Packet loss rate (magnified)"
def _reportMagnifiedPacketLossRate( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, 100 - IF(SUM(PacketLossRate)/SUM(entriesInBucket)*10 > 100, 100, SUM(PacketLossRate)/SUM(entriesInBucket)*10), 100",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength' ]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{ 'convertToGranularity' : 'average' }
)
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
return S_OK( { 'data' : dataDict, 'granularity' : granularity } )
def _plotMagnifiedPacketLossRate( self, reportRequest, plotInfo, filename ):
# prepare custom scale (1..10, 100)
boundaries = list( np.arange( 0, 10, 0.1 ) )
boundaries.extend( range( 10, 110, 10 ) )
values = list( np.arange( 100, 0, -1 ) )
values.extend( [0] * 10 )
scale_data = dict( zip( boundaries, values ) )
scale_ticks = range( 0, 11 )
scale_ticks.append( 100 )
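# Together with the SQL above (loss rate multiplied by 10 and capped at 100),
# this mapping stretches the 0-10% loss range across the whole quality scale
# and pins anything above 10% loss to the bottom, hence "magnified".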
metadata = { 'title' : 'Magnified packet loss rate by %s' % reportRequest[ 'grouping' ] ,
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'span' : plotInfo[ 'granularity' ],
'reverse_labels' : True,
'graph_size' : 'large',
'scale_data' : scale_data,
'scale_ticks' : scale_ticks }
return self._generateQualityPlot( filename, plotInfo[ 'data' ], metadata )
_reportAverageOneWayDelayName = "One-way delay (average)"
def _reportAverageOneWayDelay( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, SUM(%s)/SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength', 'OneWayDelay', 'entriesInBucket' ]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{ 'convertToGranularity' : 'average' }
)
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
dataDict = self._fillWithZero( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
return S_OK( { 'data' : dataDict, 'granularity' : granularity, 'unit' : 'ms' } )
def _plotAverageOneWayDelay( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'One-way delay by %s' % reportRequest[ 'grouping' ] ,
'ylabel' : plotInfo[ 'unit' ],
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'graph_size' : 'large',
'span' : plotInfo[ 'granularity' ],
'sort_labels': 'avg_nozeros',
'legend_unit': plotInfo[ 'unit' ]
}
return self._generateStackedLinePlot( filename, plotInfo[ 'data' ], metadata )
_reportJitterName = "Jitter"
def _reportJitter( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, SUM(%s)/SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength', 'Jitter', 'entriesInBucket' ]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{ 'convertToGranularity' : 'average' }
)
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
dataDict = self._fillWithZero( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
return S_OK( { 'data' : dataDict, 'granularity' : granularity, 'unit' : 'ms' } )
def _plotJitter( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'Jitter by %s' % reportRequest[ 'grouping' ] ,
'ylabel' : plotInfo[ 'unit' ],
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'graph_size' : 'large',
'span' : plotInfo[ 'granularity' ],
'sort_labels': 'avg_nozeros',
'legend_unit': plotInfo[ 'unit' ] }
return self._generateStackedLinePlot( filename, plotInfo[ 'data' ], metadata )
_reportJitterDelayRatioName = "Jitter/Delay"
def _reportJitterDelayRatio( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, SUM(%s)/SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength', 'Jitter', 'OneWayDelay' ]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{ 'convertToGranularity' : 'average' }
)
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
dataDict = self._fillWithZero( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
return S_OK( { 'data' : dataDict, 'granularity' : granularity } )
def _plotJitterDelayRatio( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'Jitter over one-way delay by %s' % reportRequest[ 'grouping' ],
'ylabel' : '',
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'graph_size' : 'large',
'span' : plotInfo[ 'granularity' ],
'sort_labels': 'avg_nozeros',
'legend_unit': ''
}
return self._generateStackedLinePlot( filename, plotInfo[ 'data' ], metadata )
| andresailer/DIRAC | AccountingSystem/private/Plotters/NetworkPlotter.py | Python | gpl-3.0 | 9,348 | [
"DIRAC"
] | 91ef3a24005b8ff41524e294b310b5a301c58cbef7fddc8bf58c54bfccfd9253 |
def smhm(logMstar, redshift):
"""Convenience method to calculate halo masses using default SMHM relations.
Current default: smhm_rodriguez().
--- Inputs ---
logMstar = log of stellar mass of galaxies (can be list/array)
redshift = redshift of galaxies (can be list/array)
--- Returns ---
logM200c = M200 mass compared with critical density (numpy array)
"""
logMhalo = smhm_rodriguez(logMstar, redshift)
return logMhalo
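# Illustrative call (values are assumptions):
#   smhm([10.5, 11.0], 0.3)  # -> numpy array of log halo masses via smhm_rodriguez()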
def smhm_tinker(logMstar,redshift,h=0.7):
"""Calculate Tinker halo masses for given stellar masses.
This fit was done to Figure 10 of Tinker+ (2017) and uses an assumed log M* = 0.18 dex. Below log M* = 10.9 we match the power-law index from Velander+ (2014).
Defaults to h=0.7. """
import numpy as np
from astropy.modeling import models
# Tinker fit. This fit was done to Figure 10 of Tinker+ (2017) uses
# an assumed log M* = 0.18 dex. Below log M*=10.9, we match the power-law
# index from Velander+ (2014).
tinker_smhm_fit = models.Chebyshev1D(5, c0=12.81806600627276,
c1=1.2018634412571902,
c2=0.013199452285390979, c3=0.01775015568831073,
c4=-0.029254096888480078, c5=-0.025509308396318747,
domain=[10.3, 12.19191])
#########################
# If only one redshift, duplicate it.
num_gals = np.size(logMstar)
if (num_gals != 1) & (np.size(redshift) == 1):
zzz = np.full_like(logMstar,redshift)
else:
zzz = redshift
# Calculate halo masses with the Tinker fit. The h^-1 correction
# is applied here following the discussion in their paper.
logMhalo = np.array(tinker_smhm_fit(logMstar))-np.log10(h)
# For stellar masses outside of the fit domain, replace masses with NaN
bad = ((logMstar <= np.min(tinker_smhm_fit.domain)) |
(logMstar >= np.max(tinker_smhm_fit.domain)))
logMhalo[bad] = np.nan
# Return a numpy array:
logMhalo = np.array(logMhalo)
return logMhalo
def smhm_behroozi(logMstar, redshift):
"""Calculate halo masses from the SMHM relations of Behroozi+ (2010).
**Appropriate for redshifts z<=1.**
--- Inputs ---
logMstar = log of stellar mass of galaxies (can be list/array)
redshift = redshift of galaxies (can be list/array)
--- Returns ---
logM200c = M200 mass compared with critical density (numpy array)
"""
import numpy as np
def _calc_m200_Behroozi2010(logMstar,redshift):
"""Calculate M_200c following Behroozi+ (2010)"""
# Coefficients for M200c with redshift evolution from B10:
M10 = 12.35
M1a = 0.28
M00 = 10.72
M0a = 0.55
beta0 = 0.44
betaa = 0.18
delta0 = 0.57
deltaa = 0.17
gamma0 = 1.56
gammaa = 2.51
# Scale factor (for z<1 this works fine).
a = 1./(1.+redshift)
logM1 = M10 + M1a*(a-1.)
logM0 = M00 + M0a*(a-1.)
beta = beta0 + betaa*(a-1.)
delta = delta0 + deltaa*(a-1.)
gamma = gamma0 + gammaa*(a-1.)
# Calculate the M200
logMstarM0 = logMstar - logM0
logM200 = (logM1 + beta * logMstarM0 +
10 ** (delta * logMstarM0) / (1 + 10 ** (-gamma * logMstarM0)) - 0.5)
return logM200
# TODO: Incorporate Behroozi+ (2013) as an option
# def _calc_m200_Behroozi2013(logMstar,redshift):
# """Calculate M_200c following Behroozi+ (2013)"""
#
# def _fff(x,alpha,delta,gamma)
# fx = -np.log10(10**(alpha*x)+1.)
# fx += delta*(np.log10(1.+np.exp(x)))**gamma/(1.+np.exp(10.**(-x)))
#
# return fx
#
#
# # Coefficients for M200c with redshift evolution from B13:
# M10 = 12.35
# M1a = 0.28
# M1z =
#
# delta0 = 0.57
# deltaa = 0.17
# deltaz =
# gamma0 = 1.56
# gammaa = 2.51
# gammaz =
#
# alpha0 =
# alphaa =
# epsilon0 =
# epsilona =
# epsilonz =
# epsilona2 =
#
# # Scale factor (for z<1 this works fine).
# z=redshift
# a = 1./(1.+z) # Scale factor...calculate this.
#
# nu = np.exp(-4.*a**2)
#
# logM1 = M10 + (M1a*(a-1.)+M1z*z)*nu
# logEpsilon = epsilon0+(epsilona*(a-1.)+epsilonz*z)*nu+epsilona2*(a-1.)
# alpha = alpha0+(alphaa*(a-1.))*nu
#
# delta = delta0 + (deltaa*(a-1.)+deltaz*z)*nu
# gamma = gamma0 + (gammaa*(a-1.)+gammaz*z)*nu
#
#
# # Calculate the M200
# logMstarM0 = logMstar - logM0
# logM200 = (logM1 + beta * logMstarM0 +
# 10 ** (delta * logMstarM0) / (1 + 10 ** (-gamma * logMstarM0)) - 0.5)
#
# return logM200
#########################
# Main part of the procedure just loops over the number of galaxies:
num_gals = np.size(logMstar)
    # If we've got multiple galaxies but only one redshift, clone the redshift.
if (num_gals != 1) & (np.size(redshift) == 1):
zzz = np.full_like(logMstar,redshift)
logM = np.array(logMstar)
# If we've got multiple galaxies and redshifts
else:
zzz = np.array(redshift)
logM = np.array(logMstar)
logMhalo = []
# Loop over the number of galaxies
if num_gals == 1:
logMhalo = _calc_m200_Behroozi2010(logM,zzz)
else:
for j in np.arange(num_gals):
logMhalo.append(_calc_m200_Behroozi2010(logM[j],zzz[j]))
# Return a numpy array:
logMhalo = np.array(logMhalo)
return logMhalo
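def _example_smhm_behroozi():
    """Minimal usage sketch for smhm_behroozi. The stellar masses and the
    redshift below are illustrative values only, not taken from any catalogue."""
    import numpy as np
    logMstar = np.array([10.5, 11.0, 11.3])   # log10 stellar masses [Msun]
    redshift = 0.3                            # single z, broadcast to all galaxies
    logM200c = smhm_behroozi(logMstar, redshift)
    return logM200c                           # numpy array of log10 M200c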
def smhm_shan(logMstar, redshift):
"""Calculate halo masses from the SMHM relations of Shan+ (2017). At
log M_star < 11, this relies on the results of M. Hudson+ (2015).
--- Inputs ---
logMstar = log of stellar mass of galaxies (can be list/array)
redshift = redshift of galaxies (can be list/array)
--- Returns ---
logM200c = M200 mass compared with critical density (numpy array)
"""
import numpy as np
def _calc_m200(logMstar,redshift):
"""Calculate M_200c following Shan+ (2017)"""
# Coefficients for M200c (+ scatter) from Shan+ (2017)
if redshift < 0.2:
logM1 = 12.52
logM0 = 10.98
beta = 0.47
delta = 0.55
gamma = 1.43
print('Redshift z={0} is outside of the Shan+ (2017) range; '
'assuming value for 0.2 < z < 0.4'.format(redshift))
elif ((redshift >= 0.2) and (redshift < 0.4)):
logM1 = 12.52
logM0 = 10.98
beta = 0.47
delta = 0.55
gamma = 1.43
elif ((redshift >= 0.4) & (redshift <= 0.6)):
logM1 = 12.70
logM0 = 11.11
beta = 0.50
delta = 0.54
gamma = 1.72
        else:
            # redshift > 0.6 (including z > 1): fall back to the 0.4 < z < 0.6 coefficients and warn below.
logM1 = 12.70
logM0 = 11.11
beta = 0.50
delta = 0.54
gamma = 1.72
print('Redshift z={0} is outside of the Shan+ (2017) range; '
'assuming value for 0.4 < z < 0.6'.format(redshift))
# Calculate the M200
logMstarM0 = logMstar - logM0
logM200 = (logM1 + beta * logMstarM0 +
10 ** (delta * logMstarM0) / (1 + 10 ** (-gamma * logMstarM0)) - 0.5)
return logM200
#########################
# Main part of the procedure just loops over the number of galaxies:
num_gals = np.size(logMstar)
    # If we've got multiple galaxies but only one redshift, clone the redshift.
if (num_gals != 1) & (np.size(redshift) == 1):
zzz = np.full_like(logMstar,redshift)
logM = np.array(logMstar)
# If we've got multiple galaxies and redshifts
else:
zzz = np.array(redshift)
logM = np.array(logMstar)
logMhalo = []
# Loop over the number of galaxies
if num_gals == 1:
logMhalo = _calc_m200(logM,zzz)
else:
for j in np.arange(num_gals):
logMhalo.append(_calc_m200(logM[j],zzz[j]))
# Return a numpy array:
logMhalo = np.array(logMhalo)
return logMhalo
def smhm_rodriguez(logMstar, redshift):
"""Calculate halo masses from the SMHM relations of Rodriguez-Puebla+ (2017).
--- Inputs ---
logMstar = log of stellar mass of galaxies (can be list/array)
redshift = redshift of galaxies (can be list/array)
--- Returns ---
logM200c = M200 mass compared with critical density (numpy array)
"""
import numpy as np
def _calc_mvir(logMstar,redshift):
"""Calculate M_vir following Rodriguez-Puebla+ (2017)"""
RP17_redshifts=np.array([0.1,0.25,0.5,0.75,1.,1.25,1.5])
logM1 = np.array([12.58,12.61,12.68,12.77,12.89,13.01,13.15])
logM0 = np.array([10.9,10.93,10.99,11.08,11.19,11.31,11.47])
beta = np.array([0.48,0.48,0.48,0.50,0.51,0.53,0.54])
delta = np.array([0.29,0.27,0.23,0.18,0.12,0.03,-0.10])
gamma = np.array([1.52,1.46,1.39,1.33,1.27,1.22,1.17])
idx=(np.abs(redshift-RP17_redshifts)).argmin()
# Calculate the M200
logMstarM0 = logMstar - logM0[idx]
logMvir = (logM1[idx] + beta[idx] * logMstarM0 +
10 ** (delta[idx] * logMstarM0) / (1 + 10 ** (-gamma[idx] * logMstarM0)) - 0.5)
return logMvir
#########################
# Main part of the procedure just loops over the number of galaxies:
num_gals = np.size(logMstar)
    # If we've got multiple galaxies but only one redshift, clone the redshift.
if (num_gals != 1) & (np.size(redshift) == 1):
zzz = np.full_like(logMstar,redshift)
logM = np.array(logMstar)
# If we've got multiple galaxies and redshifts
else:
zzz = np.array(redshift)
logM = np.array(logMstar)
logMhalo = []
# Loop over the number of galaxies
if num_gals == 1:
logMhalo = _calc_mvir(logM,zzz)
else:
for j in np.arange(num_gals):
logMhalo.append(_calc_mvir(logM[j],zzz[j]))
# Return a numpy array:
logMhalo = np.array(logMhalo)
return logMhalo
def virial_radius(logMhalo, redshift, delta=200., rhocrit=True,
BryanNorman=False, WMAP=False, COSHalos=False):
"""Calculate the virial radius of a galaxy.
--- Inputs: ---
logMhalo: Halo mass(es).
redshift: Galaxy redshift
delta=200: Overdensity (Default=200)
rhocrit: Use the critical density (Default=True); alternately the mean density of the universe.
WMAP: Use WMAP9 cosmology. (Default=False)
BryanNorman: Use the Bryan & Norman (1998) scaling (Default=False)
COSHalos: Use the COS-Halos assumptions (Default=False)
"""
import numpy as np
import astropy.units as u
import astropy.constants as c
# Set some terms based on the COSHalos flag, which matches the calculations
# from the COS-Halos work (Prochaska+ 2017).
#
# This overrides the other user-set flags.
if COSHalos:
BryanNorman=True
WMAP=True
rhocrit=True
# Choose the cosmology. Default is Plank15.
if WMAP:
from astropy.cosmology import WMAP9 as cosmo
else:
from astropy.cosmology import Planck15 as cosmo
    # Use the Bryan & Norman (1998) definition of Rvir? Default: False
#
# This overrides user-set flags for delta and rhocrit.
if BryanNorman:
# Overdensity depends on redshift:
x = cosmo.Om(redshift)-1.
delta = (18.*np.pi**2+82.*x-39.*x**2)
# Also assume critical density scaling:
rhocrit = True
# Choose whether to scale by mean or critical density. Default: Critical
if rhocrit == True:
rho = cosmo.critical_density(redshift)
else:
rho = cosmo.Om(redshift)*cosmo.critical_density(redshift)
    # Linear halo mass (requires logMhalo as a numpy array for calculations).
Mhalo = (10.**np.array(logMhalo))*u.M_sun
# Calculate the virial radius.
Rvir3 = (3./(4.*np.pi))*(Mhalo/(delta*rho.to('Msun/kpc3')))
Rvir = (Rvir3)**(1./3.)
return Rvir
def calc_r200(logM200, redshift):
"""Convenience method for calculating R_200.
---Inputs---
logM200 = Halo mass at 200x critical density (can be list/array)
redshift = Halo redshift (can be list/array)
"""
import numpy as np
# Set the parameters for the virial radius calculation
delta=200.
rhocrit=True
r200=virial_radius(logM200, redshift, delta=delta, rhocrit=rhocrit)
return r200
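def _example_halo_radius():
    """Minimal sketch chaining the SMHM and virial-radius helpers: stellar mass
    -> M200c (Behroozi+ 2010) -> R200. The input values are illustrative only."""
    import numpy as np
    logMstar = np.array([10.8, 11.2])
    redshift = np.array([0.2, 0.5])
    logM200 = smhm_behroozi(logMstar, redshift)  # log10 M200c [Msun]
    r200 = calc_r200(logM200, redshift)          # astropy Quantity in kpc
    return r200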
def log_schechter_function(logMstar, logMref=10.5,
alpha1=-1.0, logPhi1=-3.0,
alpha2=-1.0, logPhi2=-30.0):
"""Calculate two-component Schechter function. Default values (crude) are
equivalent to single-component with log M*=10.5, alpha=-1, logPhi0=-3."""
import numpy as np
schechter_part1 = 10.**logPhi1*10.**((1+alpha1)*(logMstar-logMref))
schechter_part2 = 10.**logPhi2*10.**((1+alpha2)*(logMstar-logMref))
schechter_part3 = np.exp(-10.**(logMstar - logMref))
logPhi_out = np.log10((schechter_part1+schechter_part2)*schechter_part3)
return logPhi_out
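def _example_schechter_single_component(logMstar=10.0):
    """Minimal sketch (illustrative test mass only): with the default
    logPhi2=-30 the second component is negligible, so the output should match
    the single-Schechter form
    logPhi1 + (1+alpha1)*(logMstar-logMref) - 10**(logMstar-logMref)/ln(10)."""
    import numpy as np
    alpha1, logPhi1, logMref = -1.0, -3.0, 10.5   # the function's default parameters
    two_component = log_schechter_function(logMstar)
    single_component = (logPhi1 + (1 + alpha1) * (logMstar - logMref)
                        - 10.**(logMstar - logMref) / np.log(10.))
    return two_component, single_component        # these should agree closely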
| jchowk/pyND | pyND/gal/halos.py | Python | gpl-3.0 | 13,574 | [
"Galaxy",
"TINKER"
] | 607dc3a78d2fe4e2de703e4f3c188c10e2bccfb486019df7aba3dcb6e74ae75a |
from collections import defaultdict
from itertools import chain
import logging
import numpy
from theano import function
import warnings
logger = logging.getLogger(__name__)
class StateComputer(object):
"""
Convenient interface to theano function for computing states/cells
of a trained SequenceGenerator model.
Expects a Model instance as argument. This model must also be created
from a SequenceGenerator.cost application. Furthermore, expects a dict
that maps from characters to indices.
Extracts the model's inputs and auxiliary variables into attributes
and creates theano function from inputs to auxiliary vars.
Finally, provides a method that wraps the output of the theano function in
a dict keyed by auxiliary variable labels.
"""
def __init__(self, cost_model, map_char_to_ind):
raw_state_vars = filter(self._relevant, cost_model.auxiliary_variables)
self.state_variables = sorted(raw_state_vars, key=lambda var: var.name)
self.state_var_names = [var.name for var in self.state_variables]
self.inputs = sorted(cost_model.inputs, key=lambda var: var.name)
self.func = function(self.inputs, self.state_variables)
self.map_char_to_ind = map_char_to_ind
self._prob_func = None
def _relevant(self, aux_var):
not_final_value = "final_value" not in aux_var.name
cell_or_state = "states" in aux_var.name or "cells" in aux_var.name
return cell_or_state and not_final_value
def read_single_sequence(self, sequence):
"""
Combines list of aux var values (output from theano func) with their
corresponding labels.
New and improved with (I think) more convenient interface.
- as input provide a list (or something array-like, i.e. that can be
converted to a numpy array) of either characters or their representing
integers. If a list of characters is provided, the conversion is done
in this method. NOTE that a string is more or less a list of characters
for the purpose of this method =)
- the theano function expects a 2D input where the first dimension is
batch_size. This "fake" dimension is added below, so don't wrap the
input sequence yourself.
- because the cost was originally defined with a mask, the theano
function needs one, as well. This mask is constructed below so you
don't have to provide it yourself.
"""
if all(isinstance(entry, str) for entry in sequence):
indices = [self.map_char_to_ind[char] for char in sequence]
converted_sequence = numpy.array([indices], dtype="int32")
elif all(isinstance(entry, int) for entry in sequence):
converted_sequence = numpy.array([sequence], dtype="int32")
else:
raise ValueError("Some or all sequence elements have invalid type "
"(should be str or int)!")
mask = numpy.ones(converted_sequence.shape, dtype="int8")
computed_aux_vars = self.func(converted_sequence, mask)
flattened_aux_vars = map(drop_batch_dim, computed_aux_vars)
return dict(zip(self.state_var_names, flattened_aux_vars))
def read_sequence_batch(self, sequences, mask=None):
"""
Basically read_single_sequence but for a whole batch to speed things up.
Note that, because I find it unlikely that someone wants to pass in
a batch of strings, this only works with sequences of integers atm.
That is, the way the data is stored on disk.
Mask can be computed beforehand, in which case sequences arg should
be padded.
If no mask is passed, we use pad_mask function to create it (and padding).
"""
if mask is None:
# tries to pad sequences as well
sequences, mask = pad_mask(sequences)
computed_states = self.func(sequences, mask)
return dict(zip(self.state_var_names, computed_states))
def compute_sequence_probabilities(self, test_sequences, mask=None, prob_func_inputs=None, prob_func_outputs=None):
if mask is None:
test_sequences, mask = pad_mask(test_sequences)
if self._prob_func is None:
if prob_func_inputs is None or prob_func_outputs is None: raise ValueError('Inputs needed for initializing probability function.')
self.set_prob_func(prob_func_inputs, prob_func_outputs)
elif prob_func_inputs or prob_func_outputs:
logger.info('State computer already has a probability function, inputs argument will be ignored. You can overwrite it by calling set_prob_func.')
probs = self._prob_func(test_sequences, mask).swapaxes(0, 1) # sentence-dimension first
seq_probs = []
for s_ix in range(len(probs)):
rows = numpy.arange(len(test_sequences[s_ix]))
elem_wise_probs = probs[s_ix][rows, test_sequences[s_ix]]
s_prob = elem_wise_probs.prod()
seq_probs.append(s_prob)
return numpy.array(seq_probs)
def compute_raw_sequence_probabilities(self, test_sequences, mask=None, prob_func_inputs=None, prob_func_outputs=None):
if mask is None:
test_sequences, mask = pad_mask(test_sequences)
if self._prob_func is None:
if prob_func_inputs is None or prob_func_outputs is None: raise ValueError('Inputs needed for initializing probability function.')
self.set_prob_func(prob_func_inputs, prob_func_outputs)
elif prob_func_inputs or prob_func_outputs:
logger.info('State computer already has a probability function, inputs argument will be ignored. You can overwrite it by calling set_prob_func.')
probs = self._prob_func(test_sequences, mask).swapaxes(0, 1) # sentence-dimension first
seq_probs = []
for s_ix in range(len(probs)):
rows = numpy.arange(len(test_sequences[s_ix]))
elem_wise_probs = probs[s_ix][rows, test_sequences[s_ix]]
seq_probs.append(elem_wise_probs)
return numpy.array(seq_probs)
def set_prob_func(self, inputs, outputs):
self._prob_func = function(inputs, outputs)
def drop_batch_dim(array_with_batch_dim):
"""When reading in one sentence at a time the batch dimension is superflous.
Using numpy.squeeze we get rid of it. This relies on two assumptions:
- that dimension being "1"
- that being the second dimension (shape[1])
"""
return numpy.squeeze(array_with_batch_dim, axis=1)
def pad_mask(batch):
maxlen = max(len(example) for example in batch) # clearer than example.shape[0]
# For one, zero-pad the sequences
# Also build the mask
dims = (len(batch), maxlen)
padded_seqs = numpy.zeros(dims, dtype="int32")
# mask is int8 because apparently it's converted to float later,
# and higher ints would complain about loss of precision
mask = numpy.zeros(dims, dtype="int8")
for (example_ind, example) in enumerate(batch):
# we go through the sequences and simply put them into the padded array;
# this will leave 0s wherever the sequence is shorter than maxlen.
# similarly, mask will be set to 1 only up to the length of the respective sequence.
# note that the transpose is done implicitly by essentially swapping indices
padded_seqs[example_ind, :len(example)] = example
mask[example_ind, :len(example)] = 1
return padded_seqs, mask
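def _example_pad_mask():
    """Minimal sketch of pad_mask on a toy batch of integer-encoded sequences
    (made-up values). Shorter sequences are zero-padded and masked out."""
    batch = [[3, 7, 2], [5, 1]]
    padded, mask = pad_mask(batch)
    # padded -> [[3, 7, 2],      mask -> [[1, 1, 1],
    #            [5, 1, 0]]               [1, 1, 0]]
    return padded, mask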
def select_positions(aux_var_dict, indx=1):
"""Select certain indices from auxiliary var dictionary.
Note that indx can be any slicing/indexing construct valid in numpy.
"""
return {var_name: var_val[indx] for var_name, var_val in aux_var_dict.items()}
def mark_seq_len(seq):
return numpy.arange(len(seq))
def mark_word_boundaries(seq):
# define what's considered a word boundary. It's a set on purpose to permit
# adding more character types as needed.
wb = {
" ",
"\n"
}
return numpy.array([1 if char in wb else 0 for char in seq])
def mark_char_property(seq, bool_property_fun):
"""
the func arg is supposed to be a str function, e.g.:
str.isupper
str.islower
str.isalnum
str.isalpha
...
A complex lambda expression or wrapper function is also possible, of course
:param seq:
:param bool_property_fun:
:return:
"""
return numpy.array([1 if bool_property_fun(seq[i]) else 0 for i in range(len(seq))])
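def _example_markers():
    """Minimal sketch of the character-marking helpers on a made-up string:
    each returns a 0/1 numpy array aligned with the characters of seq."""
    seq = "The cat"
    boundaries = mark_word_boundaries(seq)            # [0, 0, 0, 1, 0, 0, 0]
    uppercase = mark_char_property(seq, str.isupper)  # [1, 0, 0, 0, 0, 0, 0]
    return boundaries, uppercase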
def mark_bracketing(seq, opening, closing, marking_fun, ignore_starts=None, **kwargs):
"""
:param seq:
:param opening:
:param closing:
:param marking_fun:
    :param ignore_starts: a set of start indexes to be ignored, occurs rarely
:param kwargs:
:return:
"""
ix = 0
trigger = [opening, closing]
collect = False
ix_list = []
ret_val = numpy.zeros(len(seq))
while ix < len(seq):
        if seq[ix] == trigger[int(collect)] and (ignore_starts is None or ix not in ignore_starts):
collect = not collect
if not collect:
ix_list.append(ix)
ret_val[ix_list,] = marking_fun(ret_val, ix_list, **kwargs)
ix_list = []
if collect:
ix_list.append(ix)
ix += 1
return ret_val
def filter_by_threshold(neuron_array, threshold=1):
"""Tells which neuron activations surpass a certain threshold.
Args
neuron_array: numpy array of neuron activation values
threshold: numeric value by which to filter neurons.
Returns: indices of neurons with activations greater than threshold.
"""
return numpy.nonzero(neuron_array > threshold)
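def _example_filter_by_threshold():
    """Minimal sketch of filter_by_threshold on made-up activations; since it
    wraps numpy.nonzero, the result is a tuple of index arrays."""
    activations = numpy.array([0.2, 1.5, -0.3, 2.0])
    idx = filter_by_threshold(activations, threshold=1)
    # idx -> (array([1, 3]),)
    return idx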
def unpack_value_lists(some_dict):
"""Pairs every key in some_dict with every item in its value (list)."""
return ((key, v) for key in some_dict for v in some_dict[key])
def dependencies(dep_graph):
"""Turns nltk.parse.DependencyGraph into dict keyed by dependency labels.
Returns dict that maps dependency labels to lists.
Each list consists of pairs of (head word index, dependent word index).
Here's an example entry:
'DET' : [(9, 0), (4, 5)]
"""
dep_dict = defaultdict(list)
for index in dep_graph.nodes:
node = dep_graph.nodes[index]
for dep_label, dep_index in unpack_value_lists(node['deps']):
dep_dict[dep_label].append((index - 1, dep_index - 1))
return dep_dict
def simple_mark_dependency(dep_graph, dep_label):
"""Simple marking function for dependencies.
Takes dictionary of dependencies and dependency label.
Constructs a numpy array of zeros and marks with 1 the positions of words
that take part in the dependency.
"""
warnings.warn('Use mark_dependency with marking_fun='+MarkingFunctions.__name__+'.'+MarkingFunctions.simple_mark.__name__+' instead.', DeprecationWarning)
dep_dict = dependencies(dep_graph)
indeces = dep_dict[dep_label]
unique_index_list = list(set(chain.from_iterable(indeces)))
marked = numpy.zeros(len(dep_dict) - 1)
marked[unique_index_list] = 1
return marked
def mark_dependency(dep_graph, dep_label, prior_long=True, precomputed_dependencies=None, marking_fun=lambda x, ix: numpy.ones(len(ix)), **fun_kwargs):
"""
This function marks a dependency like simple_mark_dependency, so head and dependent. But in addition
the tokens between head and dependent are marked as well. The marking can be a simple sequence of 1s,
which is the default, or a sequence given by a marking function. The marking function will be called
    for each sequence of tokens from head to dependent (or vice versa) SEPARATELY. How sequences of tokens
    that contain smaller sequences of tokens showing the same dependency are prioritized, as in
______________________
/ __ \
/ / \ \
Das auf dem Berg stehende Haus
    is handled by :param prior_long:, which is True by default: the given sequence is then treated
    as a whole and no separate marking call is made for the contained sub-span. If you set :param prior_long: to False, "dem Berg"
    would be marked before the whole sequence.
:param dep_graph:
:param dep_label:
:param prior_long:
:param marking_fun:
:param fun_args:
:param fun_kwargs:
:return:
"""
raw_deps = precomputed_dependencies if not precomputed_dependencies is None else dependencies(dep_graph)[dep_label] # note: if empty list provided instead of None, nothing is marked -> that's desired
raw_indices = {frozenset(range(sorted(tpl)[0], sorted(tpl)[1]+1)) for tpl in raw_deps}
filtered_indices = []
if prior_long:
for ixset in raw_indices:
diffset = raw_indices.difference({ixset})
if not [e for e in diffset if ixset.issubset(e)]:
filtered_indices.append(sorted(list(ixset)))
else:
        # create discontinuous and continuous spans of indexes
for ixset in raw_indices:
diffset = raw_indices.difference({ixset})
app_ixs = list(ixset)
for subset in [e for e in diffset if e.issubset(ixset)]:
for i in subset:
app_ixs.remove(i)
filtered_indices.append(sorted(app_ixs))
marked = numpy.zeros(len(dep_graph.nodes)-1)
for ixlist in filtered_indices:
marked[ixlist,] = marking_fun(marked, ixlist, **fun_kwargs)
return marked
class MarkingFunctions(object):
"""
This class is a collection of marking functions in the sense of functions to be used by
the markers above (mark_dependency, mark_property) to mark a phenomenon of interest.
"""
@staticmethod
def rising_flank_linear(vec, ix_list):
# TODO this is actually a special case of flank and should be removed in the future
"""
this function only overwrites zeros
"""
        offset = 1.0/(len(vec[ix_list,])+1)
raw_val = numpy.arange(offset, 1+offset, offset)
return raw_val[-len(raw_val):]
# might make more sense for longer sequences
@staticmethod
def flank(vec, ix_list, nonlinearity=lambda x: x, frm=0, to=1, coeff=1):
return coeff*nonlinearity(numpy.arange(frm, to, abs((to-frm)/len(ix_list))))[-len(ix_list):]
@staticmethod
def simple_mark(vec, ixlist):
ret_val = numpy.zeros(len(ixlist))
ret_val[0] = 1
ret_val[-1] = 1
return ret_val
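def _example_mark_bracketing():
    """Minimal sketch of mark_bracketing on a made-up string: mark spans
    enclosed by double quotes using MarkingFunctions.simple_mark."""
    seq = 'a "bc" d'
    marked = mark_bracketing(seq, '"', '"', MarkingFunctions.simple_mark,
                             ignore_starts=set())
    # marked -> array([0., 0., 1., 0., 0., 1., 0., 0.])
    return marked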
| Copper-Head/the-three-stooges | util.py | Python | mit | 14,526 | [
"NEURON"
] | 529ccd3457694d1596ee1eee005ac4ddf3f7e931cd3bf465037bc38aeb378eee |
#
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import numpy as np
import espressomd
from espressomd.galilei import GalileiTransform
BOX_L = np.array([10, 20, 30])
N_PART = 500
class Galilei(ut.TestCase):
system = espressomd.System(box_l=BOX_L)
def setUp(self):
self.system.part.add(pos=BOX_L * np.random.random((N_PART, 3)),
v=-5. + 10. * np.random.random((N_PART, 3)),
f=np.random.random((N_PART, 3)))
if espressomd.has_features("MASS"):
self.system.part[:].mass = 42. * np.random.random((N_PART,))
def tearDown(self):
self.system.part.clear()
def test_kill_particle_motion(self):
g = GalileiTransform()
g.kill_particle_motion()
np.testing.assert_array_equal(
np.copy(self.system.part[:].v), np.zeros((N_PART, 3)))
def test_kill_particle_forces(self):
g = GalileiTransform()
g.kill_particle_forces()
np.testing.assert_array_equal(
np.copy(self.system.part[:].f), np.zeros((N_PART, 3)))
def test_cms(self):
parts = self.system.part[:]
g = GalileiTransform()
total_mass = np.sum(parts.mass)
com = np.sum(
np.multiply(parts.mass.reshape((N_PART, 1)), parts.pos), axis=0) / total_mass
np.testing.assert_allclose(np.copy(g.system_CMS()), com)
def test_cms_velocity(self):
parts = self.system.part[:]
g = GalileiTransform()
total_mass = np.sum(parts.mass)
com_v = np.sum(
np.multiply(parts.mass.reshape((N_PART, 1)), parts.v), axis=0) / total_mass
np.testing.assert_allclose(np.copy(g.system_CMS_velocity()), com_v)
def test_galilei_transform(self):
g = GalileiTransform()
g.galilei_transform()
np.testing.assert_allclose(
np.copy(g.system_CMS_velocity()), np.zeros((3,)), atol=1e-15)
if __name__ == "__main__":
ut.main()
| mkuron/espresso | testsuite/python/galilei.py | Python | gpl-3.0 | 2,661 | [
"ESPResSo"
] | b9bdbbe480ac629ddc52e46290726db65fa0248ac26a873e308a1c06bd7fa73b |
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import platform
from pyglet.gl.base import Config, CanvasConfig, Context
from pyglet.gl import ContextException
from pyglet.gl import gl
from pyglet.gl import agl
from pyglet.canvas.cocoa import CocoaCanvas
from pyglet.libs.darwin.cocoapy import *
NSOpenGLPixelFormat = ObjCClass('NSOpenGLPixelFormat')
NSOpenGLContext = ObjCClass('NSOpenGLContext')
# Version info, needed because OpenGL handling differs from Lion onward
"""Version is based on Darwin kernel, not OS-X version.
OS-X / Darwin version history
http://en.wikipedia.org/wiki/Darwin_(operating_system)#Release_history
pre-release: 0.1, 0.2, 1.0, 1.1,
kodiak: 1.2.1,
cheetah: 1.3.1,
puma: 1.4.1, 5.1 -> 5.5
jaguar: 6.0.1 -> 6.8
panther: 7.0 -> 7.9
tiger: 8.0 -> 8.11
leopard: 9.0 -> 9.8
snow_leopard: 10.0 -> 10.8
lion: 11.0 -> 11.4
mountain_lion: 12.0 -> 12.5
mavericks: 13.0 -> 13.4
yosemite: 14.0 -> 14.5
el_capitan: 15.0 -> 15.6
sierra: 16.0 -> 16.6
"""
os_x_release = {
'pre-release': (0,1),
'kodiak': (1,2,1),
'cheetah': (1,3,1),
    'puma': (1,4,1),
'jaguar': (6,0,1),
'panther': (7,),
'tiger': (8,),
'leopard': (9,),
'snow_leopard': (10,),
'lion': (11,),
'mountain_lion': (12,),
'mavericks': (13,),
'yosemite': (14,),
'el_capitan': (15,),
'sierra': (16,),
}
def os_x_version():
version = tuple([int(v) for v in platform.release().split('.')])
# ensure we return a tuple
if len(version) > 0:
return version
return (version,)
_os_x_version = os_x_version()
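# Illustrative note on the comparison logic above (example values only, not an
# exhaustive mapping): os_x_version() parses the Darwin kernel release string,
# so on Mavericks platform.release() is e.g. "13.4.0" and the resulting tuple
# compares element-wise against the table:
#   (13, 4, 0) >= os_x_release['lion']          -> True  (Lion is Darwin 11)
#   (13, 4, 0) < os_x_release['snow_leopard']   -> False (Snow Leopard is Darwin 10)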
# Valid names for GL attributes and their corresponding NSOpenGL constant.
_gl_attributes = {
'double_buffer': NSOpenGLPFADoubleBuffer,
'stereo': NSOpenGLPFAStereo,
'buffer_size': NSOpenGLPFAColorSize,
'sample_buffers': NSOpenGLPFASampleBuffers,
'samples': NSOpenGLPFASamples,
'aux_buffers': NSOpenGLPFAAuxBuffers,
'alpha_size': NSOpenGLPFAAlphaSize,
'depth_size': NSOpenGLPFADepthSize,
'stencil_size': NSOpenGLPFAStencilSize,
# Not exposed by pyglet API (set internally)
'all_renderers': NSOpenGLPFAAllRenderers,
'fullscreen': NSOpenGLPFAFullScreen,
'minimum_policy': NSOpenGLPFAMinimumPolicy,
'maximum_policy': NSOpenGLPFAMaximumPolicy,
'screen_mask' : NSOpenGLPFAScreenMask,
# Not supported in current pyglet API
'color_float': NSOpenGLPFAColorFloat,
'offscreen': NSOpenGLPFAOffScreen,
'sample_alpha': NSOpenGLPFASampleAlpha,
'multisample': NSOpenGLPFAMultisample,
'supersample': NSOpenGLPFASupersample,
}
# NSOpenGL constants which do not require a value.
_boolean_gl_attributes = frozenset([
NSOpenGLPFAAllRenderers,
NSOpenGLPFADoubleBuffer,
NSOpenGLPFAStereo,
NSOpenGLPFAMinimumPolicy,
NSOpenGLPFAMaximumPolicy,
NSOpenGLPFAOffScreen,
NSOpenGLPFAFullScreen,
NSOpenGLPFAColorFloat,
NSOpenGLPFAMultisample,
NSOpenGLPFASupersample,
NSOpenGLPFASampleAlpha,
])
# Attributes for which no NSOpenGLPixelFormatAttribute name exists.
# We could probably compute actual values for these using
# NSOpenGLPFAColorSize / 4 and NSOpenGLFAAccumSize / 4, but I'm not that
# confident I know what I'm doing.
_fake_gl_attributes = {
'red_size': 0,
'green_size': 0,
'blue_size': 0,
'accum_red_size': 0,
'accum_green_size': 0,
'accum_blue_size': 0,
'accum_alpha_size': 0
}
class CocoaConfig(Config):
def match(self, canvas):
# Construct array of attributes for NSOpenGLPixelFormat
attrs = []
for name, value in self.get_gl_attributes():
attr = _gl_attributes.get(name)
if not attr or not value:
continue
attrs.append(attr)
if attr not in _boolean_gl_attributes:
attrs.append(int(value))
# Support for RAGE-II, which is not compliant.
attrs.append(NSOpenGLPFAAllRenderers)
# Force selection policy.
attrs.append(NSOpenGLPFAMaximumPolicy)
# NSOpenGLPFAFullScreen is always supplied so we can switch to and
# from fullscreen without losing the context. Also must supply the
# NSOpenGLPFAScreenMask attribute with appropriate display ID.
# Note that these attributes aren't necessary to render in fullscreen
# on Mac OS X 10.6, because there we are simply rendering into a
# screen sized window. See:
# http://developer.apple.com/library/mac/#documentation/GraphicsImaging/Conceptual/OpenGL-MacProgGuide/opengl_fullscreen/opengl_cgl.html%23//apple_ref/doc/uid/TP40001987-CH210-SW6
# Otherwise, make sure we refer to the correct Profile for OpenGL (Core or
# Legacy) on Lion and afterwards
if _os_x_version < os_x_release['snow_leopard']:
attrs.append(NSOpenGLPFAFullScreen)
attrs.append(NSOpenGLPFAScreenMask)
attrs.append(quartz.CGDisplayIDToOpenGLDisplayMask(quartz.CGMainDisplayID()))
elif _os_x_version >= os_x_release['lion']:
# check for opengl profile
# This requires OS-X Lion (Darwin 11) or higher
version = (
getattr(self, 'major_version', None),
getattr(self, 'minor_version', None)
)
# tell os-x we want to request a profile
attrs.append(NSOpenGLPFAOpenGLProfile)
# check if we're wanting core or legacy
# Mavericks (Darwin 13) and up are capable of the Core 4.1 profile,
# while Lion and up are only capable of Core 3.2
if version == (4, 1) and _os_x_version >= os_x_release['mavericks']:
attrs.append(int(NSOpenGLProfileVersion4_1Core))
elif version == (3, 2):
attrs.append(int(NSOpenGLProfileVersion3_2Core))
else:
attrs.append(int(NSOpenGLProfileVersionLegacy))
# Terminate the list.
attrs.append(0)
# Create the pixel format.
attrsArrayType = c_uint32 * len(attrs)
attrsArray = attrsArrayType(*attrs)
pixel_format = NSOpenGLPixelFormat.alloc().initWithAttributes_(attrsArray)
# Return the match list.
if pixel_format is None:
return []
else:
return [CocoaCanvasConfig(canvas, self, pixel_format)]
class CocoaCanvasConfig(CanvasConfig):
def __init__(self, canvas, config, pixel_format):
super(CocoaCanvasConfig, self).__init__(canvas, config)
self._pixel_format = pixel_format
# Query values for the attributes of the pixel format, and then set the
# corresponding attributes of the canvas config.
for name, attr in _gl_attributes.items():
vals = c_int()
self._pixel_format.getValues_forAttribute_forVirtualScreen_(byref(vals), attr, 0)
setattr(self, name, vals.value)
# Set these attributes so that we can run pyglet.info.
for name, value in _fake_gl_attributes.items():
setattr(self, name, value)
# Update the minor/major version from profile if (Mountain)Lion
if _os_x_version >= os_x_release['lion']:
vals = c_int()
profile = self._pixel_format.getValues_forAttribute_forVirtualScreen_(
byref(vals),
NSOpenGLPFAOpenGLProfile,
0
)
if profile == NSOpenGLProfileVersion4_1Core:
setattr(self, "major_version", 4)
setattr(self, "minor_version", 1)
elif profile == NSOpenGLProfileVersion3_2Core:
setattr(self, "major_version", 3)
setattr(self, "minor_version", 2)
else:
setattr(self, "major_version", 2)
setattr(self, "minor_version", 1)
def create_context(self, share):
# Determine the shared NSOpenGLContext.
if share:
share_context = share._nscontext
else:
share_context = None
# Create a new NSOpenGLContext.
nscontext = NSOpenGLContext.alloc().initWithFormat_shareContext_(
self._pixel_format,
share_context)
return CocoaContext(self, nscontext, share)
def compatible(self, canvas):
return isinstance(canvas, CocoaCanvas)
class CocoaContext(Context):
def __init__(self, config, nscontext, share):
super(CocoaContext, self).__init__(config, share)
self.config = config
self._nscontext = nscontext
def attach(self, canvas):
# See if we want OpenGL 3 in a non-Lion OS
if _os_x_version < os_x_release['lion'] and self.config._requires_gl_3():
raise ContextException('OpenGL 3 not supported')
super(CocoaContext, self).attach(canvas)
# The NSView instance should be attached to a nondeferred window before calling
# setView, otherwise you get an "invalid drawable" message.
self._nscontext.setView_(canvas.nsview)
self._nscontext.view().setWantsBestResolutionOpenGLSurface_(1)
self.set_current()
def detach(self):
super(CocoaContext, self).detach()
self._nscontext.clearDrawable()
def set_current(self):
self._nscontext.makeCurrentContext()
super(CocoaContext, self).set_current()
def update_geometry(self):
# Need to call this method whenever the context drawable (an NSView)
# changes size or location.
self._nscontext.update()
def set_full_screen(self):
self._nscontext.makeCurrentContext()
self._nscontext.setFullScreen()
def destroy(self):
super(CocoaContext, self).destroy()
self._nscontext.release()
self._nscontext = None
def set_vsync(self, vsync=True):
vals = c_int(vsync)
self._nscontext.setValues_forParameter_(byref(vals), NSOpenGLCPSwapInterval)
def get_vsync(self):
vals = c_int()
self._nscontext.getValues_forParameter_(byref(vals), NSOpenGLCPSwapInterval)
return vals.value
def flip(self):
self._nscontext.flushBuffer()
| nicememory/pie | pyglet/pyglet/gl/cocoa.py | Python | apache-2.0 | 10,368 | [
"Jaguar"
] | b0b45388717e75cf5f10a3c4a9169073addf88eeeb878ef4509d147d17c266c7 |
#!/bin/env python
from __future__ import division, print_function , unicode_literals, absolute_import
import os, sys, subprocess
crow_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
eigen_cflags = ""
try:
has_pkg_eigen = subprocess.call(["pkg-config","--exists","eigen3"]) == 0
except:
has_pkg_eigen = False
if has_pkg_eigen:
eigen_cflags = subprocess.check_output(["pkg-config","eigen3","--cflags"])
libmesh_eigen = os.path.abspath(os.path.join(crow_dir,os.pardir,"moose","libmesh","contrib","eigen","eigen"))
if os.path.exists(libmesh_eigen):
eigen_cflags = "-I"+libmesh_eigen
if os.path.exists(os.path.join(crow_dir,"contrib","include","Eigen")):
eigen_cflags = ""
print(eigen_cflags)
| idaholab/raven | scripts/find_eigen.py | Python | apache-2.0 | 726 | [
"MOOSE"
] | ace062094afc2a556774721ce8ac2f300d312aa3b6847f8d7dcfe218e03692f8 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create reader and extract the velocity and temperature
reader = vtk.vtkPNGReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/vtk.png")
quant = vtk.vtkImageQuantizeRGBToIndex()
quant.SetInputConnection(reader.GetOutputPort())
quant.SetNumberOfColors(32)
i2pd = vtk.vtkImageToPolyDataFilter()
i2pd.SetInputConnection(quant.GetOutputPort())
i2pd.SetLookupTable(quant.GetLookupTable())
i2pd.SetColorModeToLUT()
i2pd.SetOutputStyleToPolygonalize()
i2pd.SetError(0)
i2pd.DecimationOn()
i2pd.SetDecimationError(0.0)
i2pd.SetSubImageSize(25)
#Need a triangle filter because the polygons are complex and concave
tf = vtk.vtkTriangleFilter()
tf.SetInputConnection(i2pd.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tf.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create graphics stuff
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren1.AddActor(actor)
ren1.SetBackground(1,1,1)
renWin.SetSize(300,250)
acamera = vtk.vtkCamera()
acamera.SetClippingRange(343.331,821.78)
acamera.SetPosition(-139.802,-85.6604,437.485)
acamera.SetFocalPoint(117.424,106.656,-14.6)
acamera.SetViewUp(0.430481,0.716032,0.549532)
acamera.SetViewAngle(30)
ren1.SetActiveCamera(acamera)
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| hlzz/dotfiles | graphics/VTK-7.0.0/Filters/Hybrid/Testing/Python/imageToPolyData.py | Python | bsd-3-clause | 1,672 | [
"VTK"
] | d94b9bb29321d1f4d5e9d59b4bb6c8dacc99ddba0aa0881bd1bff0828690164b |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#
#
#####################################################################################
"""
Program description:
1. First step of CLIP-seq analysis: cluster reads that have overlaps.
Design notes:
Uses the HTSeq package.
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../../../")
from ablib.utils.tools import *
from ablib.utils.iv_cluster import *
import gffutils
import HTSeq
import numpy
import multiprocessing
from matplotlib import pyplot
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
##basic options
p.add_option(
'-c', '--chrlen', dest='chrlen', action='store',
type='string', help='chrlen file')
p.add_option(
'-b', '--bam', dest='bam', action='store',
type='string', help='bam file')
p.add_option(
'-o', '--outfile', dest='outfile', default='patts', action='store',
type='string', help='ctts')
p.add_option(
'-n', '--samplename', dest='samplename', default='', action='store',
type='string', help='sample name,default is ""')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option(
'-O', '--outDir', dest='outDir', default='./', action='store',
type='string', help='output directory', metavar="DIR")
group.add_option(
'-L', '--logDir', dest='logDir', default='', action='store',
type='string', help='log dir ,default is same as outDir')
group.add_option(
'-P', '--logPrefix', dest='logPrefix', default='', action='store',
type='string', help='log file prefix')
group.add_option(
'-E', '--email', dest='email', default='none', action='store',
        type='string', help='email address, if you want to get an email when this job is finished; default is no email',
metavar="EMAIL")
group.add_option(
'-Q', '--quiet', dest='quiet', default=False, action='store_true',
help='do not print messages to stdout')
group.add_option(
'-K', '--keepTemp', dest='keepTemp', default=False, action='store_true',
help='keep temp dir')
group.add_option(
'-T', '--test', dest='isTest', default=False, action='store_true',
help='run this program for test')
p.add_option_group(group)
if len(sys.argv) == 1:
p.print_help()
sys.exit(1)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
"""获得完整的命令
"""
rVal = ''
for a in x:
rVal += a + ' '
return rVal
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
sample = ""
if opt.samplename != "":
sample = opt.samplename + '.'
if opt.outfile == 'patts':
opt.outfile = sample + 'patts'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = scriptPath + '/bin' # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/' # absolute bin path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M',
filename=logFilename,
filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
print("Main procedure start...")
if not os.path.isfile(opt.bam):
        print(opt.bam + ' does not exist, please check your file')
sys.exit(1)
os.chdir(opt.outDir)
chrlen_dict = {}
for eachLine in open(opt.chrlen):
line = eachLine.strip().split('\t')
chrlen_dict[line[0]] = line[1]
server = multiprocessing.Manager()
jobs = []
m = 0
n = 0
chr_dict = readBamHeader(opt.bam)
print (chr_dict.keys())
for chr in chrlen_dict:
print (chr)
if not chr in chr_dict:
continue
chr_iv = HTSeq.GenomicInterval(chr, 0, int(chrlen_dict[chr]), ".")
print ("222")
t1 = multiprocessing.Process(target=CTSS, args=(opt.bam, chr_iv, False, True))
jobs.append(t1)
n += 1
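    # Run the per-chromosome jobs in batches of 25: start up to 25 processes,
    # join (wait for) all of them, then move on to the next batch of 25.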
while m < n:
for i in range(m, m + 25):
if i >= n:
break
jobs[i].start()
for i in range(m, m + 25):
if i >= n:
break
jobs[i].join()
m = m + 25
os.system("cat _ctss_* > " + opt.outfile + " && rm -rf _ctss_*")
if __name__ == '__main__':
main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# if not opt.keepTemp:
# os.system('rm -rf ' + tempPath)
# logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def countProgram(programName, startT, runT, isTest):
countProgramFile = open('/users/ablife/ablifepy/countProgram.txt', 'a')
countProgramFile.write(
programName + '\t' + str(os.getlogin()) + '\t' + str(startT) + '\t' + str(runT) + 's\t' + isTest + '\n')
countProgramFile.close()
testStr = 'P'
if opt.isTest:
testStr = 'T'
countProgram(sys.argv[0], startTime, runningTime, testStr)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
| ablifedev/ABLIRC | ABLIRC/bin/public/iv_cluster/get_CTTS.py | Python | mit | 11,437 | [
"HTSeq"
] | 79d7dc64ef924fdced5f237ac1992eef073166b94edb73e24b19a1679aeb97cd |
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import gobject
import gtk
import gtk.gdk
import gtk.keysyms
from . import draw
from .draw import RenderOptions
class PyBootchartWidget(gtk.DrawingArea):
__gsignals__ = {
'expose-event': 'override',
'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
def __init__(self, trace, options, xscale):
gtk.DrawingArea.__init__(self)
self.trace = trace
self.options = options
self.set_flags(gtk.CAN_FOCUS)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
self.connect("size-allocate", self.on_allocation_size_changed)
self.connect("position-changed", self.on_position_changed)
self.zoom_ratio = 1.0
self.xscale = xscale
self.x, self.y = 0.0, 0.0
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
self.hadj = None
self.vadj = None
self.hadj_changed_signal_id = None
self.vadj_changed_signal_id = None
def do_expose_event(self, event):
cr = self.window.cairo_create()
# set a clip region for the expose event
cr.rectangle(
event.area.x, event.area.y,
event.area.width, event.area.height
)
cr.clip()
self.draw(cr, self.get_allocation())
return False
def draw(self, cr, rect):
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.scale(self.zoom_ratio, self.zoom_ratio)
cr.translate(-self.x, -self.y)
draw.render(cr, self.options, self.xscale, self.trace)
def position_changed(self):
self.emit("position-changed", self.x, self.y)
ZOOM_INCREMENT = 1.25
def zoom_image (self, zoom_ratio):
self.zoom_ratio = zoom_ratio
self._set_scroll_adjustments (self.hadj, self.vadj)
self.queue_draw()
def zoom_to_rect (self, rect):
zoom_ratio = float(rect.width)/float(self.chart_width)
self.zoom_image(zoom_ratio)
self.x = 0
self.position_changed()
def set_xscale(self, xscale):
old_mid_x = self.x + self.hadj.page_size / 2
self.xscale = xscale
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
new_x = old_mid_x
self.zoom_image (self.zoom_ratio)
def on_expand(self, action):
self.set_xscale (int(self.xscale * 1.5 + 0.5))
def on_contract(self, action):
self.set_xscale (max(int(self.xscale / 1.5), 1))
def on_zoom_in(self, action):
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
def on_zoom_out(self, action):
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
def on_zoom_fit(self, action):
self.zoom_to_rect(self.get_allocation())
def on_zoom_100(self, action):
self.zoom_image(1.0)
self.set_xscale(1.0)
def show_toggled(self, button):
self.options.app_options.show_all = button.get_property ('active')
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
self._set_scroll_adjustments(self.hadj, self.vadj)
self.queue_draw()
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
if event.keyval == gtk.keysyms.Left:
self.x -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Right:
self.x += self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Up:
self.y -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Down:
self.y += self.POS_INCREMENT/self.zoom_ratio
else:
return False
self.queue_draw()
self.position_changed()
return True
def on_area_button_press(self, area, event):
if event.button == 2 or event.button == 1:
area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
if event.type not in (gtk.gdk.BUTTON_PRESS, gtk.gdk.BUTTON_RELEASE):
return False
return False
def on_area_button_release(self, area, event):
if event.button == 2 or event.button == 1:
area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
return False
def on_area_scroll_event(self, area, event):
if event.state & gtk.gdk.CONTROL_MASK:
if event.direction == gtk.gdk.SCROLL_UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
return True
if event.direction == gtk.gdk.SCROLL_DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
return True
return False
def on_area_motion_notify(self, area, event):
state = event.state
if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
return True
def on_set_scroll_adjustments(self, area, hadj, vadj):
self._set_scroll_adjustments (hadj, vadj)
def on_allocation_size_changed(self, widget, allocation):
self.hadj.page_size = allocation.width
self.hadj.page_increment = allocation.width * 0.9
self.vadj.page_size = allocation.height
self.vadj.page_increment = allocation.height * 0.9
def _set_adj_upper(self, adj, upper):
changed = False
value_changed = False
if adj.upper != upper:
adj.upper = upper
changed = True
max_value = max(0.0, upper - adj.page_size)
if adj.value > max_value:
adj.value = max_value
value_changed = True
if changed:
adj.changed()
if value_changed:
adj.value_changed()
def _set_scroll_adjustments(self, hadj, vadj):
if hadj == None:
hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
if vadj == None:
vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
if self.hadj_changed_signal_id != None and \
self.hadj != None and hadj != self.hadj:
self.hadj.disconnect (self.hadj_changed_signal_id)
if self.vadj_changed_signal_id != None and \
self.vadj != None and vadj != self.vadj:
self.vadj.disconnect (self.vadj_changed_signal_id)
if hadj != None:
self.hadj = hadj
self._set_adj_upper (self.hadj, self.zoom_ratio * self.chart_width)
self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
if vadj != None:
self.vadj = vadj
self._set_adj_upper (self.vadj, self.zoom_ratio * self.chart_height)
self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)
def on_adjustments_changed(self, adj):
self.x = self.hadj.value / self.zoom_ratio
self.y = self.vadj.value / self.zoom_ratio
self.queue_draw()
def on_position_changed(self, widget, x, y):
self.hadj.value = x * self.zoom_ratio
self.vadj.value = y * self.zoom_ratio
PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
class PyBootchartShell(gtk.VBox):
ui = '''
<ui>
<toolbar name="ToolBar">
<toolitem action="Expand"/>
<toolitem action="Contract"/>
<separator/>
<toolitem action="ZoomIn"/>
<toolitem action="ZoomOut"/>
<toolitem action="ZoomFit"/>
<toolitem action="Zoom100"/>
</toolbar>
</ui>
'''
def __init__(self, window, trace, options, xscale):
gtk.VBox.__init__(self)
self.widget = PyBootchartWidget(trace, options, xscale)
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
# Add the accelerator group to the toplevel window
accelgroup = uimanager.get_accel_group()
window.add_accel_group(accelgroup)
# Create an ActionGroup
actiongroup = gtk.ActionGroup('Actions')
self.actiongroup = actiongroup
# Create actions
actiongroup.add_actions((
('Expand', gtk.STOCK_ADD, None, None, None, self.widget.on_expand),
('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget.on_contract),
('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
))
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
# Add a UI description
uimanager.add_ui_from_string(self.ui)
# Scrolled window
scrolled = gtk.ScrolledWindow()
scrolled.add(self.widget)
# toolbar / h-box
hbox = gtk.HBox(False, 8)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
hbox.pack_start(toolbar, True, True)
if not options.kernel_only:
# Misc. options
button = gtk.CheckButton("Show more")
button.connect ('toggled', self.widget.show_toggled)
button.set_active(options.app_options.show_all)
hbox.pack_start (button, False, True)
self.pack_start(hbox, False)
self.pack_start(scrolled)
self.show_all()
def grab_focus(self, window):
window.set_focus(self.widget)
class PyBootchartWindow(gtk.Window):
def __init__(self, trace, app_options):
gtk.Window.__init__(self)
window = self
window.set_title("Bootchart %s" % trace.filename)
window.set_default_size(750, 550)
tab_page = gtk.Notebook()
tab_page.show()
window.add(tab_page)
full_opts = RenderOptions(app_options)
full_tree = PyBootchartShell(window, trace, full_opts, 1.0)
tab_page.append_page (full_tree, gtk.Label("Full tree"))
if trace.kernel is not None and len (trace.kernel) > 2:
kernel_opts = RenderOptions(app_options)
kernel_opts.cumulative = False
kernel_opts.charts = False
kernel_opts.kernel_only = True
kernel_tree = PyBootchartShell(window, trace, kernel_opts, 5.0)
tab_page.append_page (kernel_tree, gtk.Label("Kernel boot"))
full_tree.grab_focus(self)
self.show()
def show(trace, options):
win = PyBootchartWindow(trace, options)
win.connect('destroy', gtk.main_quit)
gtk.main()
| wwright2/dcim3-angstrom1 | sources/openembedded-core/scripts/pybootchartgui/pybootchartgui/gui.py | Python | mit | 12,782 | [
"FLEUR"
] | 20482e87b7baf7e5959fb10b1a09d81474e54d78327eb6ec49b0ee0d63fc772c |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import platform
import re
import unittest
import warnings
import random
import sys
from pymatgen import SETTINGS, __version__ as pmg_version
from pymatgen.ext.matproj import MPRester, MPRestError, TaskType
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure, Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.bandstructure import (
BandStructureSymmLine, BandStructure)
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.analysis.pourbaix_diagram import PourbaixEntry, PourbaixDiagram
from pymatgen.analysis.wulff import WulffShape
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.io.cif import CifParser
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.util.testing import PymatgenTest
@unittest.skipIf(not SETTINGS.get("PMG_MAPI_KEY"),
"PMG_MAPI_KEY environment variable not set.")
class MPResterTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
self.rester = MPRester()
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
self.rester.session.close()
def test_get_all_materials_ids_doc(self):
mids = self.rester.get_materials_ids("Al2O3")
random.shuffle(mids)
doc = self.rester.get_doc(mids.pop(0))
self.assertEqual(doc["pretty_formula"], "Al2O3")
def test_get_xas_data(self):
# Test getting XAS data
data = self.rester.get_xas_data("mp-19017", "Li")
self.assertEqual("mp-19017,Li", data['mid_and_el'])
self.assertAlmostEqual(data['spectrum']['x'][0], 55.178, places=2)
self.assertAlmostEqual(data['spectrum']['y'][0], 0.0164634, places=2)
def test_get_data(self):
props = ["energy", "energy_per_atom", "formation_energy_per_atom",
"nsites", "unit_cell_formula", "pretty_formula", "is_hubbard",
"elements", "nelements", "e_above_hull", "hubbards",
"is_compatible", "task_ids",
"density", "icsd_ids", "total_magnetization"]
expected_vals = [-191.3359011, -6.833425039285714, -2.5515769497278913,
28, {'P': 4, 'Fe': 4, 'O': 16, 'Li': 4},
"LiFePO4", True, ['Li', 'O', 'P', 'Fe'], 4, 0.0,
{'Fe': 5.3, 'Li': 0.0, 'O': 0.0, 'P': 0.0}, True,
{'mp-19017', 'mp-540081', 'mp-601412'},
3.464840709092822,
[159107, 154117, 160776, 99860, 181272, 166815,
260571, 92198, 165000, 155580, 38209, 161479, 153699,
260569, 260570, 200155, 260572, 181341, 181342,
72545, 56291, 97764, 162282, 155635],
3.999999999]
for (i, prop) in enumerate(props):
if prop not in ['hubbards', 'unit_cell_formula', 'elements',
'icsd_ids', 'task_ids']:
val = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertAlmostEqual(expected_vals[i], val, 2, "Failed with property %s" % prop)
elif prop in ["elements", "icsd_ids", "task_ids"]:
upstream_vals = set(
self.rester.get_data("mp-19017", prop=prop)[0][prop])
self.assertLessEqual(set(expected_vals[i]), upstream_vals)
else:
self.assertEqual(expected_vals[i],
self.rester.get_data("mp-19017",
prop=prop)[0][prop])
props = ['structure', 'initial_structure', 'final_structure', 'entry']
for prop in props:
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
if prop.endswith("structure"):
self.assertIsInstance(obj, Structure)
elif prop == "entry":
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertIsInstance(obj, ComputedEntry)
# Test chemsys search
data = self.rester.get_data('Fe-Li-O', prop='unit_cell_formula')
self.assertTrue(len(data) > 1)
elements = {Element("Li"), Element("Fe"), Element("O")}
for d in data:
self.assertTrue(
set(Composition(d['unit_cell_formula']).elements).issubset(
elements))
self.assertRaises(MPRestError, self.rester.get_data, "Fe2O3",
"badmethod")
# Test getting supported properties
self.assertNotEqual(self.rester.get_task_data("mp-30"), [])
# Test aliasing
data = self.rester.get_task_data("mp-30", "energy")
self.assertAlmostEqual(data[0]["energy"], -4.09929227, places=2)
def test_get_materials_id_from_task_id(self):
self.assertEqual(self.rester.get_materials_id_from_task_id(
"mp-540081"), "mp-19017")
def test_get_materials_id_references(self):
# nosetests pymatgen/matproj/tests/test_matproj.py:MPResterTest.test_get_materials_id_references
m = MPRester()
data = m.get_materials_id_references('mp-123')
self.assertTrue(len(data) > 1000)
def test_find_structure(self):
# nosetests pymatgen/matproj/tests/test_matproj.py:MPResterTest.test_find_structure
m = MPRester()
ciffile = self.TEST_FILES_DIR / 'Fe3O4.cif'
data = m.find_structure(str(ciffile))
self.assertTrue(len(data) > 1)
s = CifParser(ciffile).get_structures()[0]
data = m.find_structure(s)
self.assertTrue(len(data) > 1)
def test_get_entries_in_chemsys(self):
syms = ["Li", "Fe", "O"]
syms2 = "Li-Fe-O"
entries = self.rester.get_entries_in_chemsys(syms)
entries2 = self.rester.get_entries_in_chemsys(syms2)
elements = set([Element(sym) for sym in syms])
for e in entries:
self.assertIsInstance(e, ComputedEntry)
self.assertTrue(set(e.composition.elements).issubset(elements))
e1 = set([i.entry_id for i in entries])
e2 = set([i.entry_id for i in entries2])
self.assertTrue(e1 == e2)
def test_get_structure_by_material_id(self):
s1 = self.rester.get_structure_by_material_id("mp-1")
self.assertEqual(s1.formula, "Cs1")
def test_get_entry_by_material_id(self):
e = self.rester.get_entry_by_material_id("mp-19017")
self.assertIsInstance(e, ComputedEntry)
        self.assertEqual(e.composition.reduced_formula, "LiFePO4")
def test_query(self):
criteria = {'elements': {'$in': ['Li', 'Na', 'K'], '$all': ['O']}}
props = ['pretty_formula', 'energy']
data = self.rester.query(
criteria=criteria, properties=props, chunk_size=0)
self.assertTrue(len(data) > 6)
data = self.rester.query(
criteria="*2O", properties=props, chunk_size=0)
self.assertGreaterEqual(len(data), 52)
self.assertIn("Li2O", (d["pretty_formula"] for d in data))
def test_query_chunk_size(self):
criteria = {"nelements": 2, "elements": "O"}
props = ['pretty_formula']
data1 = self.rester.query(
criteria=criteria, properties=props, chunk_size=0)
data2 = self.rester.query(
criteria=criteria, properties=props, chunk_size=500)
self.assertEqual({d['pretty_formula'] for d in data1},
{d['pretty_formula'] for d in data2})
self.assertIn("Al2O3", {d['pretty_formula'] for d in data1})
def test_get_exp_thermo_data(self):
data = self.rester.get_exp_thermo_data("Fe2O3")
self.assertTrue(len(data) > 0)
for d in data:
self.assertEqual(d.formula, "Fe2O3")
def test_get_dos_by_id(self):
dos = self.rester.get_dos_by_material_id("mp-2254")
self.assertIsInstance(dos, CompleteDos)
def test_get_bandstructure_by_material_id(self):
bs = self.rester.get_bandstructure_by_material_id("mp-2254")
self.assertIsInstance(bs, BandStructureSymmLine)
bs_unif = self.rester.get_bandstructure_by_material_id(
"mp-2254", line_mode=False)
self.assertIsInstance(bs_unif, BandStructure)
self.assertNotIsInstance(bs_unif, BandStructureSymmLine)
def test_get_phonon_data_by_material_id(self):
bs = self.rester.get_phonon_bandstructure_by_material_id("mp-661")
self.assertIsInstance(bs, PhononBandStructureSymmLine)
dos = self.rester.get_phonon_dos_by_material_id("mp-661")
self.assertIsInstance(dos, CompletePhononDos)
ddb_str = self.rester.get_phonon_ddb_by_material_id("mp-661")
self.assertIsInstance(ddb_str, str)
def test_get_structures(self):
structs = self.rester.get_structures("Mn3O4")
self.assertTrue(len(structs) > 0)
def test_get_entries(self):
entries = self.rester.get_entries("TiO2")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.composition.reduced_formula, "TiO2")
entries = self.rester.get_entries("TiO2", inc_structure=True)
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.structure.composition.reduced_formula, "TiO2")
# all_entries = self.rester.get_entries("Fe", compatible_only=False)
# entries = self.rester.get_entries("Fe", compatible_only=True)
# self.assertTrue(len(entries) < len(all_entries))
entries = self.rester.get_entries("Fe", compatible_only=True,
property_data=["cif"])
self.assertIn("cif", entries[0].data)
for e in self.rester.get_entries("CdO2", inc_structure=False):
self.assertIsNotNone(e.data["oxide_type"])
# test if it will retrieve the conventional unit cell of Ni
entry = self.rester.get_entry_by_material_id(
"mp-23", inc_structure=True, conventional_unit_cell=True)
Ni = entry.structure
self.assertEqual(Ni.lattice.a, Ni.lattice.b)
self.assertEqual(Ni.lattice.a, Ni.lattice.c)
self.assertEqual(Ni.lattice.alpha, 90)
self.assertEqual(Ni.lattice.beta, 90)
self.assertEqual(Ni.lattice.gamma, 90)
# Ensure energy per atom is same
primNi = self.rester.get_entry_by_material_id(
"mp-23", inc_structure=True, conventional_unit_cell=False)
self.assertEqual(primNi.energy_per_atom, entry.energy_per_atom)
Ni = self.rester.get_structure_by_material_id(
"mp-23", conventional_unit_cell=True)
self.assertEqual(Ni.lattice.a, Ni.lattice.b)
self.assertEqual(Ni.lattice.a, Ni.lattice.c)
self.assertEqual(Ni.lattice.alpha, 90)
self.assertEqual(Ni.lattice.beta, 90)
self.assertEqual(Ni.lattice.gamma, 90)
# Test case where convs are different from initial and final
# th = self.rester.get_structure_by_material_id(
# "mp-37", conventional_unit_cell=True)
# th_entry = self.rester.get_entry_by_material_id(
# "mp-37", inc_structure=True, conventional_unit_cell=True)
# th_entry_initial = self.rester.get_entry_by_material_id(
# "mp-37", inc_structure="initial", conventional_unit_cell=True)
# self.assertEqual(th, th_entry.structure)
# self.assertEqual(len(th_entry.structure), 4)
# self.assertEqual(len(th_entry_initial.structure), 2)
# Test if the polymorphs of Fe are properly sorted
# by e_above_hull when sort_by_e_above_hull=True
Fe_entries = self.rester.get_entries("Fe", sort_by_e_above_hull=True)
self.assertEqual(Fe_entries[0].data["e_above_hull"], 0)
def test_get_pourbaix_entries(self):
pbx_entries = self.rester.get_pourbaix_entries(["Fe", "Cr"])
for pbx_entry in pbx_entries:
self.assertTrue(isinstance(pbx_entry, PourbaixEntry))
# Ensure entries are pourbaix compatible
pbx = PourbaixDiagram(pbx_entries)
# Try binary system
# pbx_entries = self.rester.get_pourbaix_entries(["Fe", "Cr"])
# pbx = PourbaixDiagram(pbx_entries)
# TODO: Shyue Ping: I do not understand this test. You seem to
# be grabbing Zn-S system, but I don't see proper test for anything,
# including Na ref. This test also takes a long time.
# Test Zn-S, which has Na in reference solids
# pbx_entries = self.rester.get_pourbaix_entries(["Zn", "S"])
def test_get_exp_entry(self):
entry = self.rester.get_exp_entry("Fe2O3")
self.assertEqual(entry.energy, -825.5)
def test_submit_query_delete_snl(self):
s = Structure([[5, 0, 0], [0, 5, 0], [0, 0, 5]], ["Fe"], [[0, 0, 0]])
# d = self.rester.submit_snl(
# [s, s], remarks=["unittest"],
# authors="Test User <test@materialsproject.com>")
# self.assertEqual(len(d), 2)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 2)
# snlids = [d["_id"] for d in data]
# self.rester.delete_snl(snlids)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 0)
def test_get_stability(self):
entries = self.rester.get_entries_in_chemsys(["Fe", "O"])
modified_entries = []
for entry in entries:
# Create modified entries with energies that are 0.01eV higher
# than the corresponding entries.
if entry.composition.reduced_formula == "Fe2O3":
modified_entries.append(
ComputedEntry(entry.composition,
entry.uncorrected_energy + 0.01,
parameters=entry.parameters,
entry_id="mod_{}".format(entry.entry_id)))
rest_ehulls = self.rester.get_stability(modified_entries)
all_entries = entries + modified_entries
compat = MaterialsProjectCompatibility()
all_entries = compat.process_entries(all_entries)
pd = PhaseDiagram(all_entries)
for e in all_entries:
if str(e.entry_id).startswith("mod"):
for d in rest_ehulls:
if d["entry_id"] == e.entry_id:
data = d
break
self.assertAlmostEqual(pd.get_e_above_hull(e),
data["e_above_hull"])
def test_get_reaction(self):
rxn = self.rester.get_reaction(["Li", "O"], ["Li2O"])
self.assertIn("Li2O", rxn["Experimental_references"])
def test_get_substrates(self):
substrate_data = self.rester.get_substrates('mp-123', 5, [1, 0, 0])
substrates = [sub_dict['sub_id'] for sub_dict in substrate_data]
self.assertIn("mp-2534", substrates)
def test_get_surface_data(self):
data = self.rester.get_surface_data("mp-126") # Pt
one_surf = self.rester.get_surface_data('mp-129', miller_index=[-2, -3, 1])
self.assertAlmostEqual(one_surf['surface_energy'], 2.99156963, places=2)
self.assertArrayAlmostEqual(one_surf['miller_index'], [3, 2, 1])
self.assertIn("surfaces", data)
surfaces = data["surfaces"]
self.assertTrue(len(surfaces) > 0)
surface = surfaces.pop()
self.assertIn("miller_index", surface)
self.assertIn("surface_energy", surface)
self.assertIn("is_reconstructed", surface)
data_inc = self.rester.get_surface_data("mp-126", inc_structures=True)
self.assertIn("structure", data_inc["surfaces"][0])
def test_get_wulff_shape(self):
ws = self.rester.get_wulff_shape("mp-126")
self.assertTrue(isinstance(ws, WulffShape))
def test_get_cohesive_energy(self):
ecoh = self.rester.get_cohesive_energy("mp-13")
        self.assertAlmostEqual(ecoh, 5.04543279, places=2)
def test_get_gb_data(self):
mo_gbs = self.rester.get_gb_data(chemsys='Mo')
self.assertEqual(len(mo_gbs), 10)
mo_gbs_s5 = self.rester.get_gb_data(pretty_formula='Mo', sigma=5)
self.assertEqual(len(mo_gbs_s5), 3)
mo_s3_112 = self.rester.get_gb_data(material_id='mp-129', sigma=3,
gb_plane=[1, -1, -2],
include_work_of_separation=True)
self.assertEqual(len(mo_s3_112), 1)
gb_f = mo_s3_112[0]['final_structure']
self.assertArrayAlmostEqual(gb_f.rotation_axis, [1, 1, 0])
self.assertAlmostEqual(gb_f.rotation_angle, 109.47122, places=4)
self.assertAlmostEqual(mo_s3_112[0]['gb_energy'], 0.47965, places=2)
self.assertAlmostEqual(mo_s3_112[0]['work_of_separation'], 6.318144, places=2)
self.assertIn("Mo24", gb_f.formula)
hcp_s7 = self.rester.get_gb_data(material_id='mp-87', gb_plane=[0, 0, 0, 1],
include_work_of_separation=True)
self.assertAlmostEqual(hcp_s7[0]['gb_energy'], 1.12, places=2)
self.assertAlmostEqual(hcp_s7[0]['work_of_separation'], 2.47, places=2)
def test_get_interface_reactions(self):
kinks = self.rester.get_interface_reactions("LiCoO2", "Li3PS4")
self.assertTrue(len(kinks) > 0)
kink = kinks[0]
self.assertIn("energy", kink)
self.assertIn("ratio_atomic", kink)
self.assertIn("rxn", kink)
self.assertTrue(isinstance(kink['rxn'], Reaction))
kinks_open_O = self.rester.get_interface_reactions(
"LiCoO2", "Li3PS4", open_el="O", relative_mu=-1)
self.assertTrue(len(kinks_open_O) > 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always", message="The reactant.+")
self.rester.get_interface_reactions("LiCoO2", "MnO9")
self.assertTrue("The reactant" in str(w[-1].message))
def test_download_info(self):
material_ids = ['mp-32800', 'mp-23494']
task_types = [TaskType.GGA_OPT, TaskType.GGA_UNIFORM]
file_patterns = ['vasprun*', 'OUTCAR*']
meta, urls = self.rester.get_download_info(
material_ids, task_types=task_types,
file_patterns=file_patterns
)
self.assertEqual(meta, {
'mp-23494': [
{'task_id': 'mp-669929', 'task_type': 'GGA NSCF Uniform'},
{'task_id': 'mp-23494', 'task_type': 'GGA Structure Optimization'},
# for provenance {'task_id': 'mp-688563', 'task_type': 'GGA NSCF Line'},
],
'mp-32800': [
{'task_id': 'mp-739635', 'task_type': 'GGA NSCF Uniform'},
{'task_id': 'mp-32800', 'task_type': 'GGA Structure Optimization'},
# for provenance {'task_id': 'mp-746913', 'task_type': 'GGA NSCF Line'},
]
})
prefix = 'http://labdev-nomad.esc.rzg.mpg.de/fairdi/nomad/mp/api/raw/query?'
# previous test
# ids = 'mp-23494,mp-688563,mp-32800,mp-746913'
ids = 'mp-669929,mp-23494,mp-739635,mp-32800'
self.assertEqual(
urls[0], f'{prefix}file_pattern=vasprun*&file_pattern=OUTCAR*&external_id={ids}'
)
def test_parse_criteria(self):
crit = MPRester.parse_criteria("mp-1234 Li-*")
self.assertIn("Li-O", crit["$or"][1]["chemsys"]["$in"])
self.assertIn({"task_id": "mp-1234"}, crit["$or"])
crit = MPRester.parse_criteria("Li2*")
self.assertIn("Li2O", crit["pretty_formula"]["$in"])
self.assertIn("Li2I", crit["pretty_formula"]["$in"])
self.assertIn("CsLi2", crit["pretty_formula"]["$in"])
crit = MPRester.parse_criteria("Li-*-*")
self.assertIn("Li-Re-Ru", crit["chemsys"]["$in"])
self.assertNotIn("Li-Li", crit["chemsys"]["$in"])
comps = MPRester.parse_criteria("**O3")["pretty_formula"]["$in"]
for c in comps:
self.assertEqual(len(Composition(c)), 3, "Failed in %s" % c)
chemsys = MPRester.parse_criteria("{Fe,Mn}-O")["chemsys"]["$in"]
self.assertEqual(len(chemsys), 2)
comps = MPRester.parse_criteria("{Fe,Mn,Co}O")["pretty_formula"]["$in"]
self.assertEqual(len(comps), 3, comps)
# Let's test some invalid symbols
self.assertRaises(ValueError, MPRester.parse_criteria, "li-fe")
self.assertRaises(ValueError, MPRester.parse_criteria, "LO2")
crit = MPRester.parse_criteria("POPO2")
self.assertIn("P2O3", crit["pretty_formula"]["$in"])
def test_include_user_agent(self):
headers = self.rester.session.headers
self.assertIn("user-agent", headers, msg="Include user-agent header by default")
m = re.match(
            r"pymatgen/(\d+)\.(\d+)\.(\d+) \(Python/(\d+)\.(\d+)\.(\d+) ([^\/]*)/([^\)]*)\)",
headers['user-agent'])
self.assertIsNotNone(m, msg="Unexpected user-agent value {}".format(headers['user-agent']))
self.assertEqual(m.groups()[:3], tuple(pmg_version.split(".")))
self.assertEqual(
m.groups()[3:6],
tuple(str(n) for n in (sys.version_info.major, sys.version_info.minor, sys.version_info.micro))
)
self.rester = MPRester(include_user_agent=False)
self.assertNotIn("user-agent", self.rester.session.headers, msg="user-agent header unwanted")
if __name__ == "__main__":
unittest.main()
| fraricci/pymatgen | pymatgen/ext/tests/test_matproj.py | Python | mit | 21,952 | [
"pymatgen"
] | 2eb58bf533c47959ca9264eb2fc53458fbf5c19051f3d7dede69394f33712e2c |
from HTTPFeedApiModule import get_indicators_command, Client, datestring_to_server_format, feed_main,\
fetch_indicators_command, get_no_update_value
import requests_mock
import demistomock as demisto
def test_get_indicators():
with open('test_data/asn_ranges.txt') as asn_ranges_txt:
asn_ranges = asn_ranges_txt.read().encode('utf8')
with requests_mock.Mocker() as m:
itype = 'ASN'
args = {
'indicator_type': itype,
'limit': 35
}
feed_type = {
'https://www.spamhaus.org/drop/asndrop.txt': {
'indicator_type': 'ASN',
'indicator': {
'regex': '^AS[0-9]+'
},
'fields': [
{
'asndrop_country': {
'regex': r'^.*;\W([a-zA-Z]+)\W+',
'transform': r'\1'
}
},
{
'asndrop_org': {
'regex': r'^.*\|\W+(.*)',
'transform': r'\1'
}
}
]
}
}
m.get('https://www.spamhaus.org/drop/asndrop.txt', content=asn_ranges)
client = Client(
url="https://www.spamhaus.org/drop/asndrop.txt",
source_name='spamhaus',
ignore_regex='^;.*',
feed_url_to_config=feed_type
)
args['indicator_type'] = 'ASN'
_, _, raw_json = get_indicators_command(client, args)
for ind_json in raw_json:
ind_val = ind_json.get('value')
ind_type = ind_json.get('type')
ind_rawjson = ind_json.get('rawJSON')
assert ind_val
assert ind_type == itype
assert ind_rawjson['value'] == ind_val
assert ind_rawjson['type'] == ind_type
def test_get_indicators_json_params():
with open('test_data/asn_ranges.txt') as asn_ranges_txt:
asn_ranges = asn_ranges_txt.read().encode('utf8')
with requests_mock.Mocker() as m:
itype = 'ASN'
args = {
'indicator_type': itype,
'limit': 35
}
indicator_json = '''
{
"regex": "^AS[0-9]+"
}
'''
fields_json = r'''
{
"asndrop_country": {
"regex":"^.*;\\W([a-zA-Z]+)\\W+",
"transform":"\\1"
},
"asndrop_org": {
"regex":"^.*\\|\\W+(.*)",
"transform":"\\1"
}
}
'''
m.get('https://www.spamhaus.org/drop/asndrop.txt', content=asn_ranges)
client = Client(
url="https://www.spamhaus.org/drop/asndrop.txt",
source_name='spamhaus',
ignore_regex='^;.*',
indicator=indicator_json,
fields=fields_json,
indicator_type='ASN'
)
args['indicator_type'] = 'ASN'
_, _, raw_json = get_indicators_command(client, args)
for ind_json in raw_json:
ind_val = ind_json.get('value')
ind_type = ind_json.get('type')
ind_rawjson = ind_json.get('rawJSON')
assert ind_val
assert ind_type == itype
assert ind_rawjson['value'] == ind_val
assert ind_rawjson['type'] == ind_type
def test_custom_fields_creator():
custom_fields_mapping = {
"old_field1": "new_field1",
"old_field2": "new_field2"
}
client = Client(
url="https://www.spamhaus.org/drop/asndrop.txt",
feed_url_to_config="some_stuff",
custom_fields_mapping=custom_fields_mapping
)
attributes = {
'old_field1': "value1",
'old_field2': "value2"
}
custom_fields = client.custom_fields_creator(attributes)
assert custom_fields.get('new_field1') == "value1"
assert custom_fields.get('new_field2') == "value2"
assert "old_field1" not in custom_fields.keys()
    assert "old_field2" not in custom_fields.keys()
def test_datestring_to_server_format():
"""
Given
    - A string representing a date.
When
- running datestring_to_server_format on the date.
Then
- Ensure the datestring is converted to the ISO-8601 format.
"""
datestring1 = "2020-02-10 13:39:14"
datestring2 = "2020-02-10T13:39:14"
datestring3 = "2020-02-10 13:39:14.123"
datestring4 = "2020-02-10T13:39:14.123"
datestring5 = "2020-02-10T13:39:14Z"
datestring6 = "2020-11-01T04:16:13-04:00"
assert '2020-02-10T13:39:14Z' == datestring_to_server_format(datestring1)
assert '2020-02-10T13:39:14Z' == datestring_to_server_format(datestring2)
assert '2020-02-10T13:39:14Z' == datestring_to_server_format(datestring3)
assert '2020-02-10T13:39:14Z' == datestring_to_server_format(datestring4)
assert '2020-02-10T13:39:14Z' == datestring_to_server_format(datestring5)
assert '2020-11-01T08:16:13Z' == datestring_to_server_format(datestring6)
def test_get_feed_config():
custom_fields_mapping = {
"old_field1": "new_field1",
"old_field2": "new_field2"
}
client = Client(
url="https://www.spamhaus.org/drop/asndrop.txt",
feed_url_to_config="some_stuff",
custom_fields_mapping=custom_fields_mapping
)
# Check that if an empty .get_feed_config is called, an empty dict returned
assert {} == client.get_feed_config()
def test_feed_main_fetch_indicators(mocker, requests_mock):
"""
Given
- Parameters (url, ignore_regex, feed_url_to_config and tags) to configure a feed.
When
- Fetching indicators.
Then
- Ensure createIndicators is called with 466 indicators to fetch.
- Ensure one of the indicators is fetched as expected.
"""
feed_url = 'https://www.spamhaus.org/drop/asndrop.txt'
indicator_type = 'ASN'
tags = 'tag1,tag2'
tlp_color = 'AMBER'
feed_url_to_config = {
'https://www.spamhaus.org/drop/asndrop.txt': {
'indicator_type': indicator_type,
'indicator': {
'regex': '^AS[0-9]+'
},
'fields': [
{
'asndrop_country': {
'regex': r'^.*;\W([a-zA-Z]+)\W+',
'transform': r'\1'
}
},
{
'asndrop_org': {
'regex': r'^.*\|\W+(.*)',
'transform': r'\1'
}
}
]
}
}
mocker.patch.object(
demisto, 'params',
return_value={
'url': feed_url,
'ignore_regex': '^;.*',
'feed_url_to_config': feed_url_to_config,
'feedTags': tags,
'tlp_color': tlp_color
}
)
mocker.patch.object(demisto, 'command', return_value='fetch-indicators')
mocker.patch.object(demisto, 'createIndicators')
with open('test_data/asn_ranges.txt') as asn_ranges_txt:
asn_ranges = asn_ranges_txt.read().encode('utf8')
requests_mock.get(feed_url, content=asn_ranges)
feed_main('great_feed_name')
# verify createIndicators was called with 466 indicators
assert demisto.createIndicators.call_count == 1
indicators = demisto.createIndicators.call_args[0][0]
assert len(indicators) == 466
# verify one of the expected indicators
assert {
'rawJSON': {
'asndrop_country': 'US',
'asndrop_org': 'LAKSH CYBERSECURITY AND DEFENSE LLC',
'tags': tags.split(','),
'trafficlightprotocol': 'AMBER',
'type': indicator_type,
'value': 'AS397539'
},
'type': indicator_type,
'value': 'AS397539',
'fields': {'tags': ['tag1', 'tag2'], 'trafficlightprotocol': 'AMBER'}
} in indicators
def test_feed_main_test_module(mocker, requests_mock):
"""
Given
- Parameters (url, ignore_regex, feed_url_to_config and tags) to configure a feed.
When
- Running test-module (clicking on Test).
Then
- Ensure 'ok' is returned.
"""
feed_url = 'https://www.spamhaus.org/drop/asndrop.txt'
indicator_type = 'ASN'
tags = 'tag1,tag2'
tlp_color = 'AMBER'
feed_url_to_config = {
'https://www.spamhaus.org/drop/asndrop.txt': {
'indicator_type': indicator_type,
'indicator': {
'regex': '^AS[0-9]+'
},
'fields': [
{
'asndrop_country': {
'regex': r'^.*;\W([a-zA-Z]+)\W+',
'transform': r'\1'
}
},
{
'asndrop_org': {
'regex': r'^.*\|\W+(.*)',
'transform': r'\1'
}
}
]
}
}
mocker.patch.object(
demisto, 'params',
return_value={
'url': feed_url,
'ignore_regex': '^;.*',
'feed_url_to_config': feed_url_to_config,
'feedTags': tags,
'tlp_color': tlp_color
}
)
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'results')
with open('test_data/asn_ranges.txt') as asn_ranges_txt:
asn_ranges = asn_ranges_txt.read().encode('utf8')
requests_mock.get(feed_url, content=asn_ranges)
feed_main('great_feed_name')
assert demisto.results.call_count == 1
results = demisto.results.call_args[0][0]
assert results['HumanReadable'] == 'ok'
def test_get_indicators_with_relations():
"""
Given:
- feed url config including relations values
When:
- Fetching indicators
- create_relationships param is set to True
Then:
    - Validate that the returned list of indicators includes relationships.
"""
feed_url_to_config = {
'https://www.spamhaus.org/drop/asndrop.txt': {
"indicator_type": 'IP',
"indicator": {
"regex": r"^.+,\"?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\"?",
"transform": "\\1"
},
'relationship_name': 'indicator-of',
'relationship_entity_b_type': 'STIX Malware',
"fields": [{
'firstseenbysource': {
"regex": r"^(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})",
"transform": "\\1"
},
"port": {
"regex": r"^.+,.+,(\d{1,5}),",
"transform": "\\1"
},
"updatedate": {
"regex": r"^.+,.+,.+,(\d{4}-\d{2}-\d{2})",
"transform": "\\1"
},
"malwarefamily": {
"regex": r"^.+,.+,.+,.+,(.+)",
"transform": "\\1"
},
"relationship_entity_b": {
"regex": r"^.+,.+,.+,.+,\"(.+)\"",
"transform": "\\1"
}
}],
}
}
expected_res = ([{'value': '127.0.0.1', 'type': 'IP',
'rawJSON': {'malwarefamily': '"Test"', 'relationship_entity_b': 'Test', 'value': '127.0.0.1',
'type': 'IP', 'tags': []},
'relationships': [
{'name': 'indicator-of', 'reverseName': 'indicated-by', 'type': 'IndicatorToIndicator',
'entityA': '127.0.0.1', 'entityAFamily': 'Indicator', 'entityAType': 'IP',
'entityB': 'Test',
'entityBFamily': 'Indicator', 'entityBType': 'STIX Malware', 'fields': {}}],
'fields': {'tags': []}}], True)
asn_ranges = '"2021-01-17 07:44:49","127.0.0.1","3889","online","2021-04-22","Test"'
with requests_mock.Mocker() as m:
m.get('https://www.spamhaus.org/drop/asndrop.txt', content=asn_ranges.encode('utf-8'))
client = Client(
url="https://www.spamhaus.org/drop/asndrop.txt",
source_name='spamhaus',
ignore_regex='^;.*',
feed_url_to_config=feed_url_to_config,
indicator_type='ASN'
)
indicators = fetch_indicators_command(client, feed_tags=[], tlp_color=[], itype='IP', auto_detect=False,
create_relationships=True)
assert indicators == expected_res
def test_get_indicators_without_relations():
"""
Given:
- feed url config including relations values
When:
- Fetching indicators
- create_relationships param is set to False
Then:
    - Validate that the returned list of indicators does not include relationships.
"""
feed_url_to_config = {
'https://www.spamhaus.org/drop/asndrop.txt': {
"indicator_type": 'IP',
"indicator": {
"regex": r"^.+,\"?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\"?",
"transform": "\\1"
},
'relationship_name': 'indicator-of',
'relationship_entity_b_type': 'STIX Malware',
"fields": [{
'firstseenbysource': {
"regex": r"^(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})",
"transform": "\\1"
},
"port": {
"regex": r"^.+,.+,(\d{1,5}),",
"transform": "\\1"
},
"updatedate": {
"regex": r"^.+,.+,.+,(\d{4}-\d{2}-\d{2})",
"transform": "\\1"
},
"malwarefamily": {
"regex": r"^.+,.+,.+,.+,(.+)",
"transform": "\\1"
},
"relationship_entity_b": {
"regex": r"^.+,.+,.+,.+,\"(.+)\"",
"transform": "\\1"
}
}],
}
}
expected_res = ([{'value': '127.0.0.1', 'type': 'IP',
'rawJSON': {'malwarefamily': '"Test"', 'relationship_entity_b': 'Test', 'value': '127.0.0.1',
'type': 'IP', 'tags': []},
'fields': {'tags': []}}], True)
asn_ranges = '"2021-01-17 07:44:49","127.0.0.1","3889","online","2021-04-22","Test"'
with requests_mock.Mocker() as m:
m.get('https://www.spamhaus.org/drop/asndrop.txt', content=asn_ranges.encode('utf-8'))
client = Client(
url="https://www.spamhaus.org/drop/asndrop.txt",
source_name='spamhaus',
ignore_regex='^;.*',
feed_url_to_config=feed_url_to_config,
indicator_type='ASN'
)
indicators = fetch_indicators_command(client, feed_tags=[], tlp_color=[], itype='IP', auto_detect=False,
create_relationships=False)
assert indicators == expected_res
def test_get_no_update_value(mocker):
"""
Given
    - A response whose Last-Modified and ETag header values are the same as those stored in the integration context.
    When
    - Running the get_no_update_value method.
    Then
    - Ensure that the returned value is False.
"""
mocker.patch.object(demisto, 'debug')
class MockResponse:
headers = {'Last-Modified': 'Fri, 30 Jul 2021 00:24:13 GMT', # guardrails-disable-line
'ETag': 'd309ab6e51ed310cf869dab0dfd0d34b'} # guardrails-disable-line
status_code = 200
no_update = get_no_update_value(MockResponse(), 'https://www.spamhaus.org/drop/asndrop.txt')
assert not no_update
assert demisto.debug.call_args[0][0] == 'New indicators fetched - the Last-Modified value has been updated,' \
' createIndicators will be executed with noUpdate=False.'
def test_build_iterator_not_modified_header(mocker):
"""
Given
    - A response with status code 304 (Not Modified).
    When
    - Running the build_iterator method.
    Then
    - Ensure that the results are empty and the no_update value is True.
"""
mocker.patch.object(demisto, 'debug')
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.5.0"})
with requests_mock.Mocker() as m:
m.get('https://api.github.com/meta', status_code=304)
client = Client(
url='https://api.github.com/meta'
)
result = client.build_iterator()
assert result
assert result[0]['https://api.github.com/meta']
assert list(result[0]['https://api.github.com/meta']['result']) == []
assert result[0]['https://api.github.com/meta']['no_update']
assert demisto.debug.call_args[0][0] == 'No new indicators fetched, ' \
'createIndicators will be executed with noUpdate=True.'
def test_build_iterator_with_version_6_2_0(mocker):
"""
Given
    - Server version 6.2.0.
    When
    - Running the build_iterator method.
    Then
    - Ensure that the no_update value is True.
    - Ensure the request is made without the "If-None-Match" and "If-Modified-Since" headers.
"""
mocker.patch.object(demisto, 'debug')
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
with requests_mock.Mocker() as m:
m.get('https://api.github.com/meta', status_code=304)
client = Client(
url='https://api.github.com/meta',
headers={}
)
result = client.build_iterator()
assert result[0]['https://api.github.com/meta']['no_update']
assert list(result[0]['https://api.github.com/meta']['result']) == []
assert 'If-None-Match' not in client.headers
assert 'If-Modified-Since' not in client.headers
def test_get_no_update_value_without_headers(mocker):
"""
Given
    - A response without Last-Modified and ETag headers.
    When
    - Running get_no_update_value.
    Then
    - Ensure that the returned value is False.
"""
mocker.patch.object(demisto, 'debug')
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.5.0"})
class MockResponse:
headers = {}
status_code = 200
no_update = get_no_update_value(MockResponse(), 'https://www.spamhaus.org/drop/asndrop.txt')
assert not no_update
assert demisto.debug.call_args[0][0] == 'Last-Modified and Etag headers are not exists,' \
'createIndicators will be executed with noUpdate=False.'
| demisto/content | Packs/ApiModules/Scripts/HTTPFeedApiModule/HTTPFeedApiModule_test.py | Python | mit | 18,734 | [
"Amber"
] | 62fa72fce6e07341ec4c84cd3df728ab7218973480d0d3f27805139cc2efd1ac |
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Config.py,v 1.9 2004/01/31 04:20:05 warnes Exp $'
from version import __version__
import copy, socket
from types import *
from NS import NS
################################################################################
# Configuration class
################################################################################
class SOAPConfig:
__readonly = ('SSLserver', 'SSLclient', 'GSIserver', 'GSIclient')
def __init__(self, config = None, **kw):
d = self.__dict__
if config:
if not isinstance(config, SOAPConfig):
raise AttributeError, \
"initializer must be SOAPConfig instance"
s = config.__dict__
for k, v in s.items():
if k[0] != '_':
d[k] = v
else:
# Setting debug also sets returnFaultInfo,
# dumpHeadersIn, dumpHeadersOut, dumpSOAPIn, and dumpSOAPOut
self.debug = 0
self.dumpFaultInfo = 1
# Setting namespaceStyle sets typesNamespace, typesNamespaceURI,
# schemaNamespace, and schemaNamespaceURI
self.namespaceStyle = '1999'
self.strictNamespaces = 0
self.typed = 1
self.buildWithNamespacePrefix = 1
self.returnAllAttrs = 0
# Strict checking of range for floats and doubles
self.strict_range = 0
# Default encoding for dictionary keys
self.dict_encoding = 'ascii'
# New argument name handling mechanism. See
# README.MethodParameterNaming for details
self.specialArgs = 1
            # If unwrap_results=1 and there is only one element in the struct,
            # SOAPProxy will assume that this element is the result
            # and return it rather than the struct containing it.
            # Otherwise SOAPProxy will return the struct with all the
            # elements as attributes.
self.unwrap_results = 1
# Automatically convert SOAP complex types, and
# (recursively) public contents into the corresponding
# python types. (Private subobjects have names that start
# with '_'.)
#
# Conversions:
# - faultType --> raise python exception
# - arrayType --> array
# - compoundType --> dictionary
#
self.simplify_objects = 0
# Per-class authorization method. If this is set, before
            # calling any class method, the specified authorization
# method will be called. If it returns 1, the method call
# will proceed, otherwise the call will throw with an
# authorization error.
self.authMethod = None
# Globus Support if pyGlobus.io available
try:
from pyGlobus import io;
d['GSIserver'] = 1
d['GSIclient'] = 1
except:
d['GSIserver'] = 0
d['GSIclient'] = 0
# Server SSL support if M2Crypto.SSL available
try:
from M2Crypto import SSL
d['SSLserver'] = 1
except:
d['SSLserver'] = 0
# Client SSL support if socket.ssl available
try:
from socket import ssl
d['SSLclient'] = 1
except:
d['SSLclient'] = 0
for k, v in kw.items():
if k[0] != '_':
setattr(self, k, v)
def __setattr__(self, name, value):
if name in self.__readonly:
raise AttributeError, "readonly configuration setting"
d = self.__dict__
if name in ('typesNamespace', 'typesNamespaceURI',
'schemaNamespace', 'schemaNamespaceURI'):
if name[-3:] == 'URI':
base, uri = name[:-3], 1
else:
base, uri = name, 0
if type(value) == StringType:
if NS.NSMAP.has_key(value):
n = (value, NS.NSMAP[value])
elif NS.NSMAP_R.has_key(value):
n = (NS.NSMAP_R[value], value)
else:
raise AttributeError, "unknown namespace"
elif type(value) in (ListType, TupleType):
if uri:
n = (value[1], value[0])
else:
n = (value[0], value[1])
else:
raise AttributeError, "unknown namespace type"
d[base], d[base + 'URI'] = n
try:
d['namespaceStyle'] = \
NS.STMAP_R[(d['typesNamespace'], d['schemaNamespace'])]
except:
d['namespaceStyle'] = ''
elif name == 'namespaceStyle':
value = str(value)
if not NS.STMAP.has_key(value):
raise AttributeError, "unknown namespace style"
d[name] = value
n = d['typesNamespace'] = NS.STMAP[value][0]
d['typesNamespaceURI'] = NS.NSMAP[n]
n = d['schemaNamespace'] = NS.STMAP[value][1]
d['schemaNamespaceURI'] = NS.NSMAP[n]
elif name == 'debug':
d[name] = \
d['returnFaultInfo'] = \
d['dumpHeadersIn'] = \
d['dumpHeadersOut'] = \
d['dumpSOAPIn'] = \
d['dumpSOAPOut'] = value
else:
d[name] = value
Config = SOAPConfig()
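# --- Illustrative usage sketch (editor addition, not part of the original
# SOAPpy distribution).  It shows how a per-call configuration could be
# derived from the module-level Config: the constructor copies settings from
# an existing SOAPConfig instance, and keyword overrides go through
# __setattr__, so setting 'debug' also flips the dump/return-fault flags
# handled above.
if __name__ == "__main__":
    example = SOAPConfig(Config, debug=1, simplify_objects=1)
    print "debug cascades to dumpSOAPOut:", example.dumpSOAPOut
    print "module-level Config.debug is unchanged:", Config.debug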
| 2ndy/RaspIM | usr/share/python-support/python-soappy/SOAPpy/Config.py | Python | gpl-2.0 | 7,423 | [
"Brian"
] | b77a3745f9d05ac7dc6c26804f49c79946626da4015b545eb40c2492e4f7eb54 |
# copyright 2004-2005 Samuele Pedroni
import sys
from java_lexer import Token
import java_parser
import java_nodes
from simpleprettyspec import simplepretty,p_space,p_nl,p_repeat,p_dependent
from simpleprettyspec import p_indent,p_dedent,p_morep,P_LAST,P_EFFECT
from simpleprettyspec import p_expect_skip
class IndentPrinter:
clashes = ()
def __init__(self,output):
self.out = output
self.col = 0
self.last = '\n'
self.soft_nl = 0
self.indent_level = [0]
self.indent_step = 4
def write(self,text):
if self.last == '\n':
self.out.write(' '*self.col)
self.out.write(text)
self.col += len(text)
self.last = text
def linehome(self):
return self.last == '\n'
def nl(self,soft=0):
if self.last != '\n' or not self.soft_nl:
self.out.write('\n')
self.soft_nl = soft
self.col = self.indent_level[-1]
self.last = '\n'
def emit(self,text):
if not text: return
if text.isspace():
if self.last in " \n":
return
self.write(" ")
else:
tail = self.last[-1]
head = text[0]
if (tail+head in self.clashes or
(tail.isalnum() or tail in "$_") and
(head.isalnum() or head in "$_`")): # !!! factor out `
self.write(" ")
self.write(text)
def here(self,delta=0):
self.indent_level.append(self.col+delta)
def indent(self):
assert self.last == '\n'
self.col = self.indent_level[-1] + self.indent_step
self.indent_level.append(self.col)
def dedent(self):
self.indent_level.pop()
self.col = self.indent_level[-1]
class substs:
def __init__(self, dic):
self.dic = dic
def __getitem__(self, key):
try:
return self.dic[key]
except KeyError:
return "<%s>" % key
class JavaPrinter(IndentPrinter):
clashes = ("//","/*","++","--")
def __init__(self,output):
IndentPrinter.__init__(self,output)
def space(self):
self.emit(" ")
def vertical_space(self,delta,soft=0):
if delta > 1:
self.nl()
self.nl(soft)
def emit_comment(self,comment, subst): # doesn't write final \n!
col = comment.col
comment = comment.value
if subst is not None:
comment = comment % substs(subst)
lines = comment.split('\n')
first = lines[0]
self.write(first)
lines = lines[1:]
tostrip = col
prefix = ' '*col
for line in lines:
if not line.startswith(prefix):
tostrip = 0
for line in lines:
self.nl()
self.write(line[tostrip:])
def emit_horizontal_comments(self,comments, subst):
# space + space separated comments...
for comment in comments:
self.space()
self.emit_comment(comment, subst)
def emit_vertical_comments(self,comments, subst):
end = comments[0].end
self.emit_comment(comments[0], subst)
for comment in comments[1:]:
self.vertical_space(comment.start - end)
end = comment.end
self.emit_comment(comment, subst)
def emit_infront_comments(self,tok, subst):
if not self.linehome():
self.nl()
comments = tok.infront_comments
self.emit_vertical_comments(comments, subst)
self.vertical_space(tok.lineno - comments[-1].end)
def emit_attached_comments(self,tok, subst):
comments = tok.attached_comments
start = comments[0].start
horizontal = 1 # all on one line
for comment in comments:
if comment.end != start:
horizontal = 0
break
if horizontal:
self.emit_horizontal_comments(comments, subst)
else:
self.space()
self.here()
self.emit_vertical_comments(comments, subst)
self.dedent()
if comments[-1].value.startswith("//"): # // needs newline
delta = 1
else:
delta = 0
delta = max(delta,tok.attached_line_delta)
if delta == 0:
self.space()
else:
self.vertical_space(delta,soft=1)
def emit_tok(self,tok,ctl='ika', subst=None):
# ctl: i=>infront k=>token a=>attached
if 'i' in ctl and tok.infront_comments:
self.emit_infront_comments(tok, subst)
if 'k' in ctl:
self.emit(tok.value)
if 'a' in ctl and tok.attached_comments:
self.emit_attached_comments(tok, subst)
# ~~~
def hierarchy(cl):
hier = [cl.__name__]
while cl.__bases__:
assert len(cl.__bases__) == 1
cl = cl.__bases__[0]
hier.append(cl.__name__)
return hier
class NodeVisitTracker: # !!! polish
def __init__(self,visitor,node,left=None,right=None):
self.node = node
self.visitor = visitor
self.children = node.children
self.index = 0
self.stop = len(self.children)
self.left,self.right = left,right
def parent(self):
return self.node
def prev(self):
index = self.index
if index == 0:
return self.left
else:
return self.children[index-1]
def cur(self):
return self.children[self.index]
def next(self):
index = self.index
if index == self.stop-1:
return self.right
else:
return self.children[index+1]
def flag(self,name):
return self.flags.get(name,0)
def expect_skip(self):
self.index = -1
def skip(self):
# !!! but comments?
self.index += 1
def visit_cur_forward(self,**flags):
self.flags = flags
r = self.visitor.visit(self.children[self.index],self)
self.index += 1
return r
def exhausted(self):
return self.stop == self.index
def go(self):
r = 0
stop = self.stop
visit = self.visitor.visit
children = self.children
index = self.index
while index != stop:
self.flags = {}
r |= visit(children[index], self)
index = self.index = self.index + 1
return r
class JavaPretty:
def __init__(self,output=sys.stdout):
self.printer = JavaPrinter(output)
# shortcuts
self.nl = self.printer.nl
self.indent = self.printer.indent
self.dedent = self.printer.dedent
self.space = self.printer.space
self._separators = []
self._disp_cache = {}
def visit(self,node, ctxt=None):
cl = node.__class__
try:
before, do_visit, after = self._disp_cache[cl.__name__]
except KeyError:
hier = hierarchy(cl)
for name in hier:
do_visit = getattr(self,'visit_%s' % name,None)
if do_visit is not None:
break
else:
do_visit = self.default_visit
after = []
for name in hier:
after_meth = getattr(self,'after_%s' % name,None)
if after_meth is not None:
after.append(after_meth)
hier.reverse()
before = []
for name in hier:
before_meth = getattr(self,'before_%s' % name,None)
if before_meth is not None:
before.append(before_meth)
self._disp_cache[cl.__name__] = before, do_visit, after
for meth in before:
meth(node,ctxt)
r = do_visit(node,ctxt)
for meth in after:
meth(node,ctxt)
if r is None:
return 1
return r
def push_separator(self,tok):
self._separators.append(tok)
def pop_separator(self):
tok = self._separators.pop()
if tok is not None:
self.printer.emit_tok(tok,'ia')
def emit_tok(self,tok,ctl='ika', subst=None):
if 'k' in ctl:
seps = self._separators
for i in range(len(seps)-1,-1,-1):
if seps[i] is not None:
self.printer.emit_tok(seps[i])
seps[i] = None
else:
break
self.printer.emit_tok(tok,ctl, subst)
def default_visit(self,node,ctxt):
if node is None: return
if isinstance(node,Token):
self.emit_tok(node)
return
return NodeVisitTracker(self,node).go()
# specialized cases
    # !!! Assignment: space = space
def visit_Expressions(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
if tracker.exhausted(): return 0
prev = 0
last = tracker.visit_cur_forward()
while not tracker.exhausted():
tok = tracker.cur()
tracker.skip()
prev |= last
if prev:
self.push_separator(tok)
else:
self.emit_tok(tok,'ia')
last = tracker.visit_cur_forward()
if prev:
self.pop_separator()
return last|prev
visit_FormalParameterList = visit_Expressions
def visit_ClassCreator(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
tracker.visit_cur_forward()
tracker.visit_cur_forward()
if node.ClassBodyOpt:
self.printer.here(+1)
tracker.visit_cur_forward()
self.dedent()
def visit_ArrayCreator(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
tracker.visit_cur_forward()
tracker.visit_cur_forward()
if node.has('ArrayInitializer'):
self.space()
tracker.visit_cur_forward()
def before_Block(self,node,ctxt):
if ctxt and isinstance(ctxt.parent(),java_parser.TakesBlock):
self.space()
def after_Block(self,node,ctxt):
if (ctxt and isinstance(ctxt.parent(),java_parser.TakesBlock)
and ctxt.flag('chain')):
self.space()
visit_Block = simplepretty[
'{',p_nl,p_indent,
'BlockStatements',
p_dependent[p_nl],
p_dedent,'}']
visit_BlockStatements = simplepretty[P_LAST,
p_repeat[p_dependent[p_nl],
'BlockStatement',]]
def before_Statement(self,node,ctxt):
if isinstance(ctxt.parent(),java_parser.TakesBlock):
self.nl()
self.indent()
def after_Statement(self,node,ctxt):
if isinstance(ctxt.parent(),java_parser.TakesBlock):
self.dedent()
if ctxt.flag('chain'):
self.nl()
def visit_IfStatement(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
tracker.visit_cur_forward()
self.space()
tracker.visit_cur_forward()
tracker.visit_cur_forward()
def visit_IfElseStatement(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
tracker.visit_cur_forward()
self.space()
tracker.visit_cur_forward()
tracker.visit_cur_forward(chain=1)
tracker.visit_cur_forward()
tracker.visit_cur_forward()
def visit_SwitchStatement(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
tracker.visit_cur_forward()
self.space()
tracker.visit_cur_forward()
self.space()
tracker.visit_cur_forward()
self.nl()
tracker.visit_cur_forward()
tracker.visit_cur_forward()
visit_SwitchBlockStatementGroup = simplepretty[
'SwitchLabel',
p_nl,p_indent,
'BlockStatements',
p_dependent[p_nl],
p_dedent]
def visit_SwitchLabel(self,node,ctxt):
if node.has('CASE'):
tracker = NodeVisitTracker(self,node)
tracker.visit_cur_forward()
self.space()
tracker.visit_cur_forward()
tracker.visit_cur_forward()
else:
self.default_visit(node,ctxt)
def visit_TryStatement(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
tracker.visit_cur_forward()
self.space()
while not tracker.exhausted():
tracker.visit_cur_forward()
def visit_CatchClause(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
self.space()
tracker.visit_cur_forward()
self.space()
tracker.visit_cur_forward()
tracker.visit_cur_forward()
tracker.visit_cur_forward()
self.space()
tracker.visit_cur_forward()
visit_FinallyClause = simplepretty[p_space,"finally",p_space,"Block"]
visit_ClassBody = simplepretty[p_space,'{',
p_nl,p_indent,p_nl,
'ClassBodyDeclarations',
p_dependent[p_nl,p_nl],
p_dedent,'}']
visit_InterfaceBody = simplepretty[p_space,'{',
p_nl,p_indent,p_nl,
'InterfaceBodyDeclarations',
p_dependent[p_nl,p_nl],
p_dedent,'}']
visit_ClassBodyDeclarations = simplepretty[P_LAST,
p_repeat[p_dependent[p_nl,p_nl],'ClassBodyDeclaration',
]]
visit_InterfaceBodyDeclarations = simplepretty[P_LAST,
p_repeat[p_dependent[p_nl,p_nl],'InterfaceBodyDeclaration',
]]
def visit_InitBody(self,node,ctxt):
tracker = NodeVisitTracker(self,node)
if node.has('STATIC'):
tracker.visit_cur_forward()
self.space()
tracker.visit_cur_forward()
visit_CompilationUnit = simplepretty[
'PackageClauseOpt',
p_dependent[p_nl,p_nl],
'ImportDeclarations',
p_dependent[p_nl,p_nl],
'TypeDeclarations',
]
visit_ImportDeclarations = simplepretty[P_LAST,
p_repeat[p_dependent[p_nl],'ImportDeclaration',
]]
visit_TypeDeclarations = simplepretty[P_LAST,
p_repeat[p_dependent[p_nl,p_nl],'TypeDeclaration',
]]
def pretty(source,start='Statement',output=None):
if isinstance(source,java_parser.Node):
ast = source
else:
ast = java_parser.parse(source,start)
if output is None:
JavaPretty().visit(ast)
else:
JavaPretty(output).visit(ast)
# Statement
TEST_STUFF = """
{
x = xeon(1,2,3);
`a `([`x],[`y,`z]);
}
"""
TEST_CREATOR = """
{
x = xeon(new Beh()) +3 ;
x = xeon(new Beh() {
public void evoke() {} }) + 2;
x = new Object[][] { {a,b},
{c,d} };
}
"""
TEST_IF = """
{ if (cond) A(); if(cond) {} if (cond) { A(); }
if (cond) { A(); { B(); } }
if (cond) A(); else B(); if(cond) {} else {} if (cond) { A(); } else B();
if (cond) { A(); { B(); } } else B(); } """
TEST_SWITCH = """
{ if(cond) switch(x) {}
switch(x) { case 0: A(); }
switch(x) { default: A(); }
switch(x) { case +1: default: { A(); } }
switch(x) { case 0: A(); case 1: default: { A(); } }
switch(x) { case 0: A(); case 1: case 2: { } }
switch(x) { default: { A(); } }
} """
TEST_TRY = """
{
try { a(); } finally {}
try { a(); } catch(E e) {}
try { a(); } catch(E e) {} finally {}
try { a(); } catch(E e) {} catch(F e) {} finally {}
}
"""
# other
TEST_COMPUNIT0 = 'CompilationUnit',"""
package a;
import b.B;
import c.*;
"""
TEST_COMPUNIT = 'CompilationUnit',"""
package a;
import b.B;
import c.*;
public class Y {
}
class X {
}
"""
TEST_COMPUNIT1 = 'CompilationUnit',"""
package a;
public class Y {
}
"""
TEST_METH = 'MethodDecl',"""
public void method(int x,int y) {
} """
TEST_CLASS = 'ClassDeclaration',"""
public class B extends C {
public int a,b;
public abstract method x();
public method y(int[] a) {
}
public class Z { }
{
while(false) {}
x();
}
static {
X();
}
}"""
def test():
for name,test in globals().items():
if name.startswith('TEST_'):
print name
if isinstance(test,tuple):
start, source = test
pretty(source,start)
else:
pretty(test)
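# --- Illustrative usage sketch (editor addition, not part of the original
# module).  pretty() accepts either raw Java source plus a grammar start
# symbol or an already-parsed java_parser.Node, and writes to sys.stdout
# unless an object with a write() method is passed as output.
if __name__ == "__main__":
    import StringIO
    buf = StringIO.StringIO()
    pretty("{ if (cond) { a(); } else { b(); } }", start='Statement', output=buf)
    print buf.getvalue()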
| alvin319/CarnotKE | jyhton/src/templates/java_pretty.py | Python | apache-2.0 | 16,654 | [
"VisIt"
] | 184da53329c951473c32c389f6acb474fc9725b13727544d929c2ea7d83238c7 |
#!/usr/bin/env python
"""
Framework to start a simulated vehicle and connect it to MAVProxy.
Peter Barker, April 2016
based on sim_vehicle.sh by Andrew Tridgell, October 2011
"""
import atexit
import getpass
import optparse
import os
import os.path
import signal
import subprocess
import sys
import tempfile
import time
import shlex
# List of open terminal windows for macosx
windowID = []
class CompatError(Exception):
"""A custom exception class to hold state if we encounter the parse error we are looking for"""
def __init__(self, error, opts, rargs):
Exception.__init__(self, error)
self.opts = opts
self.rargs = rargs
class CompatOptionParser(optparse.OptionParser):
"""An option parser which emulates the behaviour of the old sim_vehicle.sh; if passed -C, the first argument not understood starts a list of arguments that are passed straight to mavproxy"""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
def error(self, error):
"""Override default error handler called by optparse.OptionParser.parse_args when a parse error occurs; raise a detailed exception which can be caught"""
if error.find("no such option") != -1:
raise CompatError(error, self.values, self.rargs)
optparse.OptionParser.error(self, error)
def parse_args(self, args=None, values=None):
"""Wrap parse_args so we can catch the exception raised upon discovering the known parameter parsing error"""
try:
opts, args = optparse.OptionParser.parse_args(self)
except CompatError as e:
if not e.opts.sim_vehicle_sh_compatible:
print(e)
print("Perhaps you want --sim_vehicle_sh_compatible (-C)?")
sys.exit(1)
if e.opts.mavproxy_args:
print("--mavproxy-args not permitted in compat mode")
sys.exit(1)
args = []
opts = e.opts
mavproxy_args = [str(e)[16:]] # this trims "no such option" off
mavproxy_args.extend(e.rargs)
opts.ensure_value("mavproxy_args", " ".join(mavproxy_args))
return opts, args
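# Illustrative note (editor addition): in compat mode the first option that
# optparse rejects raises CompatError, and parse_args() above folds the
# rejected token plus all remaining arguments into opts.mavproxy_args.  For
# example, assuming "--cmd" is an option only MAVProxy understands and this
# parser does not define:
#   sim_vehicle.py -C -v ArduPlane --cmd "module load map"
# ends up with opts.mavproxy_args == '--cmd module load map' instead of
# aborting with a "no such option" error.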
def cygwin_pidof(proc_name):
""" Thanks to kata198 for this:
https://github.com/kata198/cygwin-ps-misc/blob/master/pidof
"""
pipe = subprocess.Popen("ps -ea | grep " + proc_name, shell=True, stdout=subprocess.PIPE)
output_lines = pipe.stdout.read().replace("\r", "").split("\n")
ret = pipe.wait()
pids = []
if ret != 0:
# No results
return []
for line in output_lines:
if not line:
continue
line_split = [item for item in line.split(' ') if item]
cmd = line_split[-1].split('/')[-1]
if cmd == proc_name:
try:
pid = int(line_split[0].strip())
except:
pid = int(line_split[1].strip())
if pid not in pids:
pids.append(pid)
return pids
def under_cygwin():
    """Return True if the Cygwin cygstart binary exists"""
return os.path.exists("/usr/bin/cygstart")
def under_macos():
return sys.platform == 'darwin'
def kill_tasks_cygwin(victims):
"""Shell out to ps -ea to find processes to kill"""
for victim in list(victims):
pids = cygwin_pidof(victim)
# progress("pids for (%s): %s" % (victim,",".join([ str(p) for p in pids])))
for apid in pids:
os.kill(apid, signal.SIGKILL)
def kill_tasks_macos():
for window in windowID:
cmd = "osascript -e \'tell application \"Terminal\" to close (window(get index of window id %s))\'" % window
os.system(cmd)
def kill_tasks_psutil(victims):
"""Use the psutil module to kill tasks by name. Sadly, this module is not available on Windows, but when it is we should be able to *just* use this routine"""
import psutil
for proc in psutil.process_iter():
if proc.status == psutil.STATUS_ZOMBIE:
continue
if proc.name in victims:
proc.kill()
def kill_tasks_pkill(victims):
    """Shell out to pkill(1) to kill processes by name"""
for victim in victims: # pkill takes a single pattern, so iterate
cmd = ["pkill", victim]
run_cmd_blocking("pkill", cmd, quiet=True)
class BobException(Exception):
"""Handle Bob's Exceptions"""
pass
def kill_tasks():
"""Clean up stray processes by name. This is a somewhat shotgun approach"""
progress("Killing tasks")
try:
victim_names = {
'JSBSim',
'lt-JSBSim',
'ArduPlane.elf',
'ArduCopter.elf',
'APMrover2.elf',
'AntennaTracker.elf',
'JSBSIm.exe',
'MAVProxy.exe',
'runsim.py',
'AntennaTracker.elf',
}
for frame in _options_for_frame.keys():
if "waf_target" not in _options_for_frame[frame]:
continue
exe_name = os.path.basename(_options_for_frame[frame]["waf_target"])
victim_names.add(exe_name)
if under_cygwin():
return kill_tasks_cygwin(victim_names)
if under_macos():
return kill_tasks_macos()
try:
kill_tasks_psutil(victim_names)
except ImportError:
kill_tasks_pkill(victim_names)
except Exception as e:
progress("kill_tasks failed: {}".format(str(e)))
def check_jsbsim_version():
"""Assert that the JSBSim we will run is the one we expect to run"""
jsbsim_cmd = ["JSBSim", "--version"]
progress_cmd("Get JSBSim version", jsbsim_cmd)
try:
jsbsim_version = subprocess.Popen(jsbsim_cmd, stdout=subprocess.PIPE).communicate()[0]
except OSError:
jsbsim_version = '' # this value will trigger the ".index"
# check below and produce a reasonable
# error message
try:
jsbsim_version.index("ArduPilot")
except ValueError:
print(r"""
=========================================================
You need the latest ArduPilot version of JSBSim installed
and in your \$PATH
Please get it from git://github.com/tridge/jsbsim.git
See
http://ardupilot.org/dev/docs/setting-up-sitl-on-linux.html
for more details
=========================================================
""")
sys.exit(1)
def progress(text):
"""Display sim_vehicle progress text"""
print("SIM_VEHICLE: " + text)
def find_autotest_dir():
"""Return path to autotest directory"""
return os.path.dirname(os.path.realpath(__file__))
def find_root_dir():
"""Return path to root directory"""
return os.path.realpath(os.path.join(find_autotest_dir(), '../..'))
# define and run parser
parser = CompatOptionParser("sim_vehicle.py",
epilog="eeprom.bin in the starting directory contains the parameters for your " \
"simulated vehicle. Always start from the same directory. It is "\
"recommended that you start in the main vehicle directory for the vehicle" \
"you are simulating, for example, start in the ArduPlane directory to " \
"simulate ArduPlane")
parser.add_option("-v", "--vehicle", type='string', default=None, help="vehicle type (ArduPlane, ArduCopter or APMrover2)")
parser.add_option("-f", "--frame", type='string', default=None, help="""set aircraft frame type
for copters can choose +, X, quad or octa
for planes can choose elevon or vtail""")
parser.add_option("-C", "--sim_vehicle_sh_compatible", action='store_true', default=False, help="be compatible with the way sim_vehicle.sh works; make this the first option")
parser.add_option("-H", "--hil", action='store_true', default=False, help="start HIL")
group_build = optparse.OptionGroup(parser, "Build options")
group_build.add_option("-N", "--no-rebuild", action='store_true', default=False, help="don't rebuild before starting ardupilot")
group_build.add_option("-D", "--debug", action='store_true', default=False, help="build with debugging")
group_build.add_option("-c", "--clean", action='store_true', default=False, help="do a make clean before building")
group_build.add_option("-j", "--jobs", default=None, type='int', help="number of processors to use during build (default for waf : number of processor, for make : 1)")
group_build.add_option("-b", "--build-target", default=None, type='string', help="override SITL build target")
group_build.add_option("-s", "--build-system", default="waf", type='choice', choices=["make", "waf"], help="build system to use")
group_build.add_option("", "--no-rebuild-on-failure", dest="rebuild_on_failure", action='store_false', default=True, help="if build fails, do not clean and rebuild")
group_build.add_option("", "--waf-configure-arg", action="append", dest="waf_configure_args", type="string", default=[], help="extra arguments to pass to waf in its configure step")
group_build.add_option("", "--waf-build-arg", action="append", dest="waf_build_args", type="string", default=[], help="extra arguments to pass to waf in its build step")
parser.add_option_group(group_build)
group_sim = optparse.OptionGroup(parser, "Simulation options")
group_sim.add_option("-I", "--instance", default=0, type='int', help="instance of simulator")
group_sim.add_option("-V", "--valgrind", action='store_true', default=False, help="enable valgrind for memory access checking (very slow!)")
group_sim.add_option("-T", "--tracker", action='store_true', default=False, help="start an antenna tracker instance")
group_sim.add_option("-A", "--sitl-instance-args", type='string', default=None, help="pass arguments to SITL instance")
# group_sim.add_option("-R", "--reverse-throttle", action='store_true', default=False, help="reverse throttle in plane")
group_sim.add_option("-G", "--gdb", action='store_true', default=False, help="use gdb for debugging ardupilot")
group_sim.add_option("-g", "--gdb-stopped", action='store_true', default=False, help="use gdb for debugging ardupilot (no auto-start)")
group_sim.add_option("-d", "--delay-start", default=0, type='float', help="delays the start of mavproxy by the number of seconds")
group_sim.add_option("-B", "--breakpoint", type='string', action="append", default=[], help="add a breakpoint at given location in debugger")
group_sim.add_option("-M", "--mavlink-gimbal", action='store_true', default=False, help="enable MAVLink gimbal")
group_sim.add_option("-L", "--location", type='string', default='CMAC', help="select start location from Tools/autotest/locations.txt")
group_sim.add_option("-l", "--custom-location", type='string', default=None, help="set custom start location")
group_sim.add_option("-S", "--speedup", default=1, type='int', help="set simulation speedup (1 for wall clock time)")
group_sim.add_option("-t", "--tracker-location", default='CMAC_PILOTSBOX', type='string', help="set antenna tracker start location")
group_sim.add_option("-w", "--wipe-eeprom", action='store_true', default=False, help="wipe EEPROM and reload parameters")
group_sim.add_option("-m", "--mavproxy-args", default=None, type='string', help="additional arguments to pass to mavproxy.py")
group_sim.add_option("", "--strace", action='store_true', default=False, help="strace the ArduPilot binary")
group_sim.add_option("", "--model", type='string', default=None, help="Override simulation model to use")
parser.add_option_group(group_sim)
# special-cased parameters for mavproxy, because some people's fingers
# have long memories, and they don't want to use -C :-)
group = optparse.OptionGroup(parser, "Compatibility MAVProxy options (consider using --mavproxy-args instead)")
group.add_option("", "--out", default=[], type='string', action="append", help="create an additional mavlink output")
group.add_option("", "--map", default=False, action='store_true', help="load map module on startup")
group.add_option("", "--console", default=False, action='store_true', help="load console module on startup")
group.add_option("", "--aircraft", default=None, help="store state and logs in named directory")
parser.add_option_group(group)
cmd_opts, cmd_args = parser.parse_args()
# clean up processes at exit:
atexit.register(kill_tasks)
progress("Start")
if cmd_opts.sim_vehicle_sh_compatible and cmd_opts.jobs is None:
cmd_opts.jobs = 1
# validate parameters
if cmd_opts.hil:
if cmd_opts.valgrind:
print("May not use valgrind with hil")
sys.exit(1)
if cmd_opts.gdb or cmd_opts.gdb_stopped:
print("May not use gdb with hil")
sys.exit(1)
if cmd_opts.strace:
print("May not use strace with hil")
sys.exit(1)
if cmd_opts.valgrind and (cmd_opts.gdb or cmd_opts.gdb_stopped):
print("May not use valgrind with gdb")
sys.exit(1)
if cmd_opts.strace and (cmd_opts.gdb or cmd_opts.gdb_stopped):
print("May not use strace with gdb")
sys.exit(1)
if cmd_opts.strace and cmd_opts.valgrind:
print("valgrind and strace almost certainly not a good idea")
# magically determine vehicle type (if required):
if cmd_opts.vehicle is None:
cwd = os.getcwd()
cmd_opts.vehicle = os.path.basename(cwd)
# determine a frame type if not specified:
default_frame_for_vehicle = {
"APMrover2": "rover",
"ArduPlane": "jsbsim",
"ArduCopter": "quad",
"AntennaTracker": "tracker",
}
if cmd_opts.vehicle not in default_frame_for_vehicle:
# try in parent directories, useful for having config in subdirectories
cwd = os.getcwd()
while cwd:
bname = os.path.basename(cwd)
if not bname:
break
if bname in default_frame_for_vehicle:
cmd_opts.vehicle = bname
break
cwd = os.path.dirname(cwd)
# try to validate vehicle
if cmd_opts.vehicle not in default_frame_for_vehicle:
progress("** Is (%s) really your vehicle type? Try -v VEHICLETYPE if not, or be in the e.g. ArduCopter subdirectory" % (cmd_opts.vehicle,))
# determine frame options (e.g. build type might be "sitl")
if cmd_opts.frame is None:
cmd_opts.frame = default_frame_for_vehicle[cmd_opts.vehicle]
# setup ports for this instance
mavlink_port = "tcp:127.0.0.1:" + str(5760 + 10 * cmd_opts.instance)
simout_port = "127.0.0.1:" + str(5501 + 10 * cmd_opts.instance)
"""
make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py
default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
extra_mavlink_cmds: extra parameters that will be passed to mavproxy
"""
_options_for_frame = {
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
# COPTER
"+": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"quad": {
"model": "+",
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"X": {
"waf_target": "bin/arducopter-quad",
# this param set FRAME doesn't actually work because mavproxy
# won't set a parameter unless it knows of it, and the param fetch happens asynchronously
"default_params_filename": "default_params/copter.parm",
"extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
},
"hexa": {
"make_target": "sitl-hexa",
"waf_target": "bin/arducopter-hexa",
"default_params_filename": "default_params/copter.parm",
},
"octa": {
"make_target": "sitl-octa",
"waf_target": "bin/arducopter-octa",
"default_params_filename": "default_params/copter.parm",
},
"tri": {
"make_target": "sitl-tri",
"waf_target": "bin/arducopter-tri",
"default_params_filename": "default_params/copter-tri.parm",
},
"y6": {
"make_target": "sitl-y6",
"waf_target": "bin/arducopter-y6",
"default_params_filename": "default_params/copter-y6.parm",
},
# COPTER TYPES
"IrisRos": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"firefly": {
"waf_target": "bin/arducopter-firefly",
"default_params_filename": "default_params/firefly.parm",
},
# HELICOPTER
"heli": {
"make_target": "sitl-heli",
"waf_target": "bin/arducopter-heli",
"default_params_filename": "default_params/copter-heli.parm",
},
"heli-dual": {
"make_target": "sitl-heli-dual",
"waf_target": "bin/arducopter-coax", # is this correct? -pb201604301447
},
"heli-compound": {
"make_target": "sitl-heli-compound",
"waf_target": "bin/arducopter-coax", # is this correct? -pb201604301447
},
"singlecopter": {
"make_target": "sitl-single",
"waf_target": "bin/arducopter-single",
"default_params_filename": "default_params/copter-single.parm",
},
"coaxcopter": {
"make_target": "sitl-coax",
"waf_target": "bin/arducopter-coax",
"default_params_filename": "default_params/copter-coax.parm",
},
# PLANE
"quadplane-tilttri": {
"make_target": "sitl-tri",
"waf_target": "bin/arduplane-tri",
"default_params_filename": "default_params/quadplane-tilttri.parm",
},
"quadplane-tri": {
"make_target": "sitl-tri",
"waf_target": "bin/arduplane-tri",
"default_params_filename": "default_params/quadplane-tri.parm",
},
"quadplane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane.parm",
},
"plane-elevon": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-elevons.parm",
},
"plane-vtail": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-vtail.parm",
},
"plane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
},
# ROVER
"rover": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover.parm",
},
"rover-skid": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover-skid.parm",
},
# SIM
"Gazebo": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"last_letter": {
"waf_target": "bin/arduplane",
},
"CRRCSim": {
"waf_target": "bin/arduplane",
},
"jsbsim": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-jsbsim.parm",
},
}
_default_waf_target = {
"ArduPlane": "bin/arduplane",
"ArduCopter": "bin/arducopter-quad",
"APMrover2": "bin/ardurover",
"AntennaTracker": "bin/antennatracker",
}
def default_waf_target(vehicle):
"""Returns a waf target based on vehicle type, which is often determined by which directory the user is in"""
return _default_waf_target[vehicle]
def options_for_frame(frame, vehicle, opts):
"""Return informatiom about how to sitl for frame e.g. build-type==sitl"""
ret = None
if frame in _options_for_frame:
ret = _options_for_frame[frame]
else:
for p in ["octa", "tri", "y6", "firefly", "heli", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane"]:
if frame.startswith(p):
ret = _options_for_frame[p]
break
if ret is None:
if frame.endswith("-heli"):
ret = _options_for_frame["heli"]
if ret is None:
ret = {}
if "model" not in ret:
ret["model"] = frame
if "sitl-port" not in ret:
ret["sitl-port"] = True
if opts.model is not None:
ret["model"] = opts.model
if (ret["model"].find("xplane") != -1 or ret["model"].find("flightaxis") != -1):
ret["sitl-port"] = False
if "make_target" not in ret:
ret["make_target"] = "sitl"
if "waf_target" not in ret:
ret["waf_target"] = default_waf_target(vehicle)
if opts.build_target is not None:
ret["make_target"] = opts.build_target
ret["waf_target"] = opts.build_target
return ret
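# Illustrative sketch of the merged result (added commentary; assumes no --model or
# --build-target overrides): options_for_frame("quad", "ArduCopter", cmd_opts) yields
#   {"model": "+", "waf_target": "bin/arducopter-quad",
#    "default_params_filename": "default_params/copter.parm",
#    "sitl-port": True, "make_target": "sitl"}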
def do_build_waf(opts, frame_options):
"""Build sitl using waf"""
progress("WAF build")
old_dir = os.getcwd()
root_dir = find_root_dir()
os.chdir(root_dir)
waf_light = os.path.join(root_dir, "modules/waf/waf-light")
cmd_configure = [waf_light, "configure", "--board", "sitl"]
if opts.debug:
cmd_configure.append("--debug")
pieces = [ shlex.split(x) for x in opts.waf_configure_args ]
for piece in pieces:
cmd_configure.extend(piece)
run_cmd_blocking("Configure waf", cmd_configure, check=True)
if opts.clean:
run_cmd_blocking("Building clean", [waf_light, "clean"])
cmd_build = [waf_light, "build", "--target", frame_options["waf_target"]]
if opts.jobs is not None:
cmd_build += ['-j', str(opts.jobs)]
pieces = [ shlex.split(x) for x in opts.waf_build_args ]
for piece in pieces:
cmd_build.extend(piece)
_, sts = run_cmd_blocking("Building", cmd_build)
if sts != 0: # build failed
if opts.rebuild_on_failure:
progress("Build failed; cleaning and rebuilding")
run_cmd_blocking("Building clean", [waf_light, "clean"])
_, sts = run_cmd_blocking("Building", cmd_build)
if sts != 0:
progress("Build failed")
sys.exit(1)
else:
progress("Build failed")
sys.exit(1)
os.chdir(old_dir)
def do_build(vehicledir, opts, frame_options):
"""Build build target (e.g. sitl) in directory vehicledir"""
if opts.build_system == 'waf':
return do_build_waf(opts, frame_options)
old_dir = os.getcwd()
os.chdir(vehicledir)
if opts.clean:
run_cmd_blocking("Building clean", ["make", "clean"])
build_target = frame_options["make_target"]
if opts.debug:
build_target += "-debug"
build_cmd = ["make", build_target]
if opts.jobs is not None:
build_cmd += ['-j', str(opts.jobs)]
_, sts = run_cmd_blocking("Building %s" % build_target, build_cmd)
if sts != 0:
progress("Build failed; cleaning and rebuilding")
run_cmd_blocking("Cleaning", ["make", "clean"])
_, sts = run_cmd_blocking("Building %s" % build_target, build_cmd)
if sts != 0:
progress("Build failed")
sys.exit(1)
os.chdir(old_dir)
def find_location_by_name(autotest, locname):
"""Search locations.txt for locname, return GPS coords"""
locations_filepath = os.path.join(autotest, "locations.txt")
for line in open(locations_filepath, 'r'):
line = line.rstrip("\n")
(name, loc) = line.split("=")
if name == locname:
return loc
print("Failed to find location (%s)" % cmd_opts.location)
sys.exit(1)
def progress_cmd(what, cmd):
"""Print cmd in a way a user could cut-and-paste to get the same effect"""
progress(what)
shell_text = "%s" % (" ".join(['"%s"' % x for x in cmd]))
progress(shell_text)
def run_cmd_blocking(what, cmd, quiet=False, check=False, **kw):
if not quiet:
progress_cmd(what, cmd)
p = subprocess.Popen(cmd, **kw)
ret = os.waitpid(p.pid, 0)
_, sts = ret
if check and sts != 0:
progress("(%s) exited with code %d" % (what,sts,))
sys.exit(1)
return ret
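# Added note: run_cmd_blocking returns the (pid, status) pair from os.waitpid, so
# callers unpack it as "_, sts = run_cmd_blocking(...)" and treat a non-zero status
# as a failed command.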
def run_in_terminal_window(autotest, name, cmd):
"""Execute the run_in_terminal_window.sh command for cmd"""
global windowID
runme = [os.path.join(autotest, "run_in_terminal_window.sh"), name]
runme.extend(cmd)
progress_cmd("Run " + name, runme)
if under_macos():
# on MacOS record the window IDs so we can close them later
out = subprocess.Popen(runme, stdout=subprocess.PIPE).communicate()[0]
import re
p = re.compile('tab 1 of window id (.*)')
windowID.append(p.findall(out)[0])
else:
p = subprocess.Popen(runme)
tracker_uarta = None # blemish
def start_antenna_tracker(autotest, opts):
"""Compile and run the AntennaTracker, add tracker to mavproxy"""
global tracker_uarta
progress("Preparing antenna tracker")
tracker_home = find_location_by_name(find_autotest_dir(), opts.tracker_location)
vehicledir = os.path.join(autotest, "../../" + "AntennaTracker")
tracker_frame_options = {
"waf_target": _default_waf_target["AntennaTracker"],
}
do_build(vehicledir, opts, tracker_frame_options)
tracker_instance = 1
os.chdir(vehicledir)
tracker_uarta = "tcp:127.0.0.1:" + str(5760 + 10 * tracker_instance)
exe = os.path.join(vehicledir, "AntennaTracker.elf")
run_in_terminal_window(autotest, "AntennaTracker", ["nice", exe, "-I" + str(tracker_instance), "--model=tracker", "--home=" + tracker_home])
def start_vehicle(binary, autotest, opts, stuff, loc):
"""Run the ArduPilot binary"""
cmd_name = opts.vehicle
cmd = []
if opts.valgrind:
cmd_name += " (valgrind)"
cmd.append("valgrind")
if opts.gdb:
cmd_name += " (gdb)"
cmd.append("gdb")
gdb_commands_file = tempfile.NamedTemporaryFile(delete=False)
atexit.register(os.unlink, gdb_commands_file.name)
for breakpoint in opts.breakpoint:
gdb_commands_file.write("b %s\n" % (breakpoint,))
gdb_commands_file.write("r\n")
gdb_commands_file.close()
cmd.extend(["-x", gdb_commands_file.name])
cmd.append("--args")
if opts.strace:
cmd_name += " (strace)"
cmd.append("strace")
strace_options = ['-o', binary + '.strace', '-s', '8000', '-ttt']
cmd.extend(strace_options)
cmd.append(binary)
cmd.append("-S")
cmd.append("-I" + str(opts.instance))
cmd.extend(["--home", loc])
if opts.wipe_eeprom:
cmd.append("-w")
cmd.extend(["--model", stuff["model"]])
cmd.extend(["--speedup", str(opts.speedup)])
if opts.sitl_instance_args:
cmd.extend(opts.sitl_instance_args.split(" ")) # this could be a lot better..
if opts.mavlink_gimbal:
cmd.append("--gimbal")
if "default_params_filename" in stuff:
path = os.path.join(autotest, stuff["default_params_filename"])
progress("Using defaults from (%s)" % (path,))
cmd.extend(["--defaults", path])
run_in_terminal_window(autotest, cmd_name, cmd)
def start_mavproxy(opts, stuff):
"""Run mavproxy"""
# FIXME: would be nice to e.g. "mavproxy.mavproxy(....).run" rather than shelling out
extra_cmd = ""
cmd = []
if under_cygwin():
cmd.append("/usr/bin/cygstart")
cmd.append("-w")
cmd.append("/cygdrive/c/Program Files (x86)/MAVProxy/mavproxy.exe")
else:
cmd.append("mavproxy.py")
if opts.hil:
cmd.extend(["--load-module", "HIL"])
else:
cmd.extend(["--master", mavlink_port])
if stuff["sitl-port"]:
cmd.extend(["--sitl", simout_port])
# If running inside of a vagrant guest, then we probably want to forward our mavlink out to the containing host OS
if getpass.getuser() == "vagrant":
cmd.extend(["--out", "10.0.2.2:14550"])
for port in [14550, 14551]:
cmd.extend(["--out", "127.0.0.1:" + str(port)])
if opts.tracker:
cmd.extend(["--load-module", "tracker"])
global tracker_uarta
# tracker_uarta is set when we start the tracker...
extra_cmd += "module load map; tracker set port %s; tracker start; tracker arm;" % (tracker_uarta,)
if opts.mavlink_gimbal:
cmd.extend(["--load-module", "gimbal"])
if "extra_mavlink_cmds" in stuff:
extra_cmd += " " + stuff["extra_mavlink_cmds"]
if opts.mavproxy_args:
cmd.extend(opts.mavproxy_args.split(" ")) # this could be a lot better..
# compatibility pass-through parameters (for those that don't want
# to use -C :-)
for out in opts.out:
cmd.extend(['--out', out])
if opts.map:
cmd.append('--map')
if opts.console:
cmd.append('--console')
if opts.aircraft is not None:
cmd.extend(['--aircraft', opts.aircraft])
if len(extra_cmd):
cmd.extend(['--cmd', extra_cmd])
local_mp_modules_dir = os.path.abspath(
os.path.join(__file__, '..', '..', 'mavproxy_modules'))
env = dict(os.environ)
env['PYTHONPATH'] = local_mp_modules_dir + os.pathsep + env.get('PYTHONPATH', '')
run_cmd_blocking("Run MavProxy", cmd, env=env)
progress("MAVProxy exitted")
frame_infos = options_for_frame(cmd_opts.frame, cmd_opts.vehicle, cmd_opts)
if frame_infos["model"] == "jsbsim":
check_jsbsim_version()
vehicle_dir = os.path.realpath(os.path.join(find_root_dir(), cmd_opts.vehicle))
if not os.path.exists(vehicle_dir):
print("vehicle directory (%s) does not exist" % (vehicle_dir,))
sys.exit(1)
if not cmd_opts.hil:
if cmd_opts.instance == 0:
kill_tasks()
if cmd_opts.tracker:
start_antenna_tracker(find_autotest_dir(), cmd_opts)
if cmd_opts.custom_location:
location = cmd_opts.custom_location
progress("Starting up at %s" % (location,))
else:
location = find_location_by_name(find_autotest_dir(), cmd_opts.location)
progress("Starting up at %s (%s)" % (location, cmd_opts.location))
if cmd_opts.hil:
# (unlikely)
run_in_terminal_window(find_autotest_dir(), "JSBSim", [os.path.join(find_autotest_dir(), "jsb_sim/runsim.py"), "--home", location, "--speedup=" + str(cmd_opts.speedup)])
else:
if not cmd_opts.no_rebuild: # i.e. we should rebuild
do_build(vehicle_dir, cmd_opts, frame_infos)
if cmd_opts.build_system == "waf":
if cmd_opts.debug:
binary_basedir = "build/sitl-debug"
else:
binary_basedir = "build/sitl"
vehicle_binary = os.path.join(find_root_dir(), binary_basedir, frame_infos["waf_target"])
else:
vehicle_binary = os.path.join(vehicle_dir, cmd_opts.vehicle + ".elf")
if not os.path.exists(vehicle_binary):
print("Vehicle binary (%s) does not exist" % (vehicle_binary,))
sys.exit(1)
start_vehicle(vehicle_binary, find_autotest_dir(), cmd_opts, frame_infos, location)
if cmd_opts.delay_start:
progress("Sleeping for %f seconds" % (cmd_opts.delay_start,))
time.sleep(float(cmd_opts.delay_start))
start_mavproxy(cmd_opts, frame_infos)
sys.exit(0)
| hsu/ardupilot | Tools/autotest/sim_vehicle.py | Python | gpl-3.0 | 30,806 | [
"Firefly"
] | 0c6bc55ce44fb86031743c319a80d83a03c75d56fda5d22578295a7430dbe2d7 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^', include('petycja_norweskie.petitions.urls', namespace='petitions')),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| watchdogpolska/petycja-norweskie | config/urls.py | Python | mit | 1,328 | [
"VisIt"
] | 0c2ec9090b95bbcce733f81165bc18c0b8f51a2b2c1900933bfdeb58bde13460 |
""" Sahana Eden Test Framework
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from time import time
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
import sys
import socket
from tests.web2unittest import Web2UnitTest
from gluon import current
try:
from twill import get_browser
from twill import set_output
from twill.browser import *
except ImportError:
raise NameError("Twill not installed")
try:
import mechanize
#from mechanize import BrowserStateError
#from mechanize import ControlNotFoundError
except ImportError:
raise NameError("Mechanize not installed")
class BrokenLinkTest(Web2UnitTest):
""" Smoke Test, visit every link it can find and report on the outcome """
def __init__(self):
Web2UnitTest.__init__(self)
self.b = get_browser()
self.b_data = StringIO()
set_output(self.b_data)
self.clearRecord()
# This string must exist in the URL for it to be followed
# Useful to avoid going to linked sites
self.homeURL = self.url
# Link used to identify a URL to a ticket
self.url_ticket = "/admin/default/ticket/"
# Tuple of strings that if in the URL will be ignored
# Useful to avoid dynamic URLs that trigger the same functionality
self.include_ignore = ("_language=",
"logout",
"appadmin",
"admin",
"delete",
)
# tuple of strings that should be removed from the URL before storing
# Typically this will be some variables passed in via the URL
self.strip_url = ("?_next=",
)
self.reportOnly = False
self.maxDepth = 16 # sanity check
self.setThreshold(10)
self.setUser("test@example.com/eden")
def clearRecord(self):
# the total url links visited
self.totalLinks = 0
# The number of unique urls found at depth i, where i is the index
self.linkDepth = []
# Dictionary of the parent for each URL
self.urlParentList = {}
# dictionary of ReportData objects indexed on the url
self.results = {}
def setReportOnly(self, action):
self.reportOnly = action
def setDepth(self, depth):
self.maxDepth = depth
def setUser(self, user):
self.credentials = user.split(",")
def setThreshold(self, value):
value = float(value)
self.threshold = value
# socket.setdefaulttimeout(value*2)
def login(self, credentials):
if credentials == "UNAUTHENTICATED":
url = "%s/default/user/logout" % self.homeURL
self.b.go(url)
return True
try:
(self.user, self.password) = credentials.split("/",1)
except:
msg = "Unable to split %s into a user name and password" % user
self.reporter(msg)
return False
url = "%s/default/user/login" % self.homeURL
self.b.go(url)
forms = self.b.get_all_forms()
for form in forms:
try:
if form["_formname"] == "login":
self.b._browser.form = form
form["email"] = self.user
form["password"] = self.password
self.b.submit("Login")
# If login is successful then should be redirected to the homepage
return self.b.get_url()[len(self.homeURL):] == "/default/index"
except:
# This should be a mechanize.ControlNotFoundError, but
# for some unknown reason that isn't caught on Windows or Mac
pass
return False
def runTest(self):
"""
Test to find all exposed links and check the http code returned.
This test doesn't run any javascript so some false positives
will be found.
The test can also display an histogram depicting the number of
links found at each depth.
"""
for user in self.credentials:
self.clearRecord()
if self.login(user):
self.reporter("Smoke Test for user %s" % self.user)
self.visitLinks()
def visitLinks(self):
url = self.homeURL + "/default/index"
to_visit = [url]
start = time()
if not self.reportOnly:
for depth in range(self.maxDepth):
if len(to_visit) == 0:
break
self.linkDepth.append(len(to_visit))
self.totalLinks += len(to_visit)
visit_start = time()
url_visited = "%d urls" % len(to_visit)
to_visit = self.visit(to_visit, depth)
msg = "%.2d Visited %s in %.3f seconds, %d more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
self.reporter(msg)
if self.config.verbose >= 2:
if self.config.verbose >= 3:
print >> self.stdout
if self.stdout.isatty(): # terminal should support colour
msg = "%.2d Visited \033[1;32m%s\033[0m in %.3f seconds, \033[1;31m%d\033[0m more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
print >> self.stdout, msg
if len(to_visit) > 0:
self.linkDepth.append(len(to_visit))
finish = time()
self.reporter("Finished took %.3f seconds" % (finish - start))
self.report()
def visit(self, url_list, depth):
repr_list = [".pdf", ".xls", ".rss", ".kml"]
to_visit = []
record_data = self.config.verbose > 0
for visited_url in url_list:
index_url = visited_url[len(self.homeURL):]
if record_data:
if index_url in self.results.keys():
print >> self.stdout, "Warning duplicated url: %s" % index_url
self.results[index_url] = ReportData()
current_results = self.results[index_url]
current_results.depth = depth
# Find out if the page can be visited
open_novisit = False
for repr in repr_list:
if repr in index_url:
open_novisit = True
break
try:
if open_novisit:
action = "open_novisit"
else:
action = "open"
visit_start = time()
self.b._journey(action, visited_url)
http_code = self.b.get_code()
duration = time() - visit_start
if record_data:
current_results.duration = duration
if duration > self.threshold:
if self.config.verbose >= 3:
print >> self.stdout, "%s took %.3f seconds" % (visited_url, duration)
except Exception as e:
duration = time() - visit_start
import traceback
print traceback.format_exc()
if record_data:
current_results.broken = True
current_results.exception = True
current_results.duration = duration
continue
http_code = self.b.get_code()
if http_code != 200:
if record_data:
current_results.broken = True
current_results.http_code = http_code
elif open_novisit:
continue
links = []
try:
if self.b._browser.viewing_html():
links = self.b._browser.links()
else:
continue
except Exception as e:
import traceback
print traceback.format_exc()
if record_data:
current_results.broken = True
current_results.exception = True
continue
for link in (links):
url = link.absolute_url
if url.find(self.url_ticket) != -1:
# A ticket was raised so...
# capture the details and add to brokenLinks
if record_data:
current_results.broken = True
current_results.ticket = url
break # no need to check any other links on this page
if url.find(self.homeURL) == -1:
continue
ignore_link = False
for ignore in self.include_ignore:
if url.find(ignore) != -1:
ignore_link = True
break
if ignore_link:
continue
for strip in self.strip_url:
location = url.find(strip)
if location != -1:
url = url[0:location]
short_url = url[len(self.homeURL):]
if url not in url_list and \
short_url != "" and \
short_url not in self.results.keys() and \
url not in to_visit:
self.urlParentList[short_url] = index_url
to_visit.append(url)
return to_visit
def report(self):
self.reporter("%d URLs visited" % self.totalLinks)
self.brokenReport()
self.timeReport()
if self.config.record_timings:
if not self.reportOnly:
self.record_timings()
self.scatterplot()
self.depthReport()
def record_timings(self):
import_error = ""
try:
import xlrd
except:
import_error += "ERROR: the xlrd modules is needed to record timings\n"
try:
import xlwt
except:
import_error += "ERROR: the xlwt modules is needed to record timings\n"
if import_error != "":
print >> self.stderr, import_error
return
rec_time_filename = self.config.record_timings_filename
try:
workbook = xlrd.open_workbook(filename=rec_time_filename,
formatting_info=True)
except:
workbook = None
summary = {}
if workbook:
summary = self.read_timings_sheet(workbook)
if len(summary["date"]) > 100:
# Need to rotate the file
# 1) make a summary and save this
self.report_timings_summary(summary, rec_time_filename)
# 2) archive the file
from zipfile import ZipFile
import os
zip_filename = os.path.join(self.config.path, "rec_time.zip")
archive = ZipFile(zip_filename, "a")
arc_name = "%s-%s.xls" % (rec_time_filename[len(self.config.path):-4],
current.request.now.date()
)
archive.write(rec_time_filename,arc_name)
archive.close()
# 3) clear the current file
os.unlink(rec_time_filename)
summary = {}
if "date" not in summary:
last_col = 0
summary["date"] = [current.request.now.date()]
else:
last_col = len(summary["date"])
summary["date"].append(current.request.now.date())
for (url, rd_obj) in self.results.items():
if url not in summary:
summary[url] = []
# ensure that the row is as long as the number of dates
shortage = last_col - len(summary[url])
if shortage > 0:
summary[url] = summary[url] + ['']*shortage
summary[url].append((rd_obj.get_duration(), rd_obj.is_broken()))
self.write_timings_sheet(summary, rec_time_filename)
def read_timings_sheet(self, workbook):
"""
This will extract all the details from the xls sheet
"""
sheet = workbook.sheet_by_name("Timings")
summary = {}
RED = 0x0A
num_cells = sheet.ncols
summary["date"] = []
for col in range(1, num_cells):
summary["date"].append(sheet.cell_value(0, col))
for row in range(1,sheet.nrows):
url = sheet.cell_value(row, 0)
summary[url] = []
for col in range(1, num_cells):
duration = sheet.cell_value(row, col)
xf = sheet.cell_xf_index(row, col)
bg = workbook.xf_list[xf].background
broken = (bg.pattern_colour_index == RED)
summary[url].append((duration, broken))
return summary
def write_timings_sheet(self, summary, filename=None):
import xlwt
RED = 0x0A
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet("Timings")
stylebroken = xlwt.XFStyle()
stylebroken.pattern.pattern = stylebroken.pattern.SOLID_PATTERN
stylebroken.pattern.pattern_fore_colour = RED
col = 1
for date in summary["date"]:
sheet.write(0,col,str(date))
col += 1
row = 1
for (url, results) in summary.items():
if url == "date":
continue
sheet.write(row,0,url)
col = 1
for data in results:
if len(data) == 2 and data[1]:
sheet.write(row,col,data[0],stylebroken)
elif len(data) > 0:
sheet.write(row,col,data[0])
col += 1
row += 1
if filename:
book.save(filename)
return book
def report_timings_summary(self,
summary,
summary_file_name = None,
mean_threshold = 1):
"""
This will extract the details from the sheet and optionally save
them to a summary file
summary: the summary details returned from the spreadsheet (read_timings_sheet)
summary_file_name: name of the file to record the summary details (if required)
mean_threshold: The minimum number of values required to include
the mean in the regression calculations
"""
import numpy
import datetime
good_values = []
other_values = []
total_values = []
for date in summary["date"]:
good_values.append([])
other_values.append([])
total_values.append([])
for (url,results) in summary.items():
if url == "date":
continue
else:
cnt = 0
for (duration, broken) in results:
if duration != "":
total_values[cnt].append(duration)
if broken:
other_values[cnt].append(duration)
else:
good_values[cnt].append(duration)
cnt += 1
# get the number of days each entry is after the first date
# and calculate the average, if the average is NAN then ignore both
date_summary = []
gv_mean = []
gv_std = []
gv_date = []
cnt = 0
start = datetime.datetime.strptime(summary["date"][0],"%Y-%m-%d")
for list in good_values:
if len(list) > mean_threshold:
mean = numpy.mean(list)
std = numpy.std(list)
if not numpy.isnan(mean):
this_date = datetime.datetime.strptime(summary["date"][cnt],"%Y-%m-%d")
date_summary.append((this_date - start).days)
gv_mean.append(mean)
gv_std.append(std)
gv_date.append(summary["date"][cnt])
cnt += 1
# calculate the regression line
if len(gv_mean) > 2:
(m,b) = numpy.polyfit(date_summary, gv_mean, 1)
else:
m = b = 0
if summary_file_name != None:
book = self.write_timings_sheet(summary)
sheet = book.add_sheet("summary")
row = 0
for date in gv_date:
sheet.write(row,0,str(date))
sheet.write(row,1,gv_mean[row])
row += 1
sheet.write(row,0,"Trend")
sheet.write(row,1,m)
# Save the details to the summary file
book.save(summary_file_name)
return (date_summary, gv_mean, gv_std, m, b)
def report_model_url(self):
print "Report breakdown by module"
for (model, value) in self.model_url.items():
print model
for ud in value:
url = ud[0]
depth = ud[1]
parent = ud[2]
tabs = "\t" * depth
print "%s %s-%s (parent url - %s)" % (tabs, depth, url, parent)
def brokenReport(self):
self.reporter("Broken Links")
as_html = current.test_config.html
n = 1
for (url, rd_obj) in self.results.items():
if as_html:
print_url = "<a href=%s%s target=\"_blank\">%s</a>" % (self.homeURL, url, url)
else:
print_url = url
if rd_obj.is_broken():
if rd_obj.threw_exception():
msg = "(Exception) %s" % print_url
else:
http_code = rd_obj.return_http_code()
ticket = rd_obj.the_ticket(as_html)
try:
parent = self.urlParentList[url]
if as_html:
parent = "<a href=%s%s target=\"_blank\">Parent</a>" % (self.homeURL, parent)
except:
parent = "unknown"
msg = "%3d. (%s - %s) %s called from %s" % (n,
http_code,
ticket,
print_url,
parent
)
self.reporter(msg)
n += 1
# If there are any broken links, report failed test.
self.assertTrue(n == 1, "Found Broken Links")
def timeReport(self):
from operator import itemgetter
import numpy
thresholdLink = {}
linktimes = []
for (url, rd_obj) in self.results.items():
duration = rd_obj.get_duration()
linktimes.append(duration)
if duration > self.threshold:
thresholdLink[url] = duration
self.reporter("Time Analysis - Links beyond threshold")
for (visited_url, duration) in sorted(thresholdLink.iteritems(),
key=itemgetter(1),
reverse=True):
self.reporter( "%s took %.3f seconds" % (visited_url, duration))
self.reporter("Time Analysis - summary")
total = len(linktimes)
average = numpy.mean(linktimes)
std = numpy.std(linktimes)
msg = "%s links visited with an average time of %s and standard deviation of %s" % (total, average, std)
self.reporter(msg)
def scatterplot(self):
"""
Method to draw a scatterplot of the average time to download links
against time. Add a regression line to show the trend over time.
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
import numpy
except ImportError:
return
        try:
            import xlrd
        except:
            print >> self.stderr, "ERROR: the xlrd module is needed to record timings"
            return
rec_time_filename = self.config.record_timings_filename
try:
workbook = xlrd.open_workbook(filename=rec_time_filename,
formatting_info=True)
except:
return
import numpy
# Only include the mean in the regression values if there are at least 10 URL timings
summary = self.read_timings_sheet(workbook)
(date_summary, gv_mean, gv_std, m, b) = self.report_timings_summary(summary, mean_threshold=10)
if len(gv_mean) <= 2:
return
fig = Figure(figsize=(5, 2.5))
canvas = self.FigureCanvas(fig)
ax = fig.add_subplot(111)
linear = numpy.poly1d([m,b])
denom = numpy.max(gv_std)/50
size = gv_std/denom
ax.scatter(date_summary, gv_mean, marker="d", s=size)
ax.plot(date_summary, linear(date_summary), '--r')
chart = StringIO()
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter("Scatterplot of average link times per successful run")
self.reporter(image)
self.reporter("The trend line has a current slope of %s" % m)
self.reporter("The y-intercept is %s seconds" % b)
def depthReport(self):
"""
Method to draw a histogram of the number of new links
discovered at each depth.
(i.e. show how many links are required to reach a link)
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
from numpy import arange
except ImportError:
return
self.reporter("Analysis of link depth")
fig = Figure(figsize=(4, 2.5))
# Draw a histogram
width = 0.9
rect = [0.12, 0.08, 0.9, 0.85]
ax = fig.add_axes(rect)
left = arange(len(self.linkDepth))
plot = ax.bar(left, self.linkDepth, width=width)
# Add the x axis labels
ax.set_xticks(left+(width*0.5))
ax.set_xticklabels(left)
chart = StringIO()
canvas = self.FigureCanvas(fig)
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter(image)
class ReportData():
"""
Class to hold the data collected from the smoke test ready for reporting
Instances of this class will be held in the dictionary results which will
be keyed on the url. This way, in an attempt to minimise the memory used,
the url doesn't need to be stored in this class.
The class will have the following properties
broken: boolean
exception: boolean
http_code: integer
ticket: URL of any ticket linked with this url
parent: the parent URL of this url
depth: how deep is this url
duration: how long did it take to get the url
"""
def is_broken(self):
if hasattr(self, "broken"):
return self.broken
return False
def threw_exception(self):
if hasattr(self, "exception"):
return self.exception
return False
def return_http_code(self):
if hasattr(self, "http_code"):
return self.http_code
return "-"
def the_ticket(self, html):
"""
Should only have a ticket if it is broken,
but won't always have a ticket to display.
"""
if hasattr(self, "ticket"):
if html:
return "<a href=%s target=\"_blank\">Ticket</a>" % (self.ticket)
else:
return "Ticket: %s" % (self.ticket)
return "no ticket"
def get_parent(self):
if hasattr(self, "parent"):
return self.parent
return ""
def get_depth(self):
if hasattr(self, "depth"):
return self.depth
return 0
def get_duration(self):
if hasattr(self, "duration"):
return self.duration
return 0
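# Illustrative note (added commentary, not original code): the smoke test keeps one
# ReportData instance per visited URL, so a typical record behaves like
#   rd = ReportData(); rd.depth = 2; rd.duration = 0.85
#   rd.is_broken()    -> False  (the "broken" attribute was never set)
#   rd.get_duration() -> 0.85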
| flavour/tldrmp | modules/tests/smoke/broken_links.py | Python | mit | 26,011 | [
"VisIt"
] | 8806cc21c65c14e4e935aebaa8dc0cf9eddc51c2c5716a2f72e2873a3ecc3631 |
from aiida.parsers.parser import Parser
from aiida.parsers.exceptions import OutputParsingError
from aiida.orm.data.array import ArrayData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.array.trajectory import TrajectoryData
import numpy as np
def read_lammps_trajectory(file_name,
limit_number_steps=100000000,
initial_cut=1,
end_cut=None,
timestep=1):
import mmap
#Time in picoseconds
#Coordinates in Angstroms
#Starting reading
print("Reading LAMMPS trajectory")
print("This could take long, please wait..")
#Dimensionality of LAMMP calculation
number_of_dimensions = 3
step_ids = []
data = []
cells = []
counter = 0
bounds = None
number_of_atoms = None
lammps_labels = False
with open(file_name, "r+") as f:
file_map = mmap.mmap(f.fileno(), 0)
while True:
counter += 1
#Read time steps
position_number=file_map.find('TIMESTEP')
if position_number < 0: break
file_map.seek(position_number)
file_map.readline()
step_ids.append(float(file_map.readline()))
if number_of_atoms is None:
#Read number of atoms
position_number=file_map.find('NUMBER OF ATOMS')
file_map.seek(position_number)
file_map.readline()
number_of_atoms = int(file_map.readline())
if True:
#Read cell
position_number=file_map.find('ITEM: BOX')
file_map.seek(position_number)
file_map.readline()
bounds = []
for i in range(3):
bounds.append(file_map.readline().split())
bounds = np.array(bounds, dtype=float)
if bounds.shape[1] == 2:
bounds = np.append(bounds, np.array([0, 0, 0])[None].T ,axis=1)
xy = bounds[0, 2]
xz = bounds[1, 2]
yz = bounds[2, 2]
xlo = bounds[0, 0] - np.min([0.0, xy, xz, xy+xz])
xhi = bounds[0, 1] - np.max([0.0, xy, xz, xy+xz])
ylo = bounds[1, 0] - np.min([0.0, yz])
yhi = bounds[1, 1] - np.max([0.0, yz])
zlo = bounds[2, 0]
zhi = bounds[2, 1]
super_cell = np.array([[xhi-xlo, xy, xz],
[0, yhi-ylo, yz],
[0, 0, zhi-zlo]])
cells.append(super_cell.T)
position_number = file_map.find('ITEM: ATOMS')
file_map.seek(position_number)
lammps_labels=file_map.readline()
#Initial cut control
if initial_cut > counter:
continue
#Reading coordinates
read_coordinates = []
read_elements = []
for i in range (number_of_atoms):
line = file_map.readline().split()[0:number_of_dimensions+1]
read_coordinates.append(line[1:number_of_dimensions+1])
read_elements.append(line[0])
try:
data.append(np.array(read_coordinates, dtype=float)) #in angstroms
# print read_coordinates
except ValueError:
print("Error reading step {0}".format(counter))
break
# print(read_coordinates)
#security routine to limit maximum of steps to read and put in memory
if limit_number_steps+initial_cut < counter:
print("Warning! maximum number of steps reached! No more steps will be read")
break
if end_cut is not None and end_cut <= counter:
break
file_map.close()
data = np.array(data)
step_ids = np.array(step_ids, dtype=int)
cells = np.array(cells)
elements = np.array(read_elements)
time = np.array(step_ids)*timestep
return data, step_ids, cells, elements, time
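# Added note on the return values (with the default initial_cut, one entry per frame):
#   data     -> (n_steps, n_atoms, 3) positions in Angstrom
#   step_ids -> (n_steps,) integer LAMMPS timestep numbers
#   cells    -> (n_steps, 3, 3) cell matrices built from the box bounds
#   elements -> (n_atoms,) values from the first per-atom dump column of the last frame
#   time     -> (n_steps,) step_ids * timestep, i.e. elapsed simulated time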
class MdParser(Parser):
"""
Simple Parser for LAMMPS.
"""
def __init__(self, calc):
"""
        Initialize the instance of MdParser
"""
super(MdParser, self).__init__(calc)
def parse_with_retrieved(self, retrieved):
"""
Parses the datafolder, stores results.
"""
# suppose at the start that the job is successful
successful = True
# select the folder object
# Check that the retrieved folder is there
try:
out_folder = retrieved[self._calc._get_linkname_retrieved()]
except KeyError:
self.logger.error("No retrieved folder found")
return False, ()
# check what is inside the folder
list_of_files = out_folder.get_folder_list()
# OUTPUT file should exist
if not self._calc._OUTPUT_FILE_NAME in list_of_files:
successful = False
self.logger.error("Output file not found")
return successful, ()
# Get file and do the parsing
outfile = out_folder.get_abs_path( self._calc._OUTPUT_FILE_NAME)
ouput_trajectory = out_folder.get_abs_path( self._calc._OUTPUT_TRAJECTORY_FILE_NAME)
timestep = self._calc.inp.parameters.dict.timestep
positions, step_ids, cells, symbols, time = read_lammps_trajectory(ouput_trajectory, timestep=timestep)
# Delete trajectory once parsed
try:
import os
os.remove(ouput_trajectory)
except:
pass
# force_constants = parse_FORCE_CONSTANTS(outfile)
# look at warnings
warnings = []
with open(out_folder.get_abs_path( self._calc._SCHED_ERROR_FILE )) as f:
errors = f.read()
if errors:
warnings = [errors]
# ====================== prepare the output node ======================
# save the outputs
new_nodes_list = []
# save trajectory into node
try:
trajectory_data = TrajectoryData()
trajectory_data.set_trajectory(step_ids, cells, symbols, positions, times=time)
new_nodes_list.append(('trajectory_data', trajectory_data))
except KeyError: # keys not found in json
pass
# add the dictionary with warnings
new_nodes_list.append((self.get_linkname_outparams(), ParameterData(dict={'warnings': warnings})))
return successful, new_nodes_list
| abelcarreras/aiida_extensions | plugins/parsers/lammps/md.py | Python | mit | 6,639 | [
"LAMMPS"
] | c7eb7c9f47943b0a267032302a72875aabd845741f9ccef31b73fa49bfd0437b |
import unittest
from ...dictionaries import moe
class TestyDict(unittest.TestCase):
def setUp(self):
self.dict = moe()
def test__get_url(self):
self.assertEqual(
'https://www.moedict.tw/uni/萌',
self.dict._get_url('萌')
)
| LeaYeh/zdict | zdict/tests/dictionaries/test_moe.py | Python | gpl-3.0 | 284 | [
"MOE"
] | d4e5d0e0a4a4f943b52c1b760c055679a3a1bdcc868836749903259df4d595f6 |
from twisted.logger import Logger
from pathlib import Path
from typing import List
import sys
if sys.version_info < (3, 8):
# https://packaging.python.org/guides/creating-and-discovering-plugins/
# Actually 3.8 is the minimum version for importlib.metadata
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
log = Logger()
BLOCKS_ENTRY_POINT = 'blocktopus_blocks'
BLOCKTOPUS_DIR = Path(__file__).resolve().parent.parent / 'octopus' / 'blocktopus'
def add_plugins_dir(plugins_dir: Path):
"""
    Add a directory containing plugins. Allows plugins to be stored locally for development
rather than having to be installed.
Any folder within the directory that is a python distribution (has a setup.py file)
will be analysed, and any 'blocktopus_blocks' entry points will be registered as blocks.
"""
import sys
import setuptools
from distutils.core import run_setup
from importlib import import_module
from .block_registry import register_block
if not plugins_dir.is_dir():
log.warn("Plugins directory {plugin_dir} not found.", plugin_dir=plugins_dir)
return
for child_dir in plugins_dir.iterdir():
setup_file = child_dir / 'setup.py'
if not setup_file.is_file():
continue
log.info("Adding plugin directory {plugin_dir} to sys.path", plugin_dir=child_dir)
sys.path.append(str(child_dir))
setup_result = run_setup(setup_file, stop_after='init')
if setup_result.entry_points is None or BLOCKS_ENTRY_POINT not in setup_result.entry_points:
continue
for entry_point in setup_result.entry_points[BLOCKS_ENTRY_POINT]:
ep_name, ep_value = entry_point.split('=', 1)
entry_point = importlib_metadata.EntryPoint(ep_name.strip(), ep_value.strip(), BLOCKS_ENTRY_POINT)
block_cls = entry_point.load()
log.info(
"Found local plugin entry-point block definition {block_name} {block_cls}",
entry_point=entry_point,
block_name=block_cls.__name__,
block_cls=block_cls,
)
register_block(block_cls.__name__, block_cls)
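# Assumed on-disk layout for a local, uninstalled plugin (illustration only; the
# directory, package and class names below are hypothetical):
#   <plugins_dir>/
#     my_blocks/
#       setup.py        <- entry_points={'blocktopus_blocks': ['MyBlock = my_blocks.blocks:MyBlock']}
#       my_blocks/
#         blocks.py     <- defines class MyBlock
# add_plugins_dir(Path('<plugins_dir>')) puts each such child on sys.path and
# registers MyBlock with the block registry.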
def register_installed_entrypoint_blocks():
"""
Load and register any blocks that are exposed by the 'blocktopus_blocks' entry point in
any installed Python package.
https://packaging.python.org/guides/creating-and-discovering-plugins/#using-package-metadata
"""
from .block_registry import register_block
entry_points = importlib_metadata.entry_points()
if BLOCKS_ENTRY_POINT not in entry_points:
return
for entry_point in entry_points[BLOCKS_ENTRY_POINT]:
block_cls = entry_point.load()
log.info(
"Found installed plugin entry-point block definition {block_name} {block_cls}",
entry_point=entry_point,
block_name=block_cls.__name__,
block_cls=block_cls,
)
register_block(block_cls.__name__, block_cls)
def get_block_plugin_modules():
"""
(To be deprecated) Find all packages under octopus.blocks.
"""
# Add plugin machine blocks
# https://packaging.python.org/guides/creating-and-discovering-plugins/
import importlib
import pkgutil
import octopus.blocks
def iter_namespace(ns_pkg):
# Specifying the second argument (prefix) to iter_modules makes the
# returned name an absolute name instead of a relative one. This allows
# import_module to work without having to do additional modification to
# the name.
return pkgutil.walk_packages(ns_pkg.__path__, ns_pkg.__name__ + ".")
return {
name: importlib.import_module(name)
for finder, name, ispkg
in iter_namespace(octopus.blocks)
}
def get_block_plugin_block_names(check_subclass: type) -> List[str]:
"""
Return a list of names of block classes within the octopus.blocks namespace
that are subclasses of the passed check_subclass, but are not check_subclass
itself.
"""
return [
name
for mod in get_block_plugin_modules().values()
for name, cls in mod.__dict__.items()
if isinstance(cls, type)
and issubclass(cls, check_subclass)
and cls is not check_subclass
]
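# Hypothetical usage sketch (added commentary): listing installed plugin block classes
# that derive from the machine_declaration base class used later in this module:
#   from octopus.blocktopus.blocks.machines import machine_declaration
#   get_block_plugin_block_names(machine_declaration)  # -> e.g. ['machine_some_pump', ...]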
def _subclasses(cls):
return cls.__subclasses__() + [
g for s in cls.__subclasses__()
for g in _subclasses(s)
]
def get_machine_js_definitions():
from octopus.blocktopus.blocks.machines import machine_declaration
for block_cls in _subclasses(machine_declaration):
try:
yield (block_cls.__name__, block_cls.get_interface_definition())
except AttributeError:
pass
def get_connection_js_definitions():
from octopus.blocktopus.blocks.machines import connection_declaration
for connection_cls in _subclasses(connection_declaration):
try:
yield (connection_cls.__name__, connection_cls.get_interface_definition())
except AttributeError:
pass
def build_machine_block_definition_js(filename):
import json
with open(filename, 'w') as fp:
fp.write("// Auto-generated file\n\n")
for name, definition in get_machine_js_definitions():
fp.write(f"Blockly.Blocks.addMachineBlock('{name}', {json.dumps(definition)});\n")
def build_connection_block_definition_js(filename):
import json
with open(filename, 'w') as fp:
fp.write("// Auto-generated file\n\n")
for name, definition in get_connection_js_definitions():
fp.write(f"Blockly.Blocks.addConnectionBlock('{name}', {json.dumps(definition)});\n")
| richardingham/octopus | octopus/blocktopus/plugins.py | Python | mit | 5,791 | [
"Octopus"
] | e7de00e9acbf9308c7eff08a3863de1e0383f4f5248e81077a4159514a7662f2 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
"""
import os.path
import math
import logging
import numpy
from copy import deepcopy
from base import Database, Entry, makeLogicNode, DatabaseError
import rmgpy.constants as constants
from rmgpy.thermo import NASAPolynomial, NASA, ThermoData, Wilhoit
from rmgpy.molecule import Molecule, Atom, Bond, Group
import rmgpy.molecule
from rmgpy.species import Species
#: This dictionary is used to add multiplicity to species label
_multiplicity_labels = {1:'S',2:'D',3:'T',4:'Q',5:'V',}
################################################################################
def saveEntry(f, entry):
"""
Write a Pythonic string representation of the given `entry` in the thermo
database to the file object `f`.
"""
f.write('entry(\n')
f.write(' index = {0:d},\n'.format(entry.index))
f.write(' label = "{0}",\n'.format(entry.label))
if isinstance(entry.item, Molecule):
f.write(' molecule = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList(removeH=False))
f.write('""",\n')
elif isinstance(entry.item, Group):
f.write(' group = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList())
f.write('""",\n')
else:
f.write(' group = "{0}",\n'.format(entry.item))
if isinstance(entry.data, ThermoData):
f.write(' thermo = ThermoData(\n')
f.write(' Tdata = {0!r},\n'.format(entry.data.Tdata))
f.write(' Cpdata = {0!r},\n'.format(entry.data.Cpdata))
f.write(' H298 = {0!r},\n'.format(entry.data.H298))
f.write(' S298 = {0!r},\n'.format(entry.data.S298))
if entry.data.Tmin is not None: f.write(' Tmin = {0!r},\n'.format(entry.data.Tmin))
if entry.data.Tmax is not None: f.write(' Tmax = {0!r},\n'.format(entry.data.Tmax))
f.write(' ),\n')
elif isinstance(entry.data, Wilhoit):
f.write(' thermo = Wilhoit(\n')
f.write(' cp0 = {0!r},\n'.format(entry.data.cp0))
f.write(' cpInf = {0!r},\n'.format(entry.data.cpInf))
f.write(' a0 = {0:g},\n'.format(entry.data.a0))
f.write(' a1 = {0:g},\n'.format(entry.data.a1))
f.write(' a2 = {0:g},\n'.format(entry.data.a2))
f.write(' a3 = {0:g},\n'.format(entry.data.a3))
f.write(' B = {0!r},\n'.format(entry.data.B))
f.write(' H0 = {0!r},\n'.format(entry.data.H0))
f.write(' S0 = {0!r},\n'.format(entry.data.S0))
if entry.data.Tmin is not None: f.write(' Tmin = {0!r},\n'.format(entry.data.Tmin))
if entry.data.Tmax is not None: f.write(' Tmax = {0!r},\n'.format(entry.data.Tmax))
f.write(' ),\n')
elif isinstance(entry.data, NASA):
f.write(' thermo = NASA(\n')
f.write(' polynomials = [\n')
for poly in entry.data.polynomials:
f.write(' {0!r},\n'.format(poly))
f.write(' ],\n')
if entry.data.Tmin is not None: f.write(' Tmin = {0!r},\n'.format(entry.data.Tmin))
if entry.data.Tmax is not None: f.write(' Tmax = {0!r},\n'.format(entry.data.Tmax))
f.write(' ),\n')
else:
f.write(' thermo = {0!r},\n'.format(entry.data))
if entry.reference is not None: f.write(' reference = {0!r},\n'.format(entry.reference))
if entry.referenceType != "": f.write(' referenceType = "{0}",\n'.format(entry.referenceType))
f.write(' shortDesc = u"""')
try:
f.write(entry.shortDesc.encode('utf-8'))
except:
f.write(entry.shortDesc.strip().encode('ascii', 'ignore'))
f.write('""",\n')
f.write(' longDesc = \n')
f.write('u"""\n')
try:
f.write(entry.longDesc.strip().encode('utf-8') + "\n")
except:
f.write(entry.longDesc.strip().encode('ascii', 'ignore')+ "\n")
f.write('""",\n')
f.write(')\n\n')
def generateOldLibraryEntry(data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
if isinstance(data, ThermoData):
return '{0:9g} {1:9g} {2:9g} {3:9g} {4:9g} {5:9g} {6:9g} {7:9g} {8:9g} {9:9g} {10:9g} {11:9g}'.format(
data.H298.value_si/4184.,
data.S298.value_si/4.184,
data.Cpdata.value_si[0]/4.184,
data.Cpdata.value_si[1]/4.184,
data.Cpdata.value_si[2]/4.184,
data.Cpdata.value_si[3]/4.184,
data.Cpdata.value_si[4]/4.184,
data.Cpdata.value_si[5]/4.184,
data.Cpdata.value_si[6]/4.184,
data.H298.uncertainty/4184.,
data.S298.uncertainty/4.184,
max(data.Cpdata.uncertainty)/4.184,
)
elif isinstance(data, basestring):
return data
else:
return '{0:9g} {1:9g} {2:9g} {3:9g} {4:9g} {5:9g} {6:9g} {7:9g} {8:9g} {9:9g} {10:9g} {11:9g}'.format(
data.getEnthalpy(298)/4184.,
data.getEntropy(298)/4.184,
data.getHeatCapacity(300)/4.184,
data.getHeatCapacity(400)/4.184,
data.getHeatCapacity(500)/4.184,
data.getHeatCapacity(600)/4.184,
data.getHeatCapacity(800)/4.184,
data.getHeatCapacity(1000)/4.184,
data.getHeatCapacity(1500)/4.184,
0,
0,
0,
)
def processOldLibraryEntry(data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
return ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],"K"),
Cpdata = ([float(d) for d in data[2:9]],"cal/(mol*K)","+|-",float(data[11])),
H298 = (float(data[0]),"kcal/mol","+|-",float(data[9])),
S298 = (float(data[1]),"cal/(mol*K)","+|-",float(data[10])),
)
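# Added note (illustration only): an old-style library entry is a row of 12 values,
#   H298  S298  Cp300 Cp400 Cp500 Cp600 Cp800 Cp1000 Cp1500  dH298  dS298  dCp
# in kcal/mol and cal/(mol*K); processOldLibraryEntry wraps them into a ThermoData
# object, while generateOldLibraryEntry performs the reverse conversion from SI units.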
################################################################################
class ThermoDepository(Database):
"""
A class for working with the RMG thermodynamics depository.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self, index, label, molecule, thermo, reference=None, referenceType='', shortDesc='', longDesc=''):
entry = Entry(
index = index,
label = label,
item = Molecule().fromAdjacencyList(molecule),
data = thermo,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
)
self.entries[label] = entry
return entry
def saveEntry(self, f, entry):
"""
Write the given `entry` in the thermo database to the file object `f`.
"""
return saveEntry(f, entry)
################################################################################
class ThermoLibrary(Database):
"""
A class for working with a RMG thermodynamics library.
"""
def __init__(self, label='', name='',solvent=None, shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self,
index,
label,
molecule,
thermo,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
):
molecule = Molecule().fromAdjacencyList(molecule)
# Internal checks for adding entry to the thermo library
if label in self.entries.keys():
raise DatabaseError('Found a duplicate molecule with label {0} in the thermo library. Please correct your library.'.format(label))
for entry in self.entries.values():
if molecule.isIsomorphic(entry.item):
if molecule.multiplicity == entry.item.multiplicity:
raise DatabaseError('Adjacency list and multiplicity of {0} matches that of existing molecule {1} in thermo library. Please correct your library.'.format(label, entry.label))
self.entries[label] = Entry(
index = index,
label = label,
item = molecule,
data = thermo,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
)
def saveEntry(self, f, entry):
"""
Write the given `entry` in the thermo database to the file object `f`.
"""
return saveEntry(f, entry)
def generateOldLibraryEntry(self, data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
return generateOldLibraryEntry(data)
def processOldLibraryEntry(self, data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
return processOldLibraryEntry(data)
################################################################################
class ThermoGroups(Database):
"""
A class for working with an RMG thermodynamics group additivity database.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self,
index,
label,
group,
thermo,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
):
if group[0:3].upper() == 'OR{' or group[0:4].upper() == 'AND{' or group[0:7].upper() == 'NOT OR{' or group[0:8].upper() == 'NOT AND{':
item = makeLogicNode(group)
else:
item = Group().fromAdjacencyList(group)
self.entries[label] = Entry(
index = index,
label = label,
item = item,
data = thermo,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
)
def saveEntry(self, f, entry):
"""
Write the given `entry` in the thermo database to the file object `f`.
"""
return saveEntry(f, entry)
def generateOldLibraryEntry(self, data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
return generateOldLibraryEntry(data)
def processOldLibraryEntry(self, data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
return processOldLibraryEntry(data)
################################################################################
class ThermoDatabase(object):
"""
A class for working with the RMG thermodynamics database.
"""
def __init__(self):
self.depository = {}
self.libraries = {}
self.groups = {}
self.libraryOrder = []
self.local_context = {
'ThermoData': ThermoData,
'Wilhoit': Wilhoit,
'NASAPolynomial': NASAPolynomial,
'NASA': NASA,
}
self.global_context = {}
def __reduce__(self):
"""
A helper function used when pickling a ThermoDatabase object.
"""
d = {
'depository': self.depository,
'libraries': self.libraries,
'groups': self.groups,
'libraryOrder': self.libraryOrder,
}
return (ThermoDatabase, (), d)
def __setstate__(self, d):
"""
A helper function used when unpickling a ThermoDatabase object.
"""
self.depository = d['depository']
self.libraries = d['libraries']
self.groups = d['groups']
self.libraryOrder = d['libraryOrder']
def load(self, path, libraries=None, depository=True):
"""
Load the thermo database from the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
if depository:
self.loadDepository(os.path.join(path, 'depository'))
else:
self.depository = {}
self.loadLibraries(os.path.join(path, 'libraries'), libraries)
self.loadGroups(os.path.join(path, 'groups'))
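# A minimal usage sketch (the path and library name below are hypothetical; the
# standard RMG-database layout with depository/, libraries/ and groups/ subfolders
# is assumed):
#
#     database = ThermoDatabase()
#     database.load('/path/to/RMG-database/input/thermo',
#                   libraries=['primaryThermoLibrary'], depository=True)
#     thermo = database.getThermoData(species)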
def loadDepository(self, path):
"""
Load the thermo database from the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
self.depository = {}
self.depository['stable'] = ThermoDepository().load(os.path.join(path, 'stable.py'), self.local_context, self.global_context)
self.depository['radical'] = ThermoDepository().load(os.path.join(path, 'radical.py'), self.local_context, self.global_context)
def loadLibraries(self, path, libraries=None):
"""
Load the thermo database from the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
self.libraries = {}; self.libraryOrder = []
for (root, dirs, files) in os.walk(os.path.join(path)):
for f in files:
name, ext = os.path.splitext(f)
if ext.lower() == '.py' and (libraries is None or name in libraries):
logging.info('Loading thermodynamics library from {0} in {1}...'.format(f, root))
library = ThermoLibrary()
library.load(os.path.join(root, f), self.local_context, self.global_context)
library.label = os.path.splitext(f)[0]
self.libraries[library.label] = library
self.libraryOrder.append(library.label)
if libraries is not None:
self.libraryOrder = libraries
def loadGroups(self, path):
"""
Load the thermo database from the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
logging.info('Loading thermodynamics group database from {0}...'.format(path))
self.groups = {}
self.groups['group'] = ThermoGroups(label='group').load(os.path.join(path, 'group.py' ), self.local_context, self.global_context)
self.groups['gauche'] = ThermoGroups(label='gauche').load(os.path.join(path, 'gauche.py' ), self.local_context, self.global_context)
self.groups['int15'] = ThermoGroups(label='int15').load(os.path.join(path, 'int15.py' ), self.local_context, self.global_context)
self.groups['ring'] = ThermoGroups(label='ring').load(os.path.join(path, 'ring.py' ), self.local_context, self.global_context)
self.groups['radical'] = ThermoGroups(label='radical').load(os.path.join(path, 'radical.py'), self.local_context, self.global_context)
self.groups['polycyclic'] = ThermoGroups(label='polycyclic').load(os.path.join(path, 'polycyclic.py'), self.local_context, self.global_context)
self.groups['other'] = ThermoGroups(label='other').load(os.path.join(path, 'other.py' ), self.local_context, self.global_context)
def save(self, path):
"""
Save the thermo database to the given `path` on disk, where `path`
points to the top-level folder of the thermo database.
"""
path = os.path.abspath(path)
if not os.path.exists(path): os.mkdir(path)
self.saveDepository(os.path.join(path, 'depository'))
self.saveLibraries(os.path.join(path, 'libraries'))
self.saveGroups(os.path.join(path, 'groups'))
def saveDepository(self, path):
"""
Save the thermo depository to the given `path` on disk, where `path`
points to the top-level folder of the thermo depository.
"""
if not os.path.exists(path): os.mkdir(path)
for depo in self.depository.keys():
self.depository[depo].save(os.path.join(path, depo+'.py'))
def saveLibraries(self, path):
"""
Save the thermo libraries to the given `path` on disk, where `path`
points to the top-level folder of the thermo libraries.
"""
if not os.path.exists(path): os.mkdir(path)
for library in self.libraries.values():
library.save(os.path.join(path, '{0}.py'.format(library.label)))
def saveGroups(self, path):
"""
Save the thermo groups to the given `path` on disk, where `path`
points to the top-level folder of the thermo groups.
"""
if not os.path.exists(path): os.mkdir(path)
for group in self.groups.keys():
self.groups[group].save(os.path.join(path, group+'.py'))
def loadOld(self, path):
"""
Load the old RMG thermo database from the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
# The old database does not have a depository, so create an empty one
self.depository = {}
self.depository['stable'] = ThermoDepository(label='stable', name='Stable Molecules')
self.depository['radical'] = ThermoDepository(label='radical', name='Radical Molecules')
for (root, dirs, files) in os.walk(os.path.join(path, 'thermo_libraries')):
if os.path.exists(os.path.join(root, 'Dictionary.txt')) and os.path.exists(os.path.join(root, 'Library.txt')):
library = ThermoLibrary(label=os.path.basename(root), name=os.path.basename(root))
library.loadOld(
dictstr = os.path.join(root, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(root, 'Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = False,
)
library.label = os.path.basename(root)
self.libraries[library.label] = library
self.groups = {}
self.groups['group'] = ThermoGroups(label='group', name='Functional Group Additivity Values').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Group_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Group_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Group_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['gauche'] = ThermoGroups(label='gauche', name='Gauche Interaction Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Gauche_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Gauche_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Gauche_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['int15'] = ThermoGroups(label='int15', name='1,5-Interaction Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', '15_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', '15_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', '15_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['radical'] = ThermoGroups(label='radical', name='Radical Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Radical_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Radical_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Radical_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['ring'] = ThermoGroups(label='ring', name='Ring Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Ring_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Ring_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Ring_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['polycyclic'] = ThermoGroups(label='polycyclic', name='Polycyclic Ring Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Polycyclic_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Polycyclic_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Polycyclic_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
self.groups['other'] = ThermoGroups(label='other', name='Other Corrections').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Other_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Other_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Other_Library.txt'),
numParameters = 12,
numLabels = 1,
pattern = True,
)
def pruneHeteroatoms(self, allowed=['C','H','O','S']):
"""
Remove all species from thermo libraries that contain atoms other than those allowed.
This is useful before saving the database for use in RMG-Java
"""
allowedElements = [rmgpy.molecule.element.getElement(label) for label in allowed]
for library in self.libraries.values():
logging.info("Removing hetoroatoms from thermo library '{0}'".format(library.name))
toDelete = []
for entry in library.entries.values():
for atom in entry.item.atoms:
if atom.element not in allowedElements:
toDelete.append(entry.label)
break
for label in toDelete:
logging.info(" {0}".format(label))
library.entries.pop(label)
def saveOld(self, path):
"""
Save the old RMG thermo database to the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
# Depository not used in old database, so it is not saved
librariesPath = os.path.join(path, 'thermo_libraries')
if not os.path.exists(librariesPath): os.mkdir(librariesPath)
for library in self.libraries.values():
libraryPath = os.path.join(librariesPath, library.label)
if not os.path.exists(libraryPath): os.mkdir(libraryPath)
library.saveOld(
dictstr = os.path.join(libraryPath, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(libraryPath, 'Library.txt'),
)
groupsPath = os.path.join(path, 'thermo_groups')
if not os.path.exists(groupsPath): os.mkdir(groupsPath)
self.groups['group'].saveOld(
dictstr = os.path.join(groupsPath, 'Group_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Group_Tree.txt'),
libstr = os.path.join(groupsPath, 'Group_Library.txt'),
)
self.groups['gauche'].saveOld(
dictstr = os.path.join(groupsPath, 'Gauche_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Gauche_Tree.txt'),
libstr = os.path.join(groupsPath, 'Gauche_Library.txt'),
)
self.groups['int15'].saveOld(
dictstr = os.path.join(groupsPath, '15_Dictionary.txt'),
treestr = os.path.join(groupsPath, '15_Tree.txt'),
libstr = os.path.join(groupsPath, '15_Library.txt'),
)
self.groups['radical'].saveOld(
dictstr = os.path.join(groupsPath, 'Radical_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Radical_Tree.txt'),
libstr = os.path.join(groupsPath, 'Radical_Library.txt'),
)
self.groups['ring'].saveOld(
dictstr = os.path.join(groupsPath, 'Ring_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Ring_Tree.txt'),
libstr = os.path.join(groupsPath, 'Ring_Library.txt'),
)
self.groups['polycyclic'].saveOld(
dictstr = os.path.join(groupsPath, 'Polycyclic_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Polycyclic_Tree.txt'),
libstr = os.path.join(groupsPath, 'Polycyclic_Library.txt'),
)
self.groups['other'].saveOld(
dictstr = os.path.join(groupsPath, 'Other_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Other_Tree.txt'),
libstr = os.path.join(groupsPath, 'Other_Library.txt'),
)
def getThermoData(self, species, trainingSet=None, quantumMechanics=None):
"""
Return the thermodynamic parameters for a given :class:`Species`
object `species`. This function first searches the loaded libraries
in order, returning the first match found, before falling back to
estimation via group additivity.
Returns: ThermoData
"""
thermo0 = None
thermo0 = self.getThermoDataFromLibraries(species)
if thermo0 is not None:
logging.info("Found thermo for {0} in {1}".format(species.label,thermo0[0].comment.lower()))
assert len(thermo0) == 3, "thermo0 should be a tuple at this point: (thermoData, library, entry)"
thermo0 = thermo0[0]
elif quantumMechanics:
original_molecule = species.molecule[0]
if quantumMechanics.settings.onlyCyclics and not original_molecule.isCyclic():
pass
else: # try a QM calculation
if original_molecule.getRadicalCount() > quantumMechanics.settings.maxRadicalNumber:
# Too many radicals for direct calculation: use HBI.
logging.info("{0} radicals on {1} exceeds limit of {2}. Using HBI method.".format(
original_molecule.getRadicalCount(),
species.label,
quantumMechanics.settings.maxRadicalNumber,
))
# Need to estimate thermo via each resonance isomer
thermo = []
for molecule in species.molecule:
molecule.clearLabeledAtoms()
# Try to see if the saturated molecule can be found in the libraries
tdata = self.estimateRadicalThermoViaHBI(molecule, self.getThermoDataFromLibraries)
priority = 1
if tdata is None:
# Then attempt quantum mechanics job on the saturated molecule
tdata = self.estimateRadicalThermoViaHBI(molecule, quantumMechanics.getThermoData)
priority = 2
if tdata is None:
# Fall back to group additivity
tdata = self.estimateThermoViaGroupAdditivity(molecule)
priority = 3
thermo.append((priority, tdata.getEnthalpy(298.), molecule, tdata))
if len(thermo) > 1:
# Sort thermo first by the priority, then by the most stable H298 value
thermo = sorted(thermo, key=lambda x: (x[0], x[1]))
for i in range(len(thermo)):
logging.info("Resonance isomer {0} {1} gives H298={2:.0f} J/mol".format(i+1, thermo[i][2].toSMILES(), thermo[i][1]))
# Save resonance isomers reordered by their thermo
species.molecule = [item[2] for item in thermo]
original_molecule = species.molecule[0]
thermo0 = thermo[0][3]
# If priority == 2
if thermo[0][0] == 2:
# Write the QM molecule thermo to a library so that it can be used in future RMG jobs. (Do this only if it came from a QM calculation)
quantumMechanics.database.loadEntry(index = len(quantumMechanics.database.entries) + 1,
label = original_molecule.toSMILES() + '_({0})'.format(_multiplicity_labels[original_molecule.multiplicity]),
molecule = original_molecule.toAdjacencyList(),
thermo = thermo0,
shortDesc = thermo0.comment
)
# # For writing thermodata HBI check for QM molecules
# with open('thermoHBIcheck.txt','a') as f:
# f.write('// {0!r}\n'.format(thermo0).replace('),','),\n// '))
# f.write('{0}\n'.format(original_molecule.toSMILES()))
# f.write('{0}\n\n'.format(original_molecule.toAdjacencyList(removeH=False)))
else: # Not too many radicals: do a direct calculation.
thermo0 = quantumMechanics.getThermoData(original_molecule) # returns None if it fails
if thermo0 is not None:
# Write the QM molecule thermo to a library so that it can be used in future RMG jobs.
quantumMechanics.database.loadEntry(index = len(quantumMechanics.database.entries) + 1,
label = original_molecule.toSMILES() + '_({0})'.format(_multiplicity_labels[original_molecule.multiplicity]),
molecule = original_molecule.toAdjacencyList(),
thermo = thermo0,
shortDesc = thermo0.comment
)
if thermo0 is None:
# Use group additivity methods to determine thermo for molecule (or if QM fails completely)
original_molecule = species.molecule[0]
if original_molecule.getRadicalCount() > 0:
# Molecule is a radical, use the HBI method
thermo = []
for molecule in species.molecule:
molecule.clearLabeledAtoms()
# First see if the saturated molecule is in the libraries
tdata = self.estimateRadicalThermoViaHBI(molecule, self.getThermoDataFromLibraries)
priority = 1
if tdata is None:
# Otherwise use normal group additivity to obtain the thermo for the molecule
tdata = self.estimateThermoViaGroupAdditivity(molecule)
priority = 2
thermo.append((priority, tdata.getEnthalpy(298.), molecule, tdata))
if len(thermo) > 1:
# Sort thermo first by the priority, then by the most stable H298 value
thermo = sorted(thermo, key=lambda x: (x[0], x[1]))
for i in range(len(thermo)):
logging.info("Resonance isomer {0} {1} gives H298={2:.0f} J/mol".format(i+1, thermo[i][2].toSMILES(), thermo[i][1]))
# Save resonance isomers reordered by their thermo
species.molecule = [item[2] for item in thermo]
thermo0 = thermo[0][3]
else:
# Saturated molecule, does not need HBI method
thermo0 = self.getThermoDataFromGroups(species)
# Make sure to calculate Cp0 and CpInf if it wasn't done already
self.findCp0andCpInf(species, thermo0)
# Return the resulting thermo parameters
return thermo0
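# Summary of the lookup cascade implemented above: (1) thermo libraries, searched
# in library order; (2) if a quantumMechanics interface is supplied, a direct QM
# calculation, or HBI corrections on top of QM/library data for radicals exceeding
# maxRadicalNumber; (3) group additivity (with HBI for radicals) as the final
# fallback. Cp0 and CpInf are filled in at the end via findCp0andCpInf.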
def getThermoDataFromLibraries(self, species, trainingSet=None):
"""
Return the thermodynamic parameters for a given :class:`Species`
object `species`. This function first searches the loaded libraries
in order, returning the first match found, before failing and returning None.
`trainingSet` is used to identify if function is called during training set or not.
During training set calculation we want to use gas phase thermo to not affect reverse
rate calculation.
Returns: ThermoData or None
"""
thermoData = None
#chatelak 11/15/14: modification to introduce liquid phase thermo libraries
libraryList=deepcopy(self.libraryOrder) #copy the value so as not to affect the initial object
if rmgpy.rmg.main.solvent is not None:
liqLibraries=[]
#Liquid phase simulation part:
#This "for" block identifies liquid phase libraries and stores them in liqLibraries
for iterLib in libraryList:
if self.libraries[iterLib].solvent:
liqLibraries.append(iterLib)
#Check liqLibraries for thermo of the species and return the first match, but only if this function was not called for the training set
if liqLibraries and trainingSet is None:
for label in liqLibraries:
thermoData = self.getThermoDataFromLibrary(species, self.libraries[label])
if thermoData is not None:
assert len(thermoData) == 3, "thermoData should be a tuple at this point"
#Careful: this comment string is used later to decide whether or not to apply solvation to species matched from thermo libraries. If required, modify it carefully.
thermoData[0].comment += 'Liquid thermo library: ' + label
return thermoData
#Remove liqLibraries from libraryList if called for the training set (trainingSet=True) or if no thermo was found in liqLibraries
#if no liquid library found this does nothing.
for libIter in liqLibraries:
libraryList.remove(libIter)
# This part runs for gas phase simulations, training set calculations, or liquid phase simulations where no liquid libraries were found or no matching species was found in them.
# For a gas phase simulation, libraryList = self.libraryOrder (just like before these modifications) and all libraries are gas phase, as already checked by the checkLibraries function in database.load()
# Check the libraries in order; return the first successful match
for label in libraryList:
thermoData = self.getThermoDataFromLibrary(species, self.libraries[label])
if thermoData is not None:
assert len(thermoData) == 3, "thermoData should be a tuple at this point"
if rmgpy.rmg.main.solvent is not None and trainingSet is None:
thermoData[0].comment += 'Thermo library corrected for liquid phase: ' + label
else:
thermoData[0].comment += 'Thermo library: ' + label
return thermoData
return None
def findCp0andCpInf(self, species, thermoData):
"""
Calculate the Cp0 and CpInf values, and add them to the thermoData object.
Modifies thermoData in place and doesn't return anything
"""
if not isinstance(thermoData,ThermoData):
return # Not a ThermoData object (e.g. a Wilhoit or NASA fit), so there is no Cp0/CpInf to add
if thermoData.Cp0 is None:
Cp0 = species.calculateCp0()
thermoData.Cp0 = (Cp0,"J/(mol*K)")
if thermoData.CpInf is None:
CpInf = species.calculateCpInf()
thermoData.CpInf = (CpInf,"J/(mol*K)")
def getAllThermoData(self, species):
"""
Return all possible sets of thermodynamic parameters for a given
:class:`Species` object `species`. The hits from the depository come
first, then the libraries (in order), and then the group additivity
estimate. This method is useful for a generic search job.
Returns: a list of tuples (ThermoData, source, entry)
(Source is a library or depository, or None)
"""
thermoDataList = []
# Data from depository comes first
thermoDataList.extend(self.getThermoDataFromDepository(species))
# Data from libraries comes second
for label in self.libraryOrder:
data = self.getThermoDataFromLibrary(species, self.libraries[label])
if data:
assert len(data) == 3, "thermoData should be a tuple at this point"
data[0].comment += label
thermoDataList.append(data)
# Last entry is always the estimate from group additivity
# Make it a tuple
data = (self.getThermoDataFromGroups(species), None, None)
thermoDataList.append(data)
# Return all of the resulting thermo parameters
return thermoDataList
def getThermoDataFromDepository(self, species):
"""
Return all possible sets of thermodynamic parameters for a given
:class:`Species` object `species` from the depository. If no
depository is loaded, a :class:`DatabaseError` is raised.
Returns: a list of tuples (thermoData, depository, entry) without any Cp0 or CpInf data.
"""
items = []
for label, entry in self.depository['stable'].entries.iteritems():
for molecule in species.molecule:
if molecule.isIsomorphic(entry.item):
items.append((deepcopy(entry.data), self.depository['stable'], entry))
break
for label, entry in self.depository['radical'].entries.iteritems():
for molecule in species.molecule:
if molecule.isIsomorphic(entry.item):
items.append((deepcopy(entry.data), self.depository['radical'], entry))
break
return items
def getThermoDataFromLibrary(self, species, library):
"""
Return the set of thermodynamic parameters corresponding to a given
:class:`Species` object `species` from the specified thermodynamics
`library`. If `library` is a string, the list of libraries is searched
for a library with that name. If no match is found in that library,
``None`` is returned. If no corresponding library is found, a
:class:`DatabaseError` is raised.
Returns a tuple: (ThermoData, library, entry) or None.
"""
for label, entry in library.entries.iteritems():
for molecule in species.molecule:
if molecule.isIsomorphic(entry.item) and entry.data is not None:
thermoData = deepcopy(entry.data)
self.findCp0andCpInf(species, thermoData)
return (thermoData, library, entry)
return None
def getThermoDataFromGroups(self, species):
"""
Return the set of thermodynamic parameters corresponding to a given
:class:`Species` object `species` by estimation using the group
additivity values. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
The resonance isomer (molecule) with the lowest H298 is used, and as a side-effect
the resonance isomers (items in `species.molecule` list) are sorted in ascending order.
Returns: ThermoData
"""
thermo = []
for molecule in species.molecule:
molecule.clearLabeledAtoms()
molecule.updateAtomTypes()
tdata = self.estimateThermoViaGroupAdditivity(molecule)
thermo.append(tdata)
H298 = numpy.array([t.getEnthalpy(298.) for t in thermo])
indices = H298.argsort()
species.molecule = [species.molecule[ind] for ind in indices]
thermoData = thermo[indices[0]]
self.findCp0andCpInf(species, thermoData)
return thermoData
def estimateRadicalThermoViaHBI(self, molecule, stableThermoEstimator ):
"""
Estimate the thermodynamics of a radical by saturating it,
applying the provided stableThermoEstimator method on the saturated species,
then applying hydrogen bond increment corrections for the radical
site(s) and correcting for the symmetry.
"""
assert molecule.isRadical(), "Method only valid for radicals."
saturatedStruct = molecule.copy(deep=True)
added = saturatedStruct.saturate()
saturatedStruct.props['saturated'] = True
# Get thermo estimate for saturated form of structure
if stableThermoEstimator == self.getThermoDataFromLibraries:
# Get data from libraries
saturatedSpec = Species(molecule=[saturatedStruct])
thermoData_sat = stableThermoEstimator(saturatedSpec)
if thermoData_sat:
assert len(thermoData_sat) == 3, "thermoData should be a tuple at this point: (thermoData, library, entry)"
thermoData_sat = thermoData_sat[0]
else:
thermoData_sat = stableThermoEstimator(saturatedStruct)
if thermoData_sat is None:
# logging.info("Thermo data of saturated {0} of molecule {1} is None.".format(saturatedStruct, molecule))
return None
assert thermoData_sat is not None, "Thermo data of saturated {0} of molecule {1} is None!".format(saturatedStruct, molecule)
# Convert to ThermoData object if necessary in order to add and subtract from enthalpy and entropy values
if not isinstance(thermoData_sat, ThermoData):
thermoData_sat = thermoData_sat.toThermoData()
if not stableThermoEstimator == self.computeGroupAdditivityThermo:
#remove the symmetry contribution to the entropy of the saturated molecule
##assumes that the thermo data comes from QMTP or from a thermo library
thermoData_sat.S298.value_si += constants.R * math.log(saturatedStruct.getSymmetryNumber())
thermoData = thermoData_sat
# Correct entropy for symmetry number of radical structure
thermoData.S298.value_si -= constants.R * math.log(molecule.getSymmetryNumber())
# For each radical site, get radical correction
# Only one radical site should be considered at a time; all others
# should be saturated with hydrogen atoms
for atom in added:
# Remove the added hydrogen atoms and bond and restore the radical
for H, bond in added[atom]:
saturatedStruct.removeBond(bond)
saturatedStruct.removeAtom(H)
atom.incrementRadical()
saturatedStruct.updateConnectivityValues()
try:
self.__addGroupThermoData(thermoData, self.groups['radical'], saturatedStruct, {'*':atom})
except KeyError:
logging.error("Couldn't find in radical thermo database:")
logging.error(molecule)
logging.error(molecule.toAdjacencyList())
raise
# Re-saturate
for H, bond in added[atom]:
saturatedStruct.addAtom(H)
saturatedStruct.addBond(bond)
atom.decrementRadical()
# Subtract the enthalpy of the added hydrogens
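# (52.103 kcal/mol is the standard enthalpy of formation of a hydrogen atom at
# 298 K; the factor of 4184 converts kcal/mol to the J/mol used internally.)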
for H, bond in added[atom]:
thermoData.H298.value_si -= 52.103 * 4184
return thermoData
def estimateThermoViaGroupAdditivity(self, molecule):
"""
Return the set of thermodynamic parameters corresponding to a given
:class:`Molecule` object `molecule` by estimation using the group
additivity values. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
"""
# For thermo estimation we need the atoms to already be sorted because we
# iterate over them; if the order changes during the iteration then we
# will probably not visit the right atoms, and so will get the thermo wrong
molecule.sortVertices()
if molecule.isRadical(): # radical species
thermoData = self.estimateRadicalThermoViaHBI(molecule, self.computeGroupAdditivityThermo)
return thermoData
else: # non-radical species
thermoData = self.computeGroupAdditivityThermo(molecule)
# Correct entropy for symmetry number
if not 'saturated' in molecule.props:
thermoData.S298.value_si -= constants.R * math.log(molecule.getSymmetryNumber())
return thermoData
def computeGroupAdditivityThermo(self, molecule):
"""
Return the set of thermodynamic parameters corresponding to a given
:class:`Molecule` object `molecule` by estimation using the group
additivity values. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
The entropy is not corrected for the symmetry of the molecule.
This should be done later by the calling function.
"""
assert not molecule.isRadical(), "This method is only for saturated non-radical species."
# For thermo estimation we need the atoms to already be sorted because we
# iterate over them; if the order changes during the iteration then we
# will probably not visit the right atoms, and so will get the thermo wrong
molecule.sortVertices()
# Create the ThermoData object
thermoData = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],"K"),
Cpdata = ([0.0,0.0,0.0,0.0,0.0,0.0,0.0],"J/(mol*K)"),
H298 = (0.0,"kJ/mol"),
S298 = (0.0,"J/(mol*K)"),
)
cyclic = molecule.isCyclic()
# Generate estimate of thermodynamics
for atom in molecule.atoms:
# Iterate over heavy (non-hydrogen) atoms
if atom.isNonHydrogen():
# Get initial thermo estimate from main group database
try:
self.__addGroupThermoData(thermoData, self.groups['group'], molecule, {'*':atom})
except KeyError:
logging.error("Couldn't find in main thermo database:")
logging.error(molecule)
logging.error(molecule.toAdjacencyList())
raise
# Correct for gauche and 1,5- interactions
if not cyclic:
try:
self.__addGroupThermoData(thermoData, self.groups['gauche'], molecule, {'*':atom})
except KeyError: pass
try:
self.__addGroupThermoData(thermoData, self.groups['int15'], molecule, {'*':atom})
except KeyError: pass
try:
self.__addGroupThermoData(thermoData, self.groups['other'], molecule, {'*':atom})
except KeyError: pass
# Do ring corrections separately because we only want to match
# each ring one time
if cyclic:
if molecule.getAllPolycyclicVertices():
# If the molecule has fused ring atoms, this implies that we are dealing
# with a polycyclic ring system, for which separate ring strain corrections may not
# be adequate. Therefore, we search the polycyclic thermo group corrections
# instead of adding single ring strain corrections within the molecule.
# For now, assume only one polycyclic RSC can be found per molecule
try:
self.__addGroupThermoData(thermoData, self.groups['polycyclic'], molecule, {})
except:
logging.error("Couldn't find in polycyclic ring database:")
logging.error(molecule)
logging.error(molecule.toAdjacencyList())
raise
else:
rings = molecule.getSmallestSetOfSmallestRings()
for ring in rings:
# Make a temporary structure containing only the atoms in the ring
# NB. if any of the ring corrections depend on ligands not in the ring, they will not be found!
try:
self.__addGroupThermoData(thermoData, self.groups['ring'], molecule, {})
except KeyError:
logging.error("Couldn't find in ring database:")
logging.error(ring)
logging.error(ring.toAdjacencyList())
raise
return thermoData
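# In short, the estimate assembled above is the sum of one main-group contribution
# per heavy atom, plus gauche/1,5-interaction/other corrections for acyclic
# molecules and ring or polycyclic strain corrections for cyclic ones; the symmetry
# correction to S298 is left to the caller.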
def __addThermoData(self, thermoData1, thermoData2):
"""
Add the thermodynamic data `thermoData2` to the data `thermoData1`,
and return `thermoData1`.
"""
if len(thermoData1.Tdata.value_si) != len(thermoData2.Tdata.value_si) or any([T1 != T2 for T1, T2 in zip(thermoData1.Tdata.value_si, thermoData2.Tdata.value_si)]):
raise Exception('Cannot add these ThermoData objects due to their having different temperature points.')
for i in range(thermoData1.Tdata.value_si.shape[0]):
thermoData1.Cpdata.value_si[i] += thermoData2.Cpdata.value_si[i]
thermoData1.H298.value_si += thermoData2.H298.value_si
thermoData1.S298.value_si += thermoData2.S298.value_si
if thermoData1.comment:
thermoData1.comment += ' + {0}'.format(thermoData2.comment)
else:
thermoData1.comment = 'Thermo group additivity estimation: ' + thermoData2.comment
return thermoData1
def __addGroupThermoData(self, thermoData, database, molecule, atom):
"""
Determine the group additivity thermodynamic data for the atom `atom`
in the structure `structure`, and add it to the existing thermo data
`thermoData`.
"""
node0 = database.descendTree(molecule, atom, None)
if node0 is None:
raise KeyError('Node not found in database.')
# It's possible (and allowed) that items in the tree may not be in the
# library, in which case we need to fall up the tree until we find an
# ancestor that has an entry in the library
node = node0
while node is not None and node.data is None:
node = node.parent
if node is None:
raise DatabaseError('Unable to determine thermo parameters for {0}: no library entries for {1} or any of its ancestors.'.format(molecule, node0) )
data = node.data; comment = node.label
while isinstance(data, basestring) and data is not None:
for entry in database.entries.values():
if entry.label == data:
data = entry.data
comment = entry.label
break
data.comment = '{0}({1})'.format(database.label, comment)
# This code prints the hierarchy of the found node; useful for debugging
# result = ''
# while node is not None:
# result = ' -> ' + node.label + result
# node = node.parent
# print result[4:]
if thermoData is None:
return data
else:
return self.__addThermoData(thermoData, data)
| enochd/RMG-Py | rmgpy/data/thermo.py | Python | mit | 54,148 | [
"VisIt"
] | a670cb60dae9ec416e97b27bdc275566232d3142f80eddbaeaf838d945b37492 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import os
import re
import itertools
import warnings
import logging
import math
import six
import numpy as np
from numpy.linalg import det
from collections import OrderedDict, namedtuple
from hashlib import md5
from monty.io import zopen
from monty.os.path import zpath
from monty.json import MontyDecoder
from enum import Enum
from tabulate import tabulate
import scipy.constants as const
from pymatgen import SETTINGS
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Element, get_el_sp
from pymatgen.electronic_structure.core import Magmom
from monty.design_patterns import cached_class
from pymatgen.util.string import str_delimited
from pymatgen.util.io_utils import clean_lines
from monty.json import MSONable
"""
Classes for reading/manipulating/writing VASP input files. All major VASP input
files.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, " + \
"Vincent L Chevrier, Stephen Dacek"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Jul 16, 2012"
logger = logging.getLogger(__name__)
class Poscar(MSONable):
"""
Object for representing the data in a POSCAR or CONTCAR file.
Please note that, in the current implementation, most attributes can be set
directly.
Args:
structure (Structure): Structure object.
comment (str): Optional comment line for POSCAR. Defaults to unit
cell formula of structure. Defaults to None.
selective_dynamics (Nx3 array): bool values for selective dynamics,
where N is number of sites. Defaults to None.
true_names (bool): Set to False if the names in the POSCAR are not
well-defined and ambiguous. This situation arises commonly in
vasp < 5 where the POSCAR sometimes does not contain element
symbols. Defaults to True.
velocities (Nx3 array): Velocities for the POSCAR. Typically parsed
in MD runs or can be used to initialize velocities.
predictor_corrector (Nx3 array): Predictor corrector for the POSCAR.
Typically parsed in MD runs.
.. attribute:: structure
Associated Structure.
.. attribute:: comment
Optional comment string.
.. attribute:: true_names
Boolean indication whether Poscar contains actual real names parsed
from either a POTCAR or the POSCAR itself.
.. attribute:: selective_dynamics
Selective dynamics attribute for each site if available. A Nx3 array of
booleans.
.. attribute:: velocities
Velocities for each site (typically read in from a CONTCAR). A Nx3
array of floats.
.. attribute:: predictor_corrector
Predictor corrector coordinates and derivatives for each site; i.e.
a list of three 1x3 arrays for each site (typically read in from a MD
CONTCAR).
.. attribute:: predictor_corrector_preamble
Predictor corrector preamble contains the predictor-corrector key,
POTIM, and thermostat parameters that precede the site-specific predictor
corrector data in MD CONTCAR
.. attribute:: temperature
Temperature of velocity Maxwell-Boltzmann initialization. Initialized
to -1 (MB hasn't been performed).
"""
def __init__(self, structure, comment=None, selective_dynamics=None,
true_names=True, velocities=None, predictor_corrector=None,
predictor_corrector_preamble=None):
if structure.is_ordered:
site_properties = {}
if selective_dynamics:
site_properties["selective_dynamics"] = selective_dynamics
if velocities:
site_properties["velocities"] = velocities
if predictor_corrector:
site_properties["predictor_corrector"] = predictor_corrector
self.structure = structure.copy(site_properties=site_properties)
self.true_names = true_names
self.comment = structure.formula if comment is None else comment
self.predictor_corrector_preamble = predictor_corrector_preamble
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into POSCAR!")
self.temperature = -1
@property
def velocities(self):
return self.structure.site_properties.get("velocities")
@property
def selective_dynamics(self):
return self.structure.site_properties.get("selective_dynamics")
@property
def predictor_corrector(self):
return self.structure.site_properties.get("predictor_corrector")
@velocities.setter
def velocities(self, velocities):
self.structure.add_site_property("velocities", velocities)
@selective_dynamics.setter
def selective_dynamics(self, selective_dynamics):
self.structure.add_site_property("selective_dynamics",
selective_dynamics)
@predictor_corrector.setter
def predictor_corrector(self, predictor_corrector):
self.structure.add_site_property("predictor_corrector",
predictor_corrector)
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Poscar. Similar to 6th line in
vasp 5+ POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ POSCAR or the 6th line in vasp 4 POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def __setattr__(self, name, value):
if name in ("selective_dynamics", "velocities"):
if value is not None and len(value) > 0:
value = np.array(value)
dim = value.shape
if dim[1] != 3 or dim[0] != len(self.structure):
raise ValueError(name + " array must be same length as" +
" the structure.")
value = value.tolist()
super(Poscar, self).__setattr__(name, value)
@staticmethod
def from_file(filename, check_for_POTCAR=True, read_velocities=True):
"""
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
dirname = os.path.dirname(os.path.abspath(filename))
names = None
if check_for_POTCAR:
for f in os.listdir(dirname):
if f == "POTCAR":
try:
potcar = Potcar.from_file(os.path.join(dirname, f))
names = [sym.split("_")[0] for sym in potcar.symbols]
[get_el_sp(n) for n in names] # ensure valid names
except:
names = None
with zopen(filename, "rt") as f:
return Poscar.from_string(f.read(), names,
read_velocities=read_velocities)
@staticmethod
def from_string(data, default_names=None, read_velocities=True):
"""
Reads a Poscar from a string.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If default_names are supplied and valid, it will use those. Usually,
default names comes from an external source, such as a POTCAR in the
same directory.
2. If there are no valid default names but the input file is Vasp5-like
and contains element symbols in the 6th line, the code will use that.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
data (str): String containing Poscar data.
default_names ([str]): Default symbols for the POSCAR file,
usually coming from a POTCAR in the same directory.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
# "^\s*$" doesn't match lines with no whitespace
chunks = re.split(r"\n\s*\n", data.rstrip(), flags=re.MULTILINE)
try:
if chunks[0] == "":
chunks.pop(0)
chunks[0] = "\n" + chunks[0]
except IndexError:
raise ValueError("Empty POSCAR")
# Parse positions
lines = tuple(clean_lines(chunks[0].split("\n"), False))
comment = lines[0]
scale = float(lines[1])
lattice = np.array([[float(i) for i in line.split()]
for line in lines[2:5]])
if scale < 0:
# In vasp, a negative scale factor is treated as a volume. We need
# to translate this to a proper lattice vector scaling.
vol = abs(det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
vasp5_symbols = False
try:
natoms = [int(i) for i in lines[5].split()]
ipos = 6
except ValueError:
vasp5_symbols = True
symbols = lines[5].split()
"""
Atoms and number of atoms in POSCAR written with vasp appear on
multiple lines when atoms of the same type are not grouped together
and more than 20 groups are then defined ...
Example :
Cr16 Fe35 Ni2
1.00000000000000
8.5415010000000002 -0.0077670000000000 -0.0007960000000000
-0.0077730000000000 8.5224019999999996 0.0105580000000000
-0.0007970000000000 0.0105720000000000 8.5356889999999996
Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Ni Fe Cr Fe Cr
Fe Ni Fe Cr Fe
1 1 2 4 2 1 1 1 2 1 1 1 4 1 1 1 5 3 6 1
2 1 3 2 5
Direct
...
"""
nlines_symbols = 1
for nlines_symbols in range(1, 11):
try:
int(lines[5+nlines_symbols].split()[0])
break
except ValueError:
pass
for iline_symbols in range(6, 5+nlines_symbols):
symbols.extend(lines[iline_symbols].split())
natoms = []
iline_natoms_start = 5+nlines_symbols
for iline_natoms in range(iline_natoms_start,
iline_natoms_start+nlines_symbols):
natoms.extend([int(i) for i in lines[iline_natoms].split()])
atomic_symbols = list()
for i in range(len(natoms)):
atomic_symbols.extend([symbols[i]] * natoms[i])
ipos = 5+2*nlines_symbols
postype = lines[ipos].split()[0]
sdynamics = False
# Selective dynamics
if postype[0] in "sS":
sdynamics = True
ipos += 1
postype = lines[ipos].split()[0]
cart = postype[0] in "cCkK"
nsites = sum(natoms)
# If default_names is specified (usually coming from a POTCAR), use
# them. This is in line with Vasp's parsing order: the POTCAR
# specified is the default used.
if default_names:
try:
atomic_symbols = []
for i in range(len(natoms)):
atomic_symbols.extend([default_names[i]] * natoms[i])
vasp5_symbols = True
except IndexError:
pass
if not vasp5_symbols:
ind = 3 if not sdynamics else 6
try:
# Check if names are appended at the end of the coordinates.
atomic_symbols = [l.split()[ind]
for l in lines[ipos + 1:ipos + 1 + nsites]]
# Ensure symbols are valid elements
if not all([Element.is_valid_symbol(sym)
for sym in atomic_symbols]):
raise ValueError("Non-valid symbols detected.")
vasp5_symbols = True
except (ValueError, IndexError):
# Defaulting to false names.
atomic_symbols = []
for i in range(len(natoms)):
sym = Element.from_Z(i + 1).symbol
atomic_symbols.extend([sym] * natoms[i])
warnings.warn("Elements in POSCAR cannot be determined. "
"Defaulting to false names %s." %
" ".join(atomic_symbols))
# read the atomic coordinates
coords = []
selective_dynamics = list() if sdynamics else None
for i in range(nsites):
toks = lines[ipos + 1 + i].split()
crd_scale = scale if cart else 1
coords.append([float(j) * crd_scale for j in toks[:3]])
if sdynamics:
selective_dynamics.append([tok.upper()[0] == "T"
for tok in toks[3:6]])
struct = Structure(lattice, atomic_symbols, coords,
to_unit_cell=False, validate_proximity=False,
coords_are_cartesian=cart)
if read_velocities:
# Parse velocities if any
velocities = []
if len(chunks) > 1:
for line in chunks[1].strip().split("\n"):
velocities.append([float(tok) for tok in line.split()])
# Parse the predictor-corrector data
predictor_corrector = []
predictor_corrector_preamble = None
if len(chunks) > 2:
lines = chunks[2].strip().split("\n")
# There are 3 sets of 3xN Predictor corrector parameters
# So can't be stored as a single set of "site_property"
# First line in chunk is a key in CONTCAR
# Second line is POTIM
# Third line is the thermostat parameters
predictor_corrector_preamble = (lines[0] + "\n" + lines[1]
+ "\n" + lines[2])
# Rest is three sets of parameters, each set contains
# x, y, z predictor-corrector parameters for every atom in order
lines = lines[3:]
for st in range(nsites):
d1 = [float(tok) for tok in lines[st].split()]
d2 = [float(tok) for tok in lines[st+nsites].split()]
d3 = [float(tok) for tok in lines[st+2*nsites].split()]
predictor_corrector.append([d1,d2,d3])
else:
velocities = None
predictor_corrector = None
predictor_corrector_preamble = None
return Poscar(struct, comment, selective_dynamics, vasp5_symbols,
velocities=velocities,
predictor_corrector=predictor_corrector,
predictor_corrector_preamble=predictor_corrector_preamble)
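# A minimal usage sketch (hypothetical file name):
#
#     poscar = Poscar.from_file("POSCAR")   # element names taken from a POTCAR if one is present
#     structure = poscar.structure
#     print(poscar.get_string(direct=True, significant_figures=6))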
def get_string(self, direct=True, vasp4_compatible=False,
significant_figures=6):
"""
Returns a string to be written as a POSCAR file. By default, site
symbols are written, which means compatibility is for vasp >= 5.
Args:
direct (bool): Whether coordinates are output in direct or
cartesian. Defaults to True.
vasp4_compatible (bool): Set to True to omit site symbols on 6th
line to maintain backward vasp 4.x compatibility. Defaults
to False.
significant_figures (int): No. of significant figures to
output all quantities. Defaults to 6. Note that positions are
output in fixed point, while velocities are output in
scientific format.
Returns:
String representation of POSCAR.
"""
# This corrects for VASP's really annoying bug of crashing on lattices
# which have triple product < 0. We will just invert the lattice
# vectors.
latt = self.structure.lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
format_str = "{{:.{0}f}}".format(significant_figures)
lines = [self.comment, "1.0"]
for v in latt.matrix:
lines.append(" ".join([format_str.format(c) for c in v]))
if self.true_names and not vasp4_compatible:
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
if self.selective_dynamics:
lines.append("Selective dynamics")
lines.append("direct" if direct else "cartesian")
for (i, site) in enumerate(self.structure):
coords = site.frac_coords if direct else site.coords
line = " ".join([format_str.format(c) for c in coords])
if self.selective_dynamics is not None:
sd = ["T" if j else "F" for j in self.selective_dynamics[i]]
line += " %s %s %s" % (sd[0], sd[1], sd[2])
line += " " + site.species_string
lines.append(line)
if self.velocities:
try:
lines.append("")
for v in self.velocities:
lines.append(" ".join([format_str.format(i) for i in v]))
except:
warnings.warn("Velocities are missing or corrupted.")
if self.predictor_corrector:
lines.append("")
if self.predictor_corrector_preamble:
lines.append(self.predictor_corrector_preamble)
pred = np.array(self.predictor_corrector)
for col in range(3):
for z in pred[:,col]:
lines.append(" ".join([format_str.format(i) for i in z]))
else:
warnings.warn(
"Preamble information missing or corrupt. "
"Writing Poscar with no predictor corrector data.")
return "\n".join(lines) + "\n"
def __repr__(self):
return self.get_string()
def __str__(self):
"""
String representation of Poscar file.
"""
return self.get_string()
def write_file(self, filename, **kwargs):
"""
Writes POSCAR to a file. The supported kwargs are the same as those for
the Poscar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"true_names": self.true_names,
"selective_dynamics": np.array(
self.selective_dynamics).tolist(),
"velocities": self.velocities,
"predictor_corrector": self.predictor_corrector,
"comment": self.comment}
@classmethod
def from_dict(cls, d):
return Poscar(Structure.from_dict(d["structure"]),
comment=d["comment"],
selective_dynamics=d["selective_dynamics"],
true_names=d["true_names"],
velocities=d.get("velocities", None),
predictor_corrector=d.get("predictor_corrector", None))
def set_temperature(self, temperature):
"""
Initializes the velocities based on Maxwell-Boltzmann distribution.
Removes linear, but not angular drift (same as VASP)
Scales the energies to the exact temperature (microcanonical ensemble)
Velocities are given in A/fs. This is the vasp default when
direct/cartesian is not specified (even when positions are given in
direct coordinates)
Overwrites imported velocities, if any.
Args:
temperature (float): Temperature in Kelvin.
"""
# mean 0 variance 1
velocities = np.random.randn(len(self.structure), 3)
# in AMU, (N,1) array
atomic_masses = np.array([site.specie.atomic_mass.to("kg")
for site in self.structure])
dof = 3 * len(self.structure) - 3
# scale velocities due to atomic masses
# mean 0 std proportional to sqrt(1/m)
velocities /= atomic_masses[:, np.newaxis] ** (1 / 2)
# remove linear drift (net momentum)
velocities -= np.average(atomic_masses[:, np.newaxis] * velocities,
axis=0) / np.average(atomic_masses)
# scale velocities to get correct temperature
energy = np.sum(1 / 2 * atomic_masses *
np.sum(velocities ** 2, axis=1))
scale = (temperature * dof / (2 * energy / const.k)) ** (1 / 2)
velocities *= scale * 1e-5 # these are in A/fs
self.temperature = temperature
try:
del self.structure.site_properties["selective_dynamics"]
except KeyError:
pass
try:
del self.structure.site_properties["predictor_corrector"]
except KeyError:
pass
# returns as a list of lists to be consistent with the other
# initializations
self.structure.add_site_property("velocities", velocities.tolist())
class Incar(dict, MSONable):
"""
INCAR object for reading and writing INCAR files. Essentially consists of
a dictionary with some helper functions
"""
def __init__(self, params=None):
"""
Creates an Incar object.
Args:
params (dict): A set of input parameters as a dictionary.
"""
super(Incar, self).__init__()
if params:
# if Incar contains vector-like magmoms given as a list
# of floats, convert to a list of lists
if (params.get("MAGMOM") and isinstance(params["MAGMOM"][0], (int, float))) \
and (params.get("LSORBIT") or params.get("LNONCOLLINEAR")):
val = []
for i in range(len(params["MAGMOM"])//3):
val.append(params["MAGMOM"][i*3:(i+1)*3])
params["MAGMOM"] = val
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair to Incar. Warns if parameter is not in list of
valid INCAR tags. Also cleans the parameter and val by stripping
leading and trailing white spaces.
"""
super(Incar, self).__setitem__(
key.strip(), Incar.proc_val(key.strip(), val.strip())
if isinstance(val, six.string_types) else val)
def as_dict(self):
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
if d.get("MAGMOM") and isinstance(d["MAGMOM"][0], dict):
d["MAGMOM"] = [Magmom.from_dict(m) for m in d["MAGMOM"]]
return Incar({k: v for k, v in d.items() if k not in ("@module",
"@class")})
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \
(self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
value.append(" ".join(str(i) for j in self[k] for i in j))
elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
for m, g in itertools.groupby(self[k]):
value.append("3*{}*{}".format(len(tuple(g)), m))
else:
# float() to ensure backwards compatibility between
# float magmoms and Magmom objects
for m, g in itertools.groupby(self[k], lambda x: float(x)):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str(tabulate([[l[0], "=", l[1]] for l in lines],
tablefmt="plain"))
else:
return str_delimited(lines, None, " = ") + "\n"
def __str__(self):
return self.get_string(sort_keys=True, pretty=False)
def write_file(self, filename):
"""
Write Incar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
"""
Reads an Incar object from a file.
Args:
filename (str): Filename for file
Returns:
Incar object
"""
with zopen(filename, "rt") as f:
return Incar.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
"""
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
m = re.match(r'(\w+)\s*=\s*(.*)', line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val
return Incar(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert INCAR parameters to proper types, e.g.,
integers, floats, lists, etc.
Args:
key: INCAR parameter key
val: Actual value of INCAR parameter.
"""
list_keys = ("LDAUU", "LDAUL", "LDAUJ", "MAGMOM", "DIPOL",
"LANGEVIN_GAMMA", "QUAD_EFG", "EINT")
bool_keys = ("LDAU", "LWAVE", "LSCALU", "LCHARG", "LPLANE", "LUSE_VDW",
"LHFCALC", "ADDGRID", "LSORBIT", "LNONCOLLINEAR")
float_keys = ("EDIFF", "SIGMA", "TIME", "ENCUTFOCK", "HFSCREEN",
"POTIM", "EDIFFG", "AGGAC", "PARAM1", "PARAM2")
int_keys = ("NSW", "NBANDS", "NELMIN", "ISIF", "IBRION", "ISPIN",
"ICHARG", "NELM", "ISMEAR", "NPAR", "LDAUPRINT", "LMAXMIX",
"ENCUT", "NSIM", "NKRED", "NUPDOWN", "ISPIND", "LDAUTYPE",
"IVDW")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in list_keys:
output = []
toks = re.findall(
r"(-?\d+\.?\d*)\*?(-?\d+\.?\d*)?\*?(-?\d+\.?\d*)?", val)
for tok in toks:
if tok[2] and "3" in tok[0]:
output.extend(
[smart_int_or_float(tok[2])] * int(tok[0])
* int(tok[1]))
elif tok[1]:
output.extend([smart_int_or_float(tok[1])] *
int(tok[0]))
else:
output.append(smart_int_or_float(tok[0]))
return output
if key in bool_keys:
                m = re.match(r"^\.?([TFtf])[A-Za-z]*\.?", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
                return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", val).group(0))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
# Not in standard keys. We will try a hierarchy of conversions.
try:
val = int(val)
return val
except ValueError:
pass
try:
val = float(val)
return val
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
return val.strip().capitalize()
def diff(self, other):
"""
Diff function for Incar. Compares two Incars and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other (Incar): The other Incar object to compare to.
Returns:
Dict of the following format:
{"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different}
            Note that the parameters are returned as full dictionaries of values.
E.g. {"ISIF":3}
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"INCAR1": v1, "INCAR2": None}
elif v1 != other[k1]:
different_param[k1] = {"INCAR1": v1, "INCAR2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"INCAR1": None, "INCAR2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another INCAR object to this object.
Facilitates the use of "standard" INCARs.
"""
params = {k: v for k, v in self.items()}
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Incars have conflicting values!")
else:
params[k] = v
return Incar(params)
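# Editor's illustrative sketch (not part of pymatgen): a minimal round trip with the
# Incar class defined above. The tag values are arbitrary; diff() compares two INCARs.
def _example_incar_diff():
    base = Incar.from_string("ENCUT = 520\nISMEAR = 0\nSIGMA = 0.05")
    modified = Incar(dict(base))
    modified["ISMEAR"] = -5
    # -> {"Same": {...}, "Different": {"ISMEAR": {"INCAR1": 0, "INCAR2": -5}}}
    return base.diff(modified)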
class Kpoints_supported_modes(Enum):
Automatic = 0
Gamma = 1
Monkhorst = 2
Line_mode = 3
Cartesian = 4
Reciprocal = 5
def __str__(self):
return self.name
@staticmethod
def from_string(s):
c = s.lower()[0]
for m in Kpoints_supported_modes:
if m.name.lower()[0] == c:
return m
raise ValueError("Can't interprete Kpoint mode %s" % s)
class Kpoints(MSONable):
"""
KPOINT reader/writer.
"""
supported_modes = Kpoints_supported_modes
def __init__(self, comment="Default gamma", num_kpts=0,
style=supported_modes.Gamma,
kpts=((1, 1, 1),), kpts_shift=(0, 0, 0),
kpts_weights=None, coord_type=None, labels=None,
tet_number=0, tet_weight=0, tet_connections=None):
"""
Highly flexible constructor for Kpoints object. The flexibility comes
at the cost of usability and in general, it is recommended that you use
the default constructor only if you know exactly what you are doing and
        require the flexibility. For most use cases, the three automatic
schemes can be constructed far more easily using the convenience static
constructors (automatic, gamma_automatic, monkhorst_automatic) and it
is recommended that you use those.
Args:
comment (str): String comment for Kpoints
num_kpts: Following VASP method of defining the KPOINTS file, this
parameter is the number of kpoints specified. If set to 0
(or negative), VASP automatically generates the KPOINTS.
style: Style for generating KPOINTS. Use one of the
Kpoints.supported_modes enum types.
kpts (2D array): 2D array of kpoints. Even when only a single
specification is required, e.g. in the automatic scheme,
the kpts should still be specified as a 2D array. e.g.,
[[20]] or [[2,2,2]].
kpts_shift (3x1 array): Shift for Kpoints.
kpts_weights: Optional weights for kpoints. Weights should be
integers. For explicit kpoints.
coord_type: In line-mode, this variable specifies whether the
Kpoints were given in Cartesian or Reciprocal coordinates.
labels: In line-mode, this should provide a list of labels for
each kpt. It is optional in explicit kpoint mode as comments for
k-points.
tet_number: For explicit kpoints, specifies the number of
tetrahedrons for the tetrahedron method.
tet_weight: For explicit kpoints, specifies the weight for each
tetrahedron for the tetrahedron method.
tet_connections: For explicit kpoints, specifies the connections
of the tetrahedrons for the tetrahedron method.
Format is a list of tuples, [ (sym_weight, [tet_vertices]),
...]
The default behavior of the constructor is for a Gamma centered,
1x1x1 KPOINTS with no shift.
"""
if num_kpts > 0 and (not labels) and (not kpts_weights):
raise ValueError("For explicit or line-mode kpoints, either the "
"labels or kpts_weights must be specified.")
self.comment = comment
self.num_kpts = num_kpts
self.kpts = kpts
self.style = style
self.coord_type = coord_type
self.kpts_weights = kpts_weights
self.kpts_shift = kpts_shift
self.labels = labels
self.tet_number = tet_number
self.tet_weight = tet_weight
self.tet_connections = tet_connections
@property
def style(self):
return self._style
@style.setter
def style(self, style):
if isinstance(style, six.string_types):
style = Kpoints.supported_modes.from_string(style)
if style in (Kpoints.supported_modes.Automatic,
Kpoints.supported_modes.Gamma,
Kpoints.supported_modes.Monkhorst) and len(self.kpts) > 1:
raise ValueError("For fully automatic or automatic gamma or monk "
"kpoints, only a single line for the number of "
"divisions is allowed.")
self._style = style
@staticmethod
def automatic(subdivisions):
"""
Convenient static constructor for a fully automatic Kpoint grid, with
gamma centered Monkhorst-Pack grids and the number of subdivisions
along each reciprocal lattice vector determined by the scheme in the
VASP manual.
Args:
subdivisions: Parameter determining number of subdivisions along
each reciprocal lattice vector.
Returns:
Kpoints object
"""
return Kpoints("Fully automatic kpoint scheme", 0,
style=Kpoints.supported_modes.Automatic,
kpts=[[subdivisions]])
@staticmethod
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Gamma, kpts=[kpts],
kpts_shift=shift)
@staticmethod
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Monkhorst pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Monkhorst, kpts=[kpts],
kpts_shift=shift)
@staticmethod
def automatic_density(structure, kppa, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and
Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure (Structure): Input structure
kppa (int): Grid density
force_gamma (bool): Force a gamma centered mesh (default is to
use gamma only for hexagonal cells or odd meshes)
Returns:
Kpoints
"""
comment = "pymatgen 4.7.6+ generated KPOINTS with grid density = " + \
"%.0f / atom" % kppa
if math.fabs((math.floor(kppa ** (1 / 3) + 0.5)) ** 3 - kppa) < 1:
kppa += kppa * 0.01
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]
is_hexagonal = latt.is_hexagonal()
has_odd = any([i % 2 == 1 for i in num_div])
if has_odd or is_hexagonal or force_gamma:
style = Kpoints.supported_modes.Gamma
else:
style = Kpoints.supported_modes.Monkhorst
return Kpoints(comment, 0, style, [num_div], [0, 0, 0])
@staticmethod
def automatic_gamma_density(structure, kppa):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density
"""
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
# ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
# VASP documentation recommends to use even grids for n <= 8 and odd
# grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = "pymatgen 4.7.6+ generated KPOINTS with grid density = " + \
"{} / atom".format(kppa)
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])
@staticmethod
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
"""
vol = structure.lattice.reciprocal_lattice.volume
kppa = kppvol * vol * structure.num_sites
return Kpoints.automatic_density(structure, kppa,
force_gamma=force_gamma)
@staticmethod
def automatic_linemode(divisions, ibz):
"""
        Convenient static constructor for a KPOINTS file in line mode,
        typically used for band-structure calculations. K-points are placed
        along the high-symmetry lines of the path given by `ibz`, with
        `divisions` points along each line.
        Args:
            divisions: Parameter determining the number of k-points along each
                high-symmetry line.
ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)
Returns:
Kpoints object
"""
kpoints = list()
labels = list()
for path in ibz.kpath["path"]:
kpoints.append(ibz.kpath["kpoints"][path[0]])
labels.append(path[0])
for i in range(1, len(path) - 1):
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[-1]])
labels.append(path[-1])
return Kpoints("Line_mode KPOINTS file",
style=Kpoints.supported_modes.Line_mode,
coord_type="Reciprocal",
kpts=kpoints,
labels=labels,
num_kpts=int(divisions))
@staticmethod
def from_file(filename):
"""
Reads a Kpoints object from a KPOINTS file.
Args:
filename (str): filename to read from.
Returns:
Kpoints object
"""
with zopen(filename, "rt") as f:
return Kpoints.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
# Fully automatic KPOINTS
if style == "a":
return Kpoints.automatic(int(lines[3]))
coord_pattern = re.compile(r'^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+'
r'([\d+.\-Ee]+)')
# Automatic gamma and Monk KPOINTS, with optional shift
if style == "g" or style == "m":
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
kpts_shift = [int(i) for i in lines[4].split()]
except ValueError:
pass
return Kpoints.gamma_automatic(kpts, kpts_shift) if style == "g" \
else Kpoints.monkhorst_automatic(kpts, kpts_shift)
# Automatic kpoints with basis
if num_kpts <= 0:
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, kpts_shift=kpts_shift)
# Line-mode KPOINTS, usually used with band structures
if style == "l":
coord_type = "Cartesian" if lines[3].lower()[0] in "ck" \
else "Reciprocal"
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile(r'([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)'
r'\s*!*\s*(.*)')
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append([float(m.group(1)), float(m.group(2)),
float(m.group(3))])
labels.append(m.group(4).strip())
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, coord_type=coord_type, labels=labels)
# Assume explicit KPOINTS if all else fails.
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4])
else:
labels.append(None)
try:
# Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == "t":
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append((int(toks[0]),
[int(toks[j])
for j in range(1, 5)]))
except IndexError:
pass
return Kpoints(comment=comment, num_kpts=num_kpts,
style=Kpoints.supported_modes[str(style)],
kpts=kpts, kpts_weights=kpts_weights,
tet_number=tet_number, tet_weight=tet_weight,
tet_connections=tet_connections, labels=labels)
def write_file(self, filename):
"""
Write Kpoints to a file.
Args:
filename (str): Filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
def __repr__(self):
return self.__str__()
def __str__(self):
lines = [self.comment, str(self.num_kpts), self.style.name]
style = self.style.name.lower()[0]
if style == "l":
lines.append(self.coord_type)
for i in range(len(self.kpts)):
lines.append(" ".join([str(x) for x in self.kpts[i]]))
if style == "l":
lines[-1] += " ! " + self.labels[i]
if i % 2 == 1:
lines[-1] += "\n"
elif self.num_kpts > 0:
if self.labels is not None:
lines[-1] += " %i %s" % (self.kpts_weights[i],
self.labels[i])
else:
lines[-1] += " %i" % (self.kpts_weights[i])
# Print tetrahedron parameters if the number of tetrahedrons > 0
if style not in "lagm" and self.tet_number > 0:
lines.append("Tetrahedron")
lines.append("%d %f" % (self.tet_number, self.tet_weight))
for sym_weight, vertices in self.tet_connections:
lines.append("%d %d %d %d %d" % (sym_weight, vertices[0],
vertices[1], vertices[2],
vertices[3]))
# Print shifts for automatic kpoints types if not zero.
if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):
lines.append(" ".join([str(x) for x in self.kpts_shift]))
return "\n".join(lines) + "\n"
def as_dict(self):
"""json friendly dict representation of Kpoints"""
d = {"comment": self.comment, "nkpoints": self.num_kpts,
"generation_style": self.style.name, "kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights, "coord_type": self.coord_type,
"labels": self.labels, "tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
comment = d.get("comment", "")
generation_style = d.get("generation_style")
kpts = d.get("kpoints", [[1, 1, 1]])
kpts_shift = d.get("usershift", [0, 0, 0])
num_kpts = d.get("nkpoints", 0)
return cls(comment=comment, kpts=kpts, style=generation_style,
kpts_shift=kpts_shift, num_kpts=num_kpts,
kpts_weights=d.get("kpts_weights"),
coord_type=d.get("coord_type"),
labels=d.get("labels"), tet_number=d.get("tet_number", 0),
tet_weight=d.get("tet_weight", 0),
tet_connections=d.get("tet_connections"))
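# Editor's illustrative sketch (not part of pymatgen): the scaling step used by
# Kpoints.automatic_density() above, written out for a hypothetical cell with lattice
# lengths ``lengths`` (in Angstrom) and ``n_atoms`` sites. It mirrors only the
# proportional-division arithmetic, not the hexagonal/odd-mesh Gamma decision.
def _example_kpoint_divisions(lengths=(3.0, 3.0, 5.0), n_atoms=2, kppa=1000):
    ngrid = kppa / n_atoms
    mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
    return [int(math.floor(max(mult / l, 1))) for l in lengths]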
def parse_string(s):
return "{}".format(s.strip())
def parse_bool(s):
m = re.match(r"^\.?([TFtf])[A-Za-z]*\.?", s)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(s + " should be a boolean type!")
def parse_float(s):
return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", s).group(0))
def parse_int(s):
return int(re.match(r"^-?[0-9]+", s).group(0))
def parse_list(s):
return [float(y) for y in re.split(r"\s+", s.strip()) if not y.isalpha()]
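# Editor's note (illustrative, not part of pymatgen): the parse_* helpers above are the
# per-keyword converters wired into PotcarSingle.parse_functions below. Each expects an
# already-isolated value string, e.g.:
#   parse_bool(".TRUE.")      -> True
#   parse_float("520.000")    -> 520.0
#   parse_int("8")            -> 8
#   parse_list("1.2 2.4 3.6") -> [1.2, 2.4, 3.6]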
Orbital = namedtuple('Orbital', ['n', 'l', 'j', 'E', 'occ'])
OrbitalDescription = namedtuple('OrbitalDescription',
['l', 'E', 'Type', "Rcut", "Type2", "Rcut2"])
class PotcarSingle(object):
"""
    Object for a **single** POTCAR. The builder assumes the complete string is
    the POTCAR; the complete untouched data is kept in "data" as a string, and
    the parsed keywords are stored in a dict.
Args:
data:
Complete and single potcar file as a string.
.. attribute:: data
POTCAR data as a string.
.. attribute:: keywords
Keywords parsed from the POTCAR as a dict. All keywords are also
accessible as attributes in themselves. E.g., potcar.enmax,
potcar.encut, etc.
"""
functional_dir = {"PBE": "POT_GGA_PAW_PBE",
"PBE_52": "POT_GGA_PAW_PBE_52",
"PBE_54": "POT_GGA_PAW_PBE_54",
"LDA": "POT_LDA_PAW",
"LDA_52": "POT_LDA_PAW_52",
"LDA_54": "POT_LDA_PAW_54",
"PW91": "POT_GGA_PAW_PW91",
"LDA_US": "POT_LDA_US",
"PW91_US": "POT_GGA_US_PW91"}
functional_tags = {"pe": {"name": "PBE", "class": "GGA"},
"91": {"name": "PW91", "class": "GGA"},
"rp": {"name": "revPBE", "class": "GGA"},
"am": {"name": "AM05", "class": "GGA"},
"ps": {"name": "PBEsol", "class": "GGA"},
"pw": {"name": "PW86", "class": "GGA"},
"lm": {"name": "Langreth-Mehl-Hu", "class": "GGA"},
"pb": {"name": "Perdew-Becke", "class": "GGA"},
"ca": {"name": "Perdew-Zunger81", "class": "LDA"},
"hl": {"name": "Hedin-Lundquist", "class": "LDA"},
"wi": {"name": "Wigner Interpoloation", "class": "LDA"}}
parse_functions = {"LULTRA": parse_bool,
"LUNSCR": parse_bool,
"LCOR": parse_bool,
"LPAW": parse_bool,
"EATOM": parse_float,
"RPACOR": parse_float,
"POMASS": parse_float,
"ZVAL": parse_float,
"RCORE": parse_float,
"RWIGS": parse_float,
"ENMAX": parse_float,
"ENMIN": parse_float,
"EMMIN": parse_float,
"EAUG": parse_float,
"DEXC": parse_float,
"RMAX": parse_float,
"RAUG": parse_float,
"RDEP": parse_float,
"RDEPT": parse_float,
"QCUT": parse_float,
"QGAM": parse_float,
"RCLOC": parse_float,
"IUNSCR": parse_int,
"ICORE": parse_int,
"NDATA": parse_int,
"VRHFIN": parse_string,
"LEXCH": parse_string,
"TITEL": parse_string,
"STEP": parse_list,
"RRKJ": parse_list,
"GGA": parse_list}
def __init__(self, data):
self.data = data # raw POTCAR as a string
# Vasp parses header in vasprun.xml and this differs from the titel
self.header = data.split("\n")[0].strip()
search_lines = re.search(r"(?s)(parameters from PSCTR are:"
r".*?END of PSCTR-controll parameters)",
data).group(1)
self.keywords = {}
for key, val in re.findall(r"(\S+)\s*=\s*(.*?)(?=;|$)",
search_lines, flags=re.MULTILINE):
try:
self.keywords[key] = self.parse_functions[key](val)
except KeyError:
warnings.warn("Ignoring unknown variable type %s" % key)
PSCTR = OrderedDict()
array_search = re.compile(r"(-*[0-9.]+)")
orbitals = []
descriptions = []
atomic_configuration = re.search(r"Atomic configuration\s*\n?"
r"(.*?)Description", search_lines)
if atomic_configuration:
lines = atomic_configuration.group(1).splitlines()
num_entries = re.search(r"([0-9]+)", lines[0]).group(1)
num_entries = int(num_entries)
PSCTR['nentries'] = num_entries
for line in lines[1:]:
orbit = array_search.findall(line)
if orbit:
                    orbitals.append(Orbital(int(orbit[0]),
                                            int(orbit[1]),
                                            float(orbit[2]),
                                            float(orbit[3]),
                                            float(orbit[4])))
PSCTR['Orbitals'] = tuple(orbitals)
description_string = re.search(r"(?s)Description\s*\n"
r"(.*?)Error from kinetic"
r" energy argument \(eV\)",
search_lines)
if description_string:
for line in description_string.group(1).splitlines():
description = array_search.findall(line)
if description:
descriptions.append(
OrbitalDescription(
int(description[0]), float(description[1]),
int(description[2]), float(description[3]),
int(description[4]) if len(description) > 4 else None,
float(description[5]) if len(description) > 4 else None))
if descriptions:
PSCTR['OrbitalDescriptions'] = tuple(descriptions)
rrkj_kinetic_energy_string = re.search(
r"(?s)Error from kinetic energy argument \(eV\)\s*\n"
r"(.*?)END of PSCTR-controll parameters",
search_lines)
rrkj_array = []
if rrkj_kinetic_energy_string:
for line in rrkj_kinetic_energy_string.group(1).splitlines():
if "=" not in line:
rrkj_array += parse_list(line.strip('\n'))
if rrkj_array:
PSCTR['RRKJ'] = tuple(rrkj_array)
PSCTR.update(self.keywords)
self.PSCTR = OrderedDict(sorted(PSCTR.items(), key=lambda x: x[0]))
self.hash = self.get_potcar_hash()
def __str__(self):
return self.data + "\n"
@property
def electron_configuration(self):
el = Element.from_Z(self.atomic_no)
full_config = el.full_electronic_structure
nelect = self.nelectrons
config = []
while nelect > 0:
e = full_config.pop(-1)
config.append(e)
nelect -= e[-1]
return config
def write_file(self, filename):
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
try:
with zopen(filename, "rt") as f:
return PotcarSingle(f.read())
except UnicodeDecodeError:
warnings.warn("POTCAR contains invalid unicode errors. "
"We will attempt to read it by ignoring errors.")
import codecs
with codecs.open(filename, "r", encoding="utf-8",
errors="ignore") as f:
return PotcarSingle(f.read())
@staticmethod
def from_symbol_and_functional(symbol, functional=None):
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
funcdir = PotcarSingle.functional_dir[functional]
d = SETTINGS.get("PMG_VASP_PSP_DIR")
if d is None:
raise ValueError(
"No POTCAR for %s with functional %s found. "
"Please set the PMG_VASP_PSP_DIR environment in "
".pmgrc.yaml, or you may need to set "
"PMG_DEFAULT_FUNCTIONAL to PBE_52 or PBE_54 if you "
"are using newer psps from VASP." % (symbol, functional))
paths_to_try = [os.path.join(d, funcdir, "POTCAR.{}".format(symbol)),
os.path.join(d, funcdir, symbol, "POTCAR")]
for p in paths_to_try:
p = os.path.expanduser(p)
p = zpath(p)
if os.path.exists(p):
return PotcarSingle.from_file(p)
raise IOError("You do not have the right POTCAR with functional " +
"{} and label {} in your VASP_PSP_DIR".format(functional,
symbol))
@property
def symbol(self):
"""
Symbol of POTCAR, e.g., Fe_pv
"""
return self.keywords["TITEL"].split(" ")[1].strip()
@property
def element(self):
"""
Attempt to return the atomic symbol based on the VRHFIN keyword.
"""
element = self.keywords["VRHFIN"].split(":")[0].strip()
#VASP incorrectly gives the element symbol for Xe as "X"
return "Xe" if element == "X" else element
@property
def atomic_no(self):
"""
Attempt to return the atomic number based on the VRHFIN keyword.
"""
return Element(self.element).Z
@property
def nelectrons(self):
return self.zval
@property
def potential_type(self):
if self.lultra:
return "US"
elif self.lpaw:
return "PAW"
else:
return "NC"
@property
def functional(self):
return self.functional_tags.get(self.LEXCH.lower(), {}).get('name')
@property
def functional_class(self):
return self.functional_tags.get(self.LEXCH.lower(), {}).get('class')
def get_potcar_hash(self):
hash_str = ""
for k, v in self.PSCTR.items():
hash_str += "{}".format(k)
if isinstance(v, int):
hash_str += "{}".format(v)
elif isinstance(v, float):
hash_str += "{:.3f}".format(v)
elif isinstance(v, bool):
hash_str += "{}".format(bool)
elif isinstance(v, (tuple, list)):
for item in v:
if isinstance(item, float):
hash_str += "{:.3f}".format(item)
elif isinstance(item, (Orbital, OrbitalDescription)):
for item_v in item:
if isinstance(item_v, (int, str)):
hash_str += "{}".format(item_v)
elif isinstance(item_v, float):
hash_str += "{:.3f}".format(item_v)
else:
hash_str += "{}".format(item_v) if item_v else ""
else:
hash_str += v.replace(" ", "")
self.hash_str = hash_str
return md5(hash_str.lower().encode('utf-8')).hexdigest()
def __getattr__(self, a):
"""
Delegates attributes to keywords. For example, you can use
potcarsingle.enmax to get the ENMAX of the POTCAR.
For float type properties, they are converted to the correct float. By
default, all energies in eV and all length scales are in Angstroms.
"""
try:
return self.keywords[a.upper()]
        except KeyError:
raise AttributeError(a)
class Potcar(list, MSONable):
"""
Object for reading and writing POTCAR files for calculations. Consists of a
list of PotcarSingle.
Args:
symbols ([str]): Element symbols for POTCAR. This should correspond
to the symbols used by VASP. E.g., "Mg", "Fe_pv", etc.
functional (str): Functional used. To know what functional options
there are, use Potcar.FUNCTIONAL_CHOICES. Note that VASP has
different versions of the same functional. By default, the old
PBE functional is used. If you want the newer ones, use PBE_52 or
PBE_54. Note that if you intend to compare your results with the
Materials Project, you should use the default setting. You can also
override the default by setting PMG_DEFAULT_FUNCTIONAL in your
.pmgrc.yaml.
sym_potcar_map (dict): Allows a user to specify a specific element
symbol to raw POTCAR mapping.
"""
FUNCTIONAL_CHOICES = list(PotcarSingle.functional_dir.keys())
def __init__(self, symbols=None, functional=None, sym_potcar_map=None):
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
super(Potcar, self).__init__()
self.functional = functional
if symbols is not None:
self.set_symbols(symbols, functional, sym_potcar_map)
def as_dict(self):
return {"functional": self.functional, "symbols": self.symbols,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return Potcar(symbols=d["symbols"], functional=d["functional"])
@staticmethod
def from_file(filename):
try:
with zopen(filename, "rt") as f:
fdata = f.read()
except UnicodeDecodeError:
warnings.warn("POTCAR contains invalid unicode errors. "
"We will attempt to read it by ignoring errors.")
import codecs
with codecs.open(filename, "r", encoding="utf-8",
errors="ignore") as f:
fdata = f.read()
potcar = Potcar()
potcar_strings = re.compile(r"\n?(\s*.*?End of Dataset)",
re.S).findall(fdata)
functionals = []
for p in potcar_strings:
single = PotcarSingle(p)
potcar.append(single)
functionals.append(single.functional)
if len(set(functionals)) != 1:
raise ValueError("File contains incompatible functionals!")
else:
potcar.functional = functionals[0]
return potcar
def __str__(self):
return "\n".join([str(potcar).strip("\n") for potcar in self]) + "\n"
def write_file(self, filename):
"""
Write Potcar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@property
def symbols(self):
"""
Get the atomic symbols of all the atoms in the POTCAR file.
"""
return [p.symbol for p in self]
@symbols.setter
def symbols(self, symbols):
self.set_symbols(symbols, functional=self.functional)
@property
def spec(self):
"""
Get the atomic symbols and hash of all the atoms in the POTCAR file.
"""
return [{"symbol": p.symbol, "hash": p.get_potcar_hash()} for p in self]
def set_symbols(self, symbols, functional=None,
sym_potcar_map=None):
"""
Initialize the POTCAR from a set of symbols. Currently, the POTCARs can
be fetched from a location specified in .pmgrc.yaml. Use pmg config
to add this setting.
Args:
symbols ([str]): A list of element symbols
functional (str): The functional to use. If None, the setting
PMG_DEFAULT_FUNCTIONAL in .pmgrc.yaml is used, or if this is
not set, it will default to PBE.
sym_potcar_map (dict): A map of symbol:raw POTCAR string. If
sym_potcar_map is specified, POTCARs will be generated from
the given map data rather than the config file location.
"""
del self[:]
if sym_potcar_map:
for el in symbols:
self.append(PotcarSingle(sym_potcar_map[el]))
else:
for el in symbols:
p = PotcarSingle.from_symbol_and_functional(el, functional)
self.append(p)
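# Editor's illustrative sketch (not part of pymatgen): building a Potcar from element
# symbols with the class defined above. This requires PMG_VASP_PSP_DIR to point at a
# local pseudopotential library (see set_symbols()); the symbols below are hypothetical.
def _example_potcar_from_symbols():
    potcar = Potcar(symbols=["Fe_pv", "O"], functional="PBE")
    potcar.write_file("POTCAR")
    return potcar.symbols  # -> ["Fe_pv", "O"]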
class VaspInput(dict, MSONable):
"""
Class to contain a set of vasp input objects corresponding to a run.
Args:
incar: Incar object.
kpoints: Kpoints object.
poscar: Poscar object.
potcar: Potcar object.
optional_files: Other input files supplied as a dict of {
filename: object}. The object should follow standard pymatgen
conventions in implementing a as_dict() and from_dict method.
"""
def __init__(self, incar, kpoints, poscar, potcar, optional_files=None,
**kwargs):
super(VaspInput, self).__init__(**kwargs)
self.update({'INCAR': incar,
'KPOINTS': kpoints,
'POSCAR': poscar,
'POTCAR': potcar})
if optional_files is not None:
self.update(optional_files)
def __str__(self):
output = []
for k, v in self.items():
output.append(k)
output.append(str(v))
output.append("")
return "\n".join(output)
def as_dict(self):
d = {k: v.as_dict() for k, v in self.items()}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
sub_d = {"optional_files": {}}
for k, v in d.items():
if k in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
sub_d[k.lower()] = dec.process_decoded(v)
elif k not in ["@module", "@class"]:
sub_d["optional_files"][k] = dec.process_decoded(v)
return cls(**sub_d)
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Write VASP input to a directory.
Args:
output_dir (str): Directory to write to. Defaults to current
directory (".").
make_dir_if_not_present (bool): Create the directory if not
present. Defaults to True.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.items():
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
@staticmethod
def from_directory(input_dir, optional_files=None):
"""
Read in a set of VASP input from a directory. Note that only the
standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless
        optional_files is specified.
Args:
input_dir (str): Directory to read VASP input from.
optional_files (dict): Optional files to read in as well as a
dict of {filename: Object type}. Object type must have a
static method from_file.
"""
sub_d = {}
for fname, ftype in [("INCAR", Incar), ("KPOINTS", Kpoints),
("POSCAR", Poscar), ("POTCAR", Potcar)]:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
sub_d["optional_files"] = {}
if optional_files is not None:
for fname, ftype in optional_files.items():
sub_d["optional_files"][fname] = \
ftype.from_file(os.path.join(input_dir, fname))
return VaspInput(**sub_d)
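# Editor's illustrative sketch (not part of pymatgen): a typical round trip with the
# VaspInput container defined above. The directory names are hypothetical and nothing
# here runs at import time.
def _example_vaspinput_roundtrip(run_dir="./my_vasp_run"):
    vinput = VaspInput.from_directory(run_dir)          # reads INCAR/KPOINTS/POSCAR/POTCAR
    vinput["INCAR"]["NSW"] = 0                          # tweak a tag for a static follow-up run
    vinput.write_input(output_dir=run_dir + "_static")  # write all four files back out
    return vinput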
| matk86/pymatgen | pymatgen/io/vasp/inputs.py | Python | mit | 73,040 | [
"VASP",
"pymatgen"
] | 6f1f0017d13e00f4b39d458098ee6c514e83eb1e1894f9e95b032d8d0c90bd25 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.exponential_decay", v1=[])
def exponential_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns a no-arg function that produces the decayed learning
rate. This can be useful for changing the learning rate value across
different invocations of optimizer functions.
It is computed as:
```python
decayed_learning_rate = learning_rate *
decay_rate ^ (global_step / decay_steps)
```
If the argument `staircase` is `True`, then `global_step / decay_steps` is an
integer division and the decayed learning rate follows a staircase function.
Example: decay every 100000 steps with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate_fn = tf.train.exponential_decay(starter_learning_rate,
global_step, 100000, 0.96,
staircase=True)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate_fn)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
staircase: Boolean. If `True` decay the learning rate at discrete intervals
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
Returns:
A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
of the same type as `learning_rate`.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("global_step is required for exponential_decay.")
def decayed_lr(learning_rate, global_step, decay_steps, decay_rate,
staircase, name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(
name, "ExponentialDecay",
[learning_rate, global_step, decay_steps, decay_rate]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
decay_steps = math_ops.cast(decay_steps, dtype)
decay_rate = math_ops.cast(decay_rate, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
p = global_step_recomp / decay_steps
if staircase:
p = math_ops.floor(p)
return math_ops.multiply(
learning_rate, math_ops.pow(decay_rate, p), name=name)
return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
decay_rate, staircase, name)
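# Editor's illustrative sketch (not part of the TensorFlow API): the closed form that
# exponential_decay() above evaluates, in plain Python for a quick sanity check. With
# staircase=True the exponent is floored to a whole number of decay periods.
def _example_exponential_decay_value(lr=0.1, step=250, decay_steps=100, decay_rate=0.96,
                                     staircase=True):
  p = step / decay_steps
  if staircase:
    p = math.floor(p)           # 250 / 100 -> 2 full decay periods
  return lr * decay_rate ** p   # 0.1 * 0.96 ** 2 = 0.09216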
@tf_export("train.piecewise_constant_decay", v1=[])
def piecewise_constant(x, boundaries, values, name=None):
"""Piecewise constant from boundaries and interval values.
This function returns a no-arg callable to compute the piecewise constant.
This can be useful for changing the learning rate value across
different invocations of optimizer functions.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
global_step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate_fn = tf.train.piecewise_constant(global_step, boundaries,
values)
learning_rate = learning_rate_fn()
# Later, whenever we perform an optimization step, we increment global_step.
```
Args:
x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
`float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as `x`.
values: A list of `Tensor`s or `float`s or `int`s that specifies the values
for the intervals defined by `boundaries`. It should have one more element
than `boundaries`, and all elements should have the same type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Returns:
A no-arg function that outputs a 0-D Tensor. The output of the no-arg
function is `values[0]` when `x <= boundaries[0]`,
`values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
and values[-1] when `x > boundaries[-1]`.
Raises:
ValueError: if types of `x` and `boundaries` do not match, or types of all
`values` do not match or
the number of elements in the lists does not match.
"""
if len(boundaries) != len(values) - 1:
raise ValueError(
"The length of boundaries should be 1 less than the length of values")
def decayed_lr(x, boundaries, values, name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(name, "PiecewiseConstant",
[x, boundaries, values, name]) as name:
boundaries = ops.convert_n_to_tensor(boundaries)
values = ops.convert_n_to_tensor(values)
x_recomp = ops.convert_to_tensor(x)
# Avoid explicit conversion to x's dtype. This could result in faulty
# comparisons, for example if floats are converted to integers.
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We can promote int32 boundaries to int64 without loss of precision.
# This covers the most common case where the user passes in boundaries
# as an array of Python integers.
if (b.dtype.base_dtype == dtypes.int32 and
x_recomp.dtype.base_dtype == dtypes.int64):
b = math_ops.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
else:
raise ValueError(
"Boundaries (%s) must have the same dtype as x (%s)." %
(b.dtype.base_dtype, x_recomp.dtype.base_dtype))
      # TODO(rdipietro): Ensure that boundaries' elements strictly increase.
for v in values[1:]:
if v.dtype.base_dtype != values[0].dtype.base_dtype:
raise ValueError(
"Values must have elements all with the same dtype (%s vs %s)." %
(values[0].dtype.base_dtype, v.dtype.base_dtype))
pred_fn_pairs = []
pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
# Need to bind v here; can do this with lambda v=v: ...
pred = (x_recomp > low) & (x_recomp <= high)
pred_fn_pairs.append((pred, lambda v=v: v))
# The default isn't needed here because our conditions are mutually
# exclusive and exhaustive, but tf.case requires it.
default = lambda: values[0]
return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
return functools.partial(decayed_lr, x, boundaries, values, name)
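# Editor's illustrative sketch (not part of the TensorFlow API): the lookup performed by
# piecewise_constant() above, in plain Python. `boundaries` has one fewer element than
# `values`; the i-th boundary closes the half-open interval on which values[i] applies.
def _example_piecewise_lookup(step, boundaries=(100000, 110000), values=(1.0, 0.5, 0.1)):
  for b, v in zip(boundaries, values):
    if step <= b:
      return v
  return values[-1]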
@tf_export("train.polynomial_decay", v1=[])
def polynomial_decay(learning_rate,
global_step,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None):
"""Applies a polynomial decay to the learning rate.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This function applies a polynomial decay function to a provided initial
`learning_rate` to reach an `end_learning_rate` in the given `decay_steps`.
It requires a `global_step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training step.
The function returns a no-arg callable that outputs the decayed learning
rate. This can be useful for changing the learning rate value across
different invocations of optimizer functions. It is computed as:
```python
global_step = min(global_step, decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
If `cycle` is True then a multiple of `decay_steps` is used, the first one
  that is bigger than `global_step`.
```python
decay_steps = decay_steps * ceil(global_step / decay_steps)
decayed_learning_rate_fn = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
decayed_learning_rate = decayed_learning_rate_fn()
```
Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate_fn = tf.train.polynomial_decay(starter_learning_rate,
global_step, decay_steps,
end_learning_rate,
power=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate_fn)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a
Python number. The power of the polynomial. Defaults to linear, 1.0.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
Returns:
A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
of the same type as `learning_rate`.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("global_step is required for polynomial_decay.")
def decayed_lr(learning_rate, global_step, decay_steps, end_learning_rate,
power, cycle, name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(
name, "PolynomialDecay",
[learning_rate, global_step, decay_steps, end_learning_rate, power]
) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
end_learning_rate = math_ops.cast(end_learning_rate, dtype)
power = math_ops.cast(power, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
decay_steps_recomp = math_ops.cast(decay_steps, dtype)
if cycle:
# Find the first multiple of decay_steps that is bigger than
# global_step. If global_step is zero set the multiplier to 1
multiplier = control_flow_ops.cond(
math_ops.equal(global_step_recomp, 0), lambda: 1.0,
lambda: math_ops.ceil(global_step_recomp / decay_steps))
decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
else:
# Make sure that the global_step used is not bigger than decay_steps.
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
p = math_ops.div(global_step_recomp, decay_steps_recomp)
return math_ops.add(
math_ops.multiply(learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
end_learning_rate,
name=name)
return functools.partial(
decayed_lr, learning_rate, global_step, decay_steps, end_learning_rate,
power, cycle, name)
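# Editor's illustrative sketch (not part of the TensorFlow API): the formula evaluated by
# polynomial_decay() above, including the cycle=True rescaling of decay_steps. Values
# are illustrative only.
def _example_polynomial_decay_value(lr=0.1, step=4000, decay_steps=10000,
                                    end_lr=0.01, power=0.5, cycle=False):
  if cycle:
    decay_steps = decay_steps * max(1.0, math.ceil(step / decay_steps))
  else:
    step = min(step, decay_steps)
  p = step / decay_steps
  return (lr - end_lr) * (1 - p) ** power + end_lr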
@tf_export("train.natural_exp_decay", v1=[])
def natural_exp_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies natural exponential decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires an `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns a no-arg callable that produces the decayed learning
rate. This can be useful for changing the learning rate value across
different invocations of optimizer functions. It is computed as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
decay_step)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step /
decay_step))
```
  Example: natural exponential decay with a decay rate of 0.5:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 5
k = 0.5
learning_rate_fn = tf.train.natural_exp_decay(learning_rate, global_step,
decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate_fn)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'ExponentialTimeDecay'.
Returns:
A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
of the same type as `learning_rate`.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("global_step is required for natural_exp_decay.")
def decayed_lr(learning_rate, global_step, decay_steps, decay_rate, staircase,
name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(name, "NaturalExpDecay",
[learning_rate, global_step, decay_rate]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
decay_steps = math_ops.cast(decay_steps, dtype)
decay_rate = math_ops.cast(decay_rate, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
p = global_step_recomp / decay_steps
if staircase:
p = math_ops.floor(p)
exponent = math_ops.exp(
math_ops.multiply(math_ops.negative(decay_rate), p))
return math_ops.multiply(learning_rate, exponent, name=name)
return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
decay_rate, staircase, name)
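# Editor's illustrative sketch (not part of the TensorFlow API): the continuous form of
# natural_exp_decay() above; with staircase=True the ratio step / decay_steps is floored
# before the exponential is taken.
def _example_natural_exp_decay_value(lr=0.1, step=10, decay_steps=5, decay_rate=0.5):
  return lr * math.exp(-decay_rate * step / decay_steps)  # 0.1 * exp(-1.0)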
@tf_export("train.inverse_time_decay", v1=[])
def inverse_time_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies inverse time decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an inverse decay function
to a provided initial learning rate. It requires an `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns a no-arg callable that produces the decayed learning
rate. This can be useful for changing the learning rate value across
different invocations of optimizer functions. It is computed as:
```python
decayed_learning_rate = learning_rate / (1 + decay_rate * global_step /
decay_step)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step /
decay_step))
```
Example: decay 1/t with a rate of 0.5:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate_fn = tf.train.inverse_time_decay(learning_rate, global_step,
decay_steps, decay_rate)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate_fn)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
Returns:
A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
of the same type as `learning_rate`.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("global_step is required for inverse_time_decay.")
def decayed_lr(learning_rate, global_step, decay_steps, decay_rate, staircase,
name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(name, "InverseTimeDecay",
[learning_rate, global_step, decay_rate]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
decay_steps = math_ops.cast(decay_steps, dtype)
decay_rate = math_ops.cast(decay_rate, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
p = global_step_recomp / decay_steps
if staircase:
p = math_ops.floor(p)
const = math_ops.cast(constant_op.constant(1), dtype)
denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
return math_ops.div(learning_rate, denom, name=name)
return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
decay_rate, staircase, name)
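# Editor's illustrative sketch (not part of the TensorFlow API): the continuous form of
# inverse_time_decay() above; with staircase=True the ratio step / decay_steps is floored
# before entering the denominator.
def _example_inverse_time_decay_value(lr=0.1, step=5, decay_steps=1.0, decay_rate=0.5):
  return lr / (1 + decay_rate * step / decay_steps)  # 0.1 / 3.5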
@tf_export("train.cosine_decay", v1=[])
def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0,
name=None):
"""Applies cosine decay to the learning rate.
See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a cosine decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns a no-arg callable that produces the decayed learning
rate. This can be useful for changing the learning rate value across
different invocations of optimizer functions. It is computed as:
```python
global_step = min(global_step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
decayed_learning_rate = learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = tf.train.cosine_decay(learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of learning_rate.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
Returns:
A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
of the same type as `learning_rate`.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("cosine decay requires global_step")
def decayed_lr(learning_rate, global_step, decay_steps, alpha, name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(name, "CosineDecay",
[learning_rate, global_step]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
decay_steps = math_ops.cast(decay_steps, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
completed_fraction = global_step_recomp / decay_steps
cosine_decayed = 0.5 * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - alpha) * cosine_decayed + alpha
return math_ops.multiply(learning_rate, decayed)
return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
alpha, name)
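# Editor's illustrative sketch (not part of the TensorFlow API): the schedule computed by
# cosine_decay() above, in plain Python. `alpha` sets the floor the rate decays towards.
def _example_cosine_decay_value(lr=0.1, step=500, decay_steps=1000, alpha=0.0):
  step = min(step, decay_steps)
  cosine = 0.5 * (1 + math.cos(math.pi * step / decay_steps))
  return lr * ((1 - alpha) * cosine + alpha)  # halfway through: 0.1 * 0.5 = 0.05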
@tf_export("train.cosine_decay_restarts", v1=[])
def cosine_decay_restarts(learning_rate,
global_step,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None):
"""Applies cosine decay with restarts to the learning rate.
See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a cosine decay function with
restarts to a provided initial learning rate. It requires a `global_step`
value to compute the decayed learning rate. You can just pass a TensorFlow
variable that you increment at each training step.
The function returns a no-arg callable that produces the decayed learning
rate while taking into account possible warm restarts. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
restart is performed. Each new warm restart runs for `t_mul` times more steps
and with `m_mul` times smaller initial learning rate.
Example usage:
```python
first_decay_steps = 1000
lr_decayed_fn = tf.train.cosine_decay_restarts(learning_rate, global_step,
first_decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the number of iterations in the i-th period
m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the initial learning rate of the i-th period:
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of the learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
Returns:
A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
of the same type as `learning_rate`.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("cosine decay restarts requires global_step")
def decayed_lr(learning_rate, global_step, first_decay_steps, t_mul, m_mul,
alpha, name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(name, "SGDRDecay", [learning_rate, global_step]
) as name:
learning_rate = ops.convert_to_tensor(
learning_rate, name="initial_learning_rate")
dtype = learning_rate.dtype
first_decay_steps = math_ops.cast(first_decay_steps, dtype)
alpha = math_ops.cast(alpha, dtype)
t_mul = math_ops.cast(t_mul, dtype)
m_mul = math_ops.cast(m_mul, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
completed_fraction = global_step_recomp / first_decay_steps
def compute_step(completed_fraction, geometric=False):
"""Helper for `cond` operation."""
if geometric:
i_restart = math_ops.floor(
math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
math_ops.log(t_mul))
sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
else:
i_restart = math_ops.floor(completed_fraction)
completed_fraction -= i_restart
return i_restart, completed_fraction
i_restart, completed_fraction = control_flow_ops.cond(
math_ops.equal(t_mul, 1.0),
lambda: compute_step(completed_fraction, geometric=False),
lambda: compute_step(completed_fraction, geometric=True))
m_fac = m_mul**i_restart
cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - alpha) * cosine_decayed + alpha
return math_ops.multiply(learning_rate, decayed, name=name)
return functools.partial(decayed_lr, learning_rate, global_step,
first_decay_steps, t_mul, m_mul, alpha, name)
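# Editor's illustrative sketch (not part of the TensorFlow API): how cosine_decay_restarts()
# above locates the current restart period when t_mul != 1. i_restart counts completed
# restarts and the returned fraction is renormalised to the length of the current period.
def _example_sgdr_restart_index(step=1500, first_decay_steps=1000, t_mul=2.0):
  completed = step / first_decay_steps
  i_restart = math.floor(math.log(1.0 - completed * (1.0 - t_mul)) / math.log(t_mul))
  sum_r = (1.0 - t_mul ** i_restart) / (1.0 - t_mul)
  fraction = (completed - sum_r) / t_mul ** i_restart
  return i_restart, fraction  # -> (1, 0.25): a quarter of the way into the second period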
@tf_export("train.linear_cosine_decay", v1=[])
def linear_cosine_decay(learning_rate,
global_step,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies linear cosine decay to the learning rate.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a linear cosine decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns a no-arg callable that produces the decayed learning
rate. This can be useful for changing the learning rate value across
different invocations of optimizer functions. It is computed as:
```python
global_step = min(global_step, decay_steps)
  linear_decay = (decay_steps - global_step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * global_step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
decayed_learning_rate = learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = tf.train.linear_cosine_decay(learning_rate, global_step,
decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
Returns:
A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
of the same type as `learning_rate`.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("linear cosine decay requires global_step")
def decayed_lr(learning_rate, global_step, decay_steps, num_periods, alpha,
beta, name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(name, "LinearCosineDecay",
[learning_rate, global_step]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
decay_steps = math_ops.cast(decay_steps, dtype)
num_periods = math_ops.cast(num_periods, dtype)
alpha = math_ops.cast(alpha, dtype)
beta = math_ops.cast(beta, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
return math_ops.multiply(learning_rate, linear_cosine_decayed, name=name)
return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
num_periods, alpha, beta, name)
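# Illustrative sketch (not part of the TensorFlow API): the linear cosine
# multiplier documented above, rewritten in plain Python for a single scalar
# step. Defaults mirror `linear_cosine_decay` above.
def _linear_cosine_multiplier_sketch(step, decay_steps, num_periods=0.5,
                                     alpha=0.0, beta=0.001):
  """Returns (alpha + linear_decay) * cosine_decay + beta for one step."""
  step = min(step, decay_steps)
  linear_decayed = float(decay_steps - step) / decay_steps
  fraction = 2.0 * num_periods * float(step) / decay_steps
  cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
  return (alpha + linear_decayed) * cosine_decayed + beta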
@tf_export("train.noisy_linear_cosine_decay", v1=[])
def noisy_linear_cosine_decay(learning_rate,
global_step,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies noisy linear cosine decay to the learning rate.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a noisy linear
cosine decay function to a provided initial learning rate.
It requires a `global_step` value to compute the decayed learning rate.
You can just pass a TensorFlow variable that you increment at each
training step.
The function returns a no-arg callable that produces the decayed learning
rate. This can be useful for changing the learning rate value across
different invocations of optimizer functions. It is computed as:
```python
global_step = min(global_step, decay_steps)
  linear_decay = (decay_steps - global_step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * global_step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
decayed_learning_rate = learning_rate * decayed
```
  where eps_t is 0-centered Gaussian noise with variance
initial_variance / (1 + global_step) ** variance_decay
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = tf.train.noisy_linear_cosine_decay(learning_rate, global_step,
decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
Returns:
A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
of the same type as `learning_rate`.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("noisy linear cosine decay requires global_step")
def decayed_lr(learning_rate, global_step, decay_steps, initial_variance,
variance_decay, num_periods, alpha, beta, name):
"""Helper to recompute learning rate; most helpful in eager-mode."""
with ops.name_scope(name, "NoisyLinearCosineDecay",
[learning_rate, global_step]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
decay_steps = math_ops.cast(decay_steps, dtype)
initial_variance = math_ops.cast(initial_variance, dtype)
variance_decay = math_ops.cast(variance_decay, dtype)
num_periods = math_ops.cast(num_periods, dtype)
alpha = math_ops.cast(alpha, dtype)
beta = math_ops.cast(beta, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
variance = initial_variance / (
math_ops.pow(1.0 + global_step_recomp, variance_decay))
std = math_ops.sqrt(variance)
noisy_linear_decayed = (
linear_decayed + random_ops.random_normal(
linear_decayed.shape, stddev=std))
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
noisy_linear_cosine_decayed = (
(alpha + noisy_linear_decayed) * cosine_decayed + beta)
return math_ops.multiply(
learning_rate, noisy_linear_cosine_decayed, name=name)
return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
initial_variance, variance_decay, num_periods, alpha,
beta, name)
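# Illustrative sketch (not part of the TensorFlow API): the standard deviation
# of the noise term eps_t documented above, for a given scalar step. Default
# values mirror `noisy_linear_cosine_decay` above.
def _noise_stddev_sketch(step, initial_variance=1.0, variance_decay=0.55):
  return math.sqrt(initial_variance / (1.0 + step)**variance_decay)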
| hfp/tensorflow-xsmm | tensorflow/python/training/learning_rate_decay_v2.py | Python | apache-2.0 | 38,287 | [
"Gaussian"
] | 7c7975337abedbc10e7c981e1f52595b8fb3f98f23478902439fce05f7bc72dd |
#!/usr/bin/env python3
#
#
# Mike Dvorak
# Sail Tactics
# mike@sailtactics.com
#
# Created: 2014-11-05
# Modified: 2016-06-13
#
#
# Description: Inserts an arbitrary WRF netCDF file into a WindDB at the specified height.
#
# Returns -1 if there is an IntegrityError, which is triggered by a duplicate key error if the data already exists.
#
import configparser
import fnmatch
import os
import os.path
import sys
import re
dir = os.path.dirname(__file__)
sys.path.append(os.path.join(dir, '../'))
import argparse
from windb2.model.wrf import heightinterpfile
from windb2.model.wrf.copyvar import copyvar
from windb2.model.wrf import config
import logging
# Get the command line opts
parser = argparse.ArgumentParser()
parser.add_argument("ncfile", type=str, help="WRF netCDF filename to interpolate")
parser.add_argument("-c", "--copy", help="Copy WRF variables to the interp file",
action="store_true")
parser.add_argument("-o", "--overwrite", help="Overwrite an existing interp file",
action="store_true")
args = parser.parse_args()
windb2_config = config.Windb2WrfConfigParser('windb2-wrf.json').config
# Set up logging
logger = logging.getLogger('windb2')
try:
logger.setLevel(windb2_config['loglevel'])
except KeyError:
logger.setLevel(logging.INFO)
logging.basicConfig()
# Extension
interp_extension = '-height-interp.nc'
# Get rid of escaped colon characters that are often added in Unix shells
ncfile_cleansed = re.sub(r'[\\]', '', args.ncfile)
# Check to see if the file already exists and abort if 'overwrite' is not enabled
if not args.overwrite and os.path.exists(ncfile_cleansed + interp_extension):
logger.error('Interp file already exists and the overwrite option is not enabled: {}'.format(ncfile_cleansed + interp_extension))
sys.exit(-3)
elif args.overwrite and os.path.exists(ncfile_cleansed + interp_extension):
logger.info('Overwriting existing interp file: {}'.format(ncfile_cleansed + interp_extension))
# Interpolate this file and leave the file open if we're copying WRF vars
close_file = True if args.copy else False
heightinterpfile.HeightInterpFile(windb2_config).interp_file(ncfile_cleansed, close_file=close_file)
# Copy of WRF vars
if args.copy:
# Get the WRF vars to copy
wrf_vars = []
for key, val in windb2_config['vars'].items():
try:
val['copy'] # Generates an exception if not there
wrf_vars.append(key)
        except KeyError:
            # This variable has no 'copy' flag set; nothing to copy for it.
            pass
# Exit if nothing to do
if len(wrf_vars) == 0:
exit(0)
# Copy the vars
copyvar(wrf_vars, ncfile_cleansed, ncfile_cleansed + interp_extension)
| sailorsenergy/windb2 | bin/interpolate-wrf-file.py | Python | gpl-3.0 | 2,667 | [
"NetCDF"
] | f062f546ecb1a6532a91df8c6629f76a6dc857fdb1b12e563b36a23249f0df48 |
#!/usr/bin/env python
"""
Show storage quotas for specified users or for all registered users if nobody is specified
Example:
$ dirac-admin-user-quota
------------------------------
Username | Quota (GB)
------------------------------
atsareg | None
msapunov | None
vhamar | None
------------------------------
"""
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
    Script.registerArgument(["User: list of users or comma-separated users"], mandatory=False)
_, users = Script.parseCommandLine()
from DIRAC import gLogger, gConfig
if not users:
res = gConfig.getSections("/Registry/Users")
if not res["OK"]:
gLogger.error("Failed to retrieve user list from CS", res["Message"])
DIRAC.exit(2)
users = res["Value"]
gLogger.notice("-" * 30)
gLogger.notice("%s|%s" % ("Username".ljust(15), "Quota (GB)".rjust(15)))
gLogger.notice("-" * 30)
for user in sorted(users):
quota = gConfig.getValue("/Registry/Users/%s/Quota" % user, 0)
if not quota:
quota = gConfig.getValue("/Registry/DefaultStorageQuota")
gLogger.notice("%s|%s" % (user.ljust(15), str(quota).rjust(15)))
gLogger.notice("-" * 30)
DIRAC.exit(0)
if __name__ == "__main__":
main()
| DIRACGrid/DIRAC | src/DIRAC/DataManagementSystem/scripts/dirac_admin_user_quota.py | Python | gpl-3.0 | 1,456 | [
"DIRAC"
] | 4cff3fca4e58bd9f250af65987d564c30ede2f0f9eb3a8d9e09b486ad4ad9127 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS Instructor Dashboard.
"""
from ..helpers import UniqueCourseTest, get_modal_alert
from ...pages.common.logout import LogoutPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage
from ...fixtures.course import CourseFixture
class AutoEnrollmentWithCSVTest(UniqueCourseTest):
"""
End-to-end tests for Auto-Registration and enrollment functionality via CSV file.
"""
def setUp(self):
super(AutoEnrollmentWithCSVTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
# login as an instructor
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
# go to the membership page on the instructor dashboard
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
self.auto_enroll_section = instructor_dashboard_page.select_membership().select_auto_enroll_section()
def test_browse_and_upload_buttons_are_visible(self):
"""
Scenario: On the Membership tab of the Instructor Dashboard, Auto-Enroll Browse and Upload buttons are visible.
Given that I am on the Membership tab on the Instructor Dashboard
Then I see the 'REGISTER/ENROLL STUDENTS' section on the page with the 'Browse' and 'Upload' buttons
"""
self.assertTrue(self.auto_enroll_section.is_file_attachment_browse_button_visible())
self.assertTrue(self.auto_enroll_section.is_upload_button_visible())
def test_clicking_file_upload_button_without_file_shows_error(self):
"""
Scenario: Clicking on the upload button without specifying a CSV file results in error.
Given that I am on the Membership tab on the Instructor Dashboard
When I click the Upload Button without specifying a CSV file
Then I should be shown an Error Notification
And The Notification message should read 'File is not attached.'
"""
self.auto_enroll_section.click_upload_file_button()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "File is not attached.")
def test_uploading_correct_csv_file_results_in_success(self):
"""
Scenario: Uploading a CSV with correct data results in Success.
Given that I am on the Membership tab on the Instructor Dashboard
When I select a csv file with correct data and click the Upload Button
Then I should be shown a Success Notification.
"""
self.auto_enroll_section.upload_correct_csv_file()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_SUCCESS))
def test_uploading_csv_file_with_bad_data_results_in_errors_and_warnings(self):
"""
Scenario: Uploading a CSV with incorrect data results in error and warnings.
Given that I am on the Membership tab on the Instructor Dashboard
When I select a csv file with incorrect data and click the Upload Button
Then I should be shown an Error Notification
And a corresponding Error Message.
And I should be shown a Warning Notification
And a corresponding Warning Message.
"""
self.auto_enroll_section.upload_csv_file_with_errors_warnings()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "Data in row #2 must have exactly four columns: email, username, full name, and country")
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_WARNING))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_WARNING), "ename (d@a.com): (An account with email d@a.com exists but the provided username ename is different. Enrolling anyway with d@a.com.)")
def test_uploading_non_csv_file_results_in_error(self):
"""
Scenario: Uploading an image file for auto-enrollment results in error.
Given that I am on the Membership tab on the Instructor Dashboard
When I select an image file (a non-csv file) and click the Upload Button
Then I should be shown an Error Notification
And The Notification message should read 'Make sure that the file you upload is in CSV..'
"""
self.auto_enroll_section.upload_non_csv_file()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "Make sure that the file you upload is in CSV format with no extraneous characters or rows.")
class EntranceExamGradeTest(UniqueCourseTest):
"""
Tests for Entrance exam specific student grading tasks.
"""
def setUp(self):
super(EntranceExamGradeTest, self).setUp()
self.course_info.update({"settings": {"entrance_exam_enabled": "true"}})
CourseFixture(**self.course_info).install()
self.student_identifier = "johndoe_saee@example.com"
# Create the user (automatically logs us in)
AutoAuthPage(
self.browser,
username="johndoe_saee",
email=self.student_identifier,
course_id=self.course_id,
staff=False
).visit()
LogoutPage(self.browser).visit()
# login as an instructor
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
# go to the student admin page on the instructor dashboard
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
self.student_admin_section = instructor_dashboard_page.select_student_admin()
def test_input_text_and_buttons_are_visible(self):
"""
Scenario: On the Student admin tab of the Instructor Dashboard, Student Email input box,
Reset Student Attempt, Rescore Student Submission, Delete Student State for entrance exam
and Show Background Task History for Student buttons are visible
Given that I am on the Student Admin tab on the Instructor Dashboard
Then I see Student Email input box, Reset Student Attempt, Rescore Student Submission,
Delete Student State for entrance exam and Show Background Task History for Student buttons
"""
self.assertTrue(self.student_admin_section.is_student_email_input_visible())
self.assertTrue(self.student_admin_section.is_reset_attempts_button_visible())
self.assertTrue(self.student_admin_section.is_rescore_submission_button_visible())
self.assertTrue(self.student_admin_section.is_delete_student_state_button_visible())
self.assertTrue(self.student_admin_section.is_background_task_history_button_visible())
def test_clicking_reset_student_attempts_button_without_email_shows_error(self):
"""
Scenario: Clicking on the Reset Student Attempts button without entering student email
address or username results in error.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Reset Student Attempts Button under Entrance Exam Grade
Adjustment without enter an email address
Then I should be shown an Error Notification
And The Notification message should read 'Please enter a student email address or username.'
"""
self.student_admin_section.click_reset_attempts_button()
self.assertEqual(
'Please enter a student email address or username.',
self.student_admin_section.top_notification.text[0]
)
def test_clicking_reset_student_attempts_button_with_success(self):
"""
Scenario: Clicking on the Reset Student Attempts button with valid student email
address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Reset Student Attempts Button under Entrance Exam Grade
Adjustment after entering a valid student
email address or username
Then I should be shown an alert with success message
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_reset_attempts_button()
alert = get_modal_alert(self.student_admin_section.browser)
alert.dismiss()
def test_clicking_reset_student_attempts_button_with_error(self):
"""
Scenario: Clicking on the Reset Student Attempts button with email address or username
        of a non-existent student should result in an error message.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Reset Student Attempts Button under Entrance Exam Grade
        Adjustment after entering a non-existent student email address or username
Then I should be shown an error message
"""
self.student_admin_section.set_student_email('non_existing@example.com')
self.student_admin_section.click_reset_attempts_button()
self.student_admin_section.wait_for_ajax()
self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)
def test_clicking_rescore_submission_button_with_success(self):
"""
Scenario: Clicking on the Rescore Student Submission button with valid student email
address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Rescore Student Submission Button under Entrance Exam Grade
Adjustment after entering a valid student email address or username
Then I should be shown an alert with success message
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_rescore_submissions_button()
alert = get_modal_alert(self.student_admin_section.browser)
alert.dismiss()
def test_clicking_rescore_submission_button_with_error(self):
"""
Scenario: Clicking on the Rescore Student Submission button with email address or username
        of a non-existent student should result in an error message.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Rescore Student Submission Button under Entrance Exam Grade
        Adjustment after entering a non-existent student email address or username
Then I should be shown an error message
"""
self.student_admin_section.set_student_email('non_existing@example.com')
self.student_admin_section.click_rescore_submissions_button()
self.student_admin_section.wait_for_ajax()
self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)
def test_clicking_skip_entrance_exam_button_with_success(self):
"""
Scenario: Clicking on the Let Student Skip Entrance Exam button with
valid student email address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Let Student Skip Entrance Exam Button under
Entrance Exam Grade Adjustment after entering a valid student
email address or username
Then I should be shown an alert with success message
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_skip_entrance_exam_button()
#first we have window.confirm
alert = get_modal_alert(self.student_admin_section.browser)
alert.accept()
# then we have alert confirming action
alert = get_modal_alert(self.student_admin_section.browser)
alert.dismiss()
def test_clicking_skip_entrance_exam_button_with_error(self):
"""
Scenario: Clicking on the Let Student Skip Entrance Exam button with
        email address or username of a non-existent student should result in an error message.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Let Student Skip Entrance Exam Button under
        Entrance Exam Grade Adjustment after entering a non-existent
student email address or username
Then I should be shown an error message
"""
self.student_admin_section.set_student_email('non_existing@example.com')
self.student_admin_section.click_skip_entrance_exam_button()
#first we have window.confirm
alert = get_modal_alert(self.student_admin_section.browser)
alert.accept()
self.student_admin_section.wait_for_ajax()
self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)
def test_clicking_delete_student_attempts_button_with_success(self):
"""
Scenario: Clicking on the Delete Student State for entrance exam button
with valid student email address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Delete Student State for entrance exam Button
under Entrance Exam Grade Adjustment after entering a valid student
email address or username
Then I should be shown an alert with success message
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_delete_student_state_button()
alert = get_modal_alert(self.student_admin_section.browser)
alert.dismiss()
def test_clicking_delete_student_attempts_button_with_error(self):
"""
Scenario: Clicking on the Delete Student State for entrance exam button
        with the email address or username of a non-existent student should result
        in an error message.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Delete Student State for entrance exam Button
        under Entrance Exam Grade Adjustment after entering a non-existent student
email address or username
Then I should be shown an error message
"""
self.student_admin_section.set_student_email('non_existing@example.com')
self.student_admin_section.click_delete_student_state_button()
self.student_admin_section.wait_for_ajax()
self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)
def test_clicking_task_history_button_with_success(self):
"""
Scenario: Clicking on the Show Background Task History for Student
with valid student email address or username should result in table of tasks.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Show Background Task History for Student Button
under Entrance Exam Grade Adjustment after entering a valid student
email address or username
        Then I should be shown a table listing all background tasks
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_task_history_button()
self.assertTrue(self.student_admin_section.is_background_task_history_table_visible())
| eestay/edx-platform | common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py | Python | agpl-3.0 | 16,277 | [
"VisIt"
] | 6de68b7d351a097b666c676ff6488f4805dbeb934a328fb0329722f56e6193d2 |
# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
from __future__ import unicode_literals
from portage.cache import fs_template
from portage.cache import cache_errors
import errno
import io
import stat
import sys
import os as _os
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.exception import InvalidData
from portage.versions import _pkg_str
if sys.hexversion >= 0x3000000:
# pylint: disable=W0622
long = int
class database(fs_template.FsBased):
autocommits = True
def __init__(self, *args, **config):
super(database,self).__init__(*args, **config)
self.location = os.path.join(self.location,
self.label.lstrip(os.path.sep).rstrip(os.path.sep))
write_keys = set(self._known_keys)
write_keys.add("_eclasses_")
write_keys.add("_%s_" % (self.validation_chf,))
self._write_keys = sorted(write_keys)
if not self.readonly and not os.path.exists(self.location):
self._ensure_dirs()
def _getitem(self, cpv):
# Don't use os.path.join, for better performance.
fp = self.location + _os.sep + cpv
try:
with io.open(_unicode_encode(fp,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace') as myf:
lines = myf.read().split("\n")
if not lines[-1]:
lines.pop()
d = self._parse_data(lines, cpv)
if '_mtime_' not in d:
# Backward compatibility with old cache
# that uses mtime mangling.
d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
return d
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise cache_errors.CacheCorruption(cpv, e)
raise KeyError(cpv, e)
def _parse_data(self, data, cpv):
try:
return dict( x.split("=", 1) for x in data )
except ValueError as e:
# If a line is missing an "=", the split length is 1 instead of 2.
raise cache_errors.CacheCorruption(cpv, e)
def _setitem(self, cpv, values):
s = cpv.rfind("/")
fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
try:
myf = io.open(_unicode_encode(fp,
encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='backslashreplace')
except (IOError, OSError) as e:
if errno.ENOENT == e.errno:
try:
self._ensure_dirs(cpv)
myf = io.open(_unicode_encode(fp,
encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='backslashreplace')
except (OSError, IOError) as e:
raise cache_errors.CacheCorruption(cpv, e)
else:
raise cache_errors.CacheCorruption(cpv, e)
try:
for k in self._write_keys:
v = values.get(k)
if not v:
continue
# NOTE: This format string requires unicode_literals, so that
# k and v are coerced to unicode, in order to prevent TypeError
# when writing raw bytes to TextIOWrapper with Python 2.
myf.write("%s=%s\n" % (k, v))
finally:
myf.close()
self._ensure_access(fp)
#update written. now we move it.
new_fp = os.path.join(self.location,cpv)
try:
os.rename(fp, new_fp)
except (OSError, IOError) as e:
os.remove(fp)
raise cache_errors.CacheCorruption(cpv, e)
def _delitem(self, cpv):
# import pdb;pdb.set_trace()
try:
os.remove(os.path.join(self.location,cpv))
except OSError as e:
if errno.ENOENT == e.errno:
raise KeyError(cpv)
else:
raise cache_errors.CacheCorruption(cpv, e)
def __contains__(self, cpv):
return os.path.exists(os.path.join(self.location, cpv))
def __iter__(self):
"""generator for walking the dir struct"""
dirs = [(0, self.location)]
len_base = len(self.location)
while dirs:
depth, dir_path = dirs.pop()
try:
dir_list = os.listdir(dir_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
del e
continue
for l in dir_list:
p = os.path.join(dir_path, l)
try:
st = os.lstat(p)
except OSError:
# Cache entry disappeared.
continue
if stat.S_ISDIR(st.st_mode):
# Only recurse 1 deep, in order to avoid iteration over
# entries from another nested cache instance. This can
# happen if the user nests an overlay inside
# /usr/portage/local as in bug #302764.
if depth < 1:
dirs.append((depth+1, p))
continue
try:
yield _pkg_str(p[len_base+1:])
except InvalidData:
continue
class md5_database(database):
validation_chf = 'md5'
store_eclass_paths = False
| ptisserand/portage | pym/portage/cache/flat_hash.py | Python | gpl-2.0 | 4,610 | [
"Brian"
] | b9ae3b71a9ab2b9ce39c473922f184378904450819fc4fe18c998ad37d5cc279 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 30.04.2015
@author: marscher
'''
from tempfile import NamedTemporaryFile
import os
import tempfile
import unittest
from unittest import mock
from pyemma.coordinates import api
from pyemma.coordinates.data.feature_reader import FeatureReader
from pyemma.coordinates.data.numpy_filereader import NumPyFileReader
from pyemma.coordinates.data.py_csv_reader import PyCSVReader
from pyemma.coordinates.data.util.traj_info_backends import SqliteDB
from pyemma.coordinates.data.util.traj_info_cache import TrajectoryInfoCache
from pyemma.coordinates.tests.util import create_traj
from pyemma.datasets import get_bpti_test_data
from pyemma.util import config
from pyemma.util.contexts import settings
from pyemma.util.files import TemporaryDirectory
import mdtraj
import pkg_resources
import pyemma
import numpy as np
xtcfiles = get_bpti_test_data()['trajs']
pdbfile = get_bpti_test_data()['top']
class TestTrajectoryInfoCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.old_instance = TrajectoryInfoCache.instance()
config.use_trajectory_lengths_cache = True
def setUp(self):
self.work_dir = tempfile.mkdtemp(prefix="traj_cache_test")
self.tmpfile = tempfile.mktemp(dir=self.work_dir)
self.db = TrajectoryInfoCache(self.tmpfile)
# overwrite TrajectoryInfoCache._instance with self.db...
TrajectoryInfoCache._instance = self.db
config.use_trajectory_lengths_cache = True
def tearDown(self):
self.db.close()
os.unlink(self.tmpfile)
import shutil
shutil.rmtree(self.work_dir, ignore_errors=True)
@classmethod
def tearDownClass(cls):
TrajectoryInfoCache._instance = cls.old_instance
config.use_trajectory_lengths_cache = False
def test_get_instance(self):
# test for exceptions in singleton creation
inst = TrajectoryInfoCache.instance()
inst.current_db_version
self.assertIs(inst, self.db)
def test_store_load_traj_info(self):
x = np.random.random((10, 3))
from pyemma.util._config import Config
my_conf = Config()
my_conf.cfg_dir = self.work_dir
with mock.patch('pyemma.coordinates.data.util.traj_info_cache.config', my_conf):
with NamedTemporaryFile(delete=False) as fh:
np.savetxt(fh.name, x)
reader = api.source(fh.name)
info = self.db[fh.name, reader]
self.db.close()
self.db.__init__(self.db._database.filename)
info2 = self.db[fh.name, reader]
self.assertEqual(info2, info)
def test_exceptions(self):
# in accessible files
not_existant = ''.join(
chr(i) for i in np.random.randint(65, 90, size=10)) + '.npy'
bad = [not_existant] # should be unaccessible or non existent
with self.assertRaises(ValueError) as cm:
api.source(bad)
assert bad[0] in cm.exception.message
# empty files
with NamedTemporaryFile(delete=False) as f:
f.close()
with self.assertRaises(ValueError) as cm:
api.source(f.name)
assert f.name in cm.exception.message
# bogus files
with NamedTemporaryFile(suffix='.npy', delete=False) as f:
x = np.array([1, 2, 3])
np.save(f, x)
with open(f.name, 'wb') as f2:
f2.write(b'asdf')
with self.assertRaises(IOError) as cm:
api.source(f.name)
def test_featurereader_xtc(self):
# cause cache failures
with settings(use_trajectory_lengths_cache=False):
reader = FeatureReader(xtcfiles, pdbfile)
results = {}
for f in xtcfiles:
traj_info = self.db[f, reader]
results[f] = traj_info.ndim, traj_info.length, traj_info.offsets
expected = {}
for f in xtcfiles:
with mdtraj.open(f) as fh:
length = len(fh)
ndim = fh.read(1)[0].shape[1]
offsets = fh.offsets if hasattr(fh, 'offsets') else []
expected[f] = ndim, length, offsets
np.testing.assert_equal(results, expected)
def test_npy_reader(self):
lengths_and_dims = [(7, 3), (23, 3), (27, 3)]
data = [
np.empty((n, dim)) for n, dim in lengths_and_dims]
files = []
with TemporaryDirectory() as td:
for i, x in enumerate(data):
fn = os.path.join(td, "%i.npy" % i)
np.save(fn, x)
files.append(fn)
reader = NumPyFileReader(files)
# cache it and compare
results = {f: (self.db[f, reader].length, self.db[f, reader].ndim,
self.db[f, reader].offsets) for f in files}
expected = {f: (len(data[i]), data[i].shape[1], [])
for i, f in enumerate(files)}
np.testing.assert_equal(results, expected)
def test_csvreader(self):
data = np.random.random((101, 3))
fn = tempfile.mktemp()
try:
np.savetxt(fn, data)
# calc offsets
offsets = [0]
with open(fn, PyCSVReader.DEFAULT_OPEN_MODE) as new_fh:
while new_fh.readline():
offsets.append(new_fh.tell())
reader = PyCSVReader(fn)
assert reader.dimension() == 3
trajinfo = reader._get_traj_info(fn)
np.testing.assert_equal(offsets, trajinfo.offsets)
finally:
os.unlink(fn)
def test_fragmented_reader(self):
top_file = pkg_resources.resource_filename(__name__, 'data/test.pdb')
trajfiles = []
nframes = []
with TemporaryDirectory() as wd:
for _ in range(3):
f, _, l = create_traj(top_file, dir=wd)
trajfiles.append(f)
nframes.append(l)
# three trajectories: one consisting of all three, one consisting of the first,
# one consisting of the first and the last
reader = api.source(
[trajfiles, [trajfiles[0]], [trajfiles[0], trajfiles[2]]], top=top_file)
np.testing.assert_equal(reader.trajectory_lengths(),
[sum(nframes), nframes[0], nframes[0] + nframes[2]])
def test_feature_reader_xyz(self):
traj = mdtraj.load(xtcfiles, top=pdbfile)
length = len(traj)
with NamedTemporaryFile(mode='wb', suffix='.xyz', delete=False) as f:
fn = f.name
traj.save_xyz(fn)
f.close()
reader = pyemma.coordinates.source(fn, top=pdbfile)
self.assertEqual(reader.trajectory_length(0), length)
def test_data_in_mem(self):
# make sure cache is not used for data in memory!
data = [np.empty((3, 3))] * 3
api.source(data)
self.assertEqual(self.db.num_entries, 0)
def test_old_db_conversion(self):
# prior 2.1, database only contained lengths (int as string) entries
# check conversion is happening
with NamedTemporaryFile(suffix='.npy', delete=False) as f:
db = TrajectoryInfoCache(None)
fn = f.name
np.save(fn, [1, 2, 3])
f.close() # windows sucks
reader = api.source(fn)
hash = db._get_file_hash(fn)
from pyemma.coordinates.data.util.traj_info_backends import DictDB
db._database = DictDB()
db._database.db_version = 0
info = db[fn, reader]
assert info.length == 3
assert info.ndim == 1
assert info.offsets == []
def test_corrupted_db(self):
with NamedTemporaryFile(mode='w', suffix='.dat', delete=False) as f:
f.write("makes no sense!!!!")
f.close()
name = f.name
import warnings
with warnings.catch_warnings(record=True) as cm:
warnings.simplefilter('always')
db = TrajectoryInfoCache(name)
assert len(cm) == 1
assert "corrupted" in str(cm[-1].message)
# ensure we can perform lookups on the broken db without exception.
r = api.source(xtcfiles[0], top=pdbfile)
db[xtcfiles[0], r]
def test_n_entries(self):
assert config.use_trajectory_lengths_cache
self.assertEqual(self.db.num_entries, 0)
assert TrajectoryInfoCache._instance is self.db
pyemma.coordinates.source(xtcfiles, top=pdbfile)
self.assertEqual(self.db.num_entries, len(xtcfiles))
def test_max_n_entries(self):
data = [np.random.random((10, 3)) for _ in range(20)]
max_entries = 10
config.traj_info_max_entries = max_entries
files = []
with TemporaryDirectory() as td:
for i, arr in enumerate(data):
f = os.path.join(td, "%s.npy" % i)
np.save(f, arr)
files.append(f)
pyemma.coordinates.source(files)
self.assertLessEqual(self.db.num_entries, max_entries)
self.assertGreater(self.db.num_entries, 0)
def test_max_size(self):
data = [np.random.random((150, 10)) for _ in range(150)]
max_size = 1
files = []
with TemporaryDirectory() as td, settings(traj_info_max_size=max_size, show_progress_bars=False):
for i, arr in enumerate(data):
f = os.path.join(td, "%s.txt" % i)
# save as txt to enforce creation of offsets
np.savetxt(f, arr)
files.append(f)
pyemma.coordinates.source(files)
self.assertLessEqual(os.stat(self.db.database_filename).st_size / 1024, config.traj_info_max_size)
self.assertGreater(self.db.num_entries, 0)
def test_no_working_directory(self):
# this is the case as long as the user has not yet created a config directory via config.save()
self.db._database = SqliteDB(filename=None)
# trigger caching
pyemma.coordinates.source(xtcfiles, top=pdbfile)
def test_no_sqlite(self):
# create new instance (init has to be called, install temporary import hook to raise importerror for sqlite3
import sys
del sys.modules['sqlite3']
class meta_ldr(object):
def find_module(self, fullname, path):
if fullname.startswith('sqlite3'):
return self
def load_module(self, fullname, path=None):
raise ImportError()
import warnings
try:
sys.meta_path.insert(0, meta_ldr())
# import sqlite3
with warnings.catch_warnings(record=True) as cw:
db = TrajectoryInfoCache()
self.assertNotIsInstance(db._database, SqliteDB)
self.assertEqual(len(cw), 1)
self.assertIn("sqlite3 package not available", cw[0].message.args[0])
finally:
del sys.meta_path[0]
def test_in_memory_db(self):
""" new instance, not yet saved to disk, no lru cache avail """
old_cfg_dir = config.cfg_dir
try:
config._cfg_dir = ''
db = TrajectoryInfoCache()
reader = pyemma.coordinates.source(xtcfiles, top=pdbfile)
info = db[xtcfiles[0], reader]
self.assertIsInstance(db._database, SqliteDB)
directory = db._database._database_from_key(info.hash_value)
assert directory is None
finally:
from pyemma.util.exceptions import ConfigDirectoryException
try:
config.cfg_dir = old_cfg_dir
except ConfigDirectoryException:
pass
def test_stress(self):
arrays = [np.empty((5, 2))] * 100
npy_files = [os.path.join(self.work_dir, '{}.npy'.format(i)) for i in range(len(arrays))]
[np.save(f, x) for f, x in zip(npy_files, arrays)]
env = os.environ.copy()
env['PYEMMA_CFG_DIR'] = self.work_dir
import subprocess
import sys
import time
script = 'import pyemma; pyemma.coordinates.source({files})' \
.format(cfg_dir=self.work_dir, files=npy_files)
failed = False
procs = [subprocess.Popen([sys.executable, '-c', script], env=env) for _ in range(10)]
error = None
while procs:
for proc in procs:
retcode = proc.poll()
if retcode is not None:
if retcode != 0:
pass
#stdout = proc.stdout.read()
#stderr = proc.stderr.read()
#error = '{};;{}'.format(stdout, stderr)
procs.remove(proc)
#break
else: # No process is done, wait a bit and check again.
time.sleep(.1)
continue
# Here, `proc` has finished with return code `retcode`
if retcode is not None and retcode != 0:
print('process failed with {}'.format(retcode))
failed = True
break
self.assertTrue(not failed, msg=error)
if __name__ == "__main__":
unittest.main()
| markovmodel/PyEMMA | pyemma/coordinates/tests/test_traj_info_cache.py | Python | lgpl-3.0 | 14,155 | [
"MDTraj"
] | 8078a0e4ad17ec62a162a19a2ad2cc903a765c83328343c1f397f014b6f4ad3f |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import _validate
def ace(counts, rare_threshold=10):
"""Calculate the ACE metric (Abundance-based Coverage Estimator).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
rare_threshold : int, optional
Threshold at which an OTU containing as many or fewer individuals will
be considered rare.
Returns
-------
double
Computed ACE metric.
Raises
------
ValueError
If every rare OTU is a singleton.
Notes
-----
ACE was first introduced in [1]_ and [2]_. The implementation here is based
on the description given in the EstimateS manual [3]_.
If no rare OTUs exist, returns the number of abundant OTUs. The default
value of 10 for `rare_threshold` is based on [4]_.
If `counts` contains zeros, indicating OTUs which are known to exist in the
environment but did not appear in the sample, they will be ignored for the
purpose of calculating the number of rare OTUs.
References
----------
.. [1] Chao, A. & S.-M Lee. 1992 Estimating the number of classes via
sample coverage. Journal of the American Statistical Association 87,
210-217.
.. [2] Chao, A., M.-C. Ma, & M. C. K. Yang. 1993. Stopping rules and
estimation for recapture debugging with unequal failure rates.
Biometrika 80, 193-201.
.. [3] http://viceroy.eeb.uconn.edu/estimates/
.. [4] Chao, A., W.-H. Hwang, Y.-C. Chen, and C.-Y. Kuo. 2000. Estimating
the number of shared species in two communities. Statistica Sinica
10:227-246.
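    Examples
    --------
    A minimal, hypothetical example (counts chosen so that the rare OTUs are
    not all singletons, which would raise ``ValueError``):
    >>> from skbio.diversity.alpha import ace
    >>> result = ace([10, 12, 1, 1, 2, 3, 0, 5, 25])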
"""
counts = _validate(counts)
freq_counts = np.bincount(counts)
s_rare = _otus_rare(freq_counts, rare_threshold)
singles = freq_counts[1]
if singles > 0 and singles == s_rare:
raise ValueError("The only rare OTUs are singletons, so the ACE "
"metric is undefined. EstimateS suggests using "
"bias-corrected Chao1 instead.")
s_abun = _otus_abundant(freq_counts, rare_threshold)
if s_rare == 0:
return s_abun
n_rare = _number_rare(freq_counts, rare_threshold)
c_ace = 1 - singles / n_rare
top = s_rare * _number_rare(freq_counts, rare_threshold, gamma=True)
bottom = c_ace * n_rare * (n_rare - 1)
gamma_ace = (top / bottom) - 1
if gamma_ace < 0:
gamma_ace = 0
return s_abun + (s_rare / c_ace) + ((singles / c_ace) * gamma_ace)
def _otus_rare(freq_counts, rare_threshold):
"""Count number of rare OTUs."""
return freq_counts[1:rare_threshold + 1].sum()
def _otus_abundant(freq_counts, rare_threshold):
"""Count number of abundant OTUs."""
return freq_counts[rare_threshold + 1:].sum()
def _number_rare(freq_counts, rare_threshold, gamma=False):
"""Return number of individuals in rare OTUs.
``gamma=True`` generates the ``n_rare`` used for the variation coefficient.
"""
n_rare = 0
if gamma:
for i, j in enumerate(freq_counts[:rare_threshold + 1]):
n_rare = n_rare + (i * j) * (i - 1)
else:
for i, j in enumerate(freq_counts[:rare_threshold + 1]):
n_rare = n_rare + (i * j)
return n_rare
| jensreeder/scikit-bio | skbio/diversity/alpha/_ace.py | Python | bsd-3-clause | 3,672 | [
"scikit-bio"
] | 90bbd0d8767a9a37d09a54269f7f668fee5b2327f0be24fafcb737698a9209a4 |
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import matplotlib.pyplot as pyplot
import myplot
import csv
import Cdf
import correlation
import math
import random
import thinkstats
import rpy2.robjects as robjects
r = robjects.r
FORMATS = ['pdf', 'png']
def Regress(model, ys, ts, print_flag=False):
"""Run a linear regression using rpy2.
Returns a list of coefficients (which are rpy2.RVectors)
Use GetEst to extract the estimated coefficient from a coeff.
"""
t2 = [t**2 for t in ts]
# put the data into the R environment
robjects.globalenv['ts'] = robjects.FloatVector(ts)
robjects.globalenv['t2'] = robjects.FloatVector(t2)
robjects.globalenv['ys'] = robjects.FloatVector(ys)
model = r(model)
res = r.lm(model)
if print_flag:
PrintSummary(res)
coeffs = GetCoefficients(res)
return coeffs
def GetEst(coeff):
"""Extracts the estimated coefficient from a coeff."""
name, est, stderr = coeff
return est
def GetCoefficients(res):
"""Extracts coefficients from r.lm.
This is an awful function. It actually generates a text representation
of the results and then parses it. Ack!
Maybe the rpy2 interface (or it's documentation) will improve at
some point so this nonsense is no longer necessary.
"""
flag = False
lines = r.summary(res)
lines = str(lines)
coeffs = []
for line in lines.split('\n'):
line = line.strip()
if flag:
t = line.split()
if len(t) < 5:
break
name, est, stderr = t[0], float(t[1]), float(t[2])
coeffs.append((name, est, stderr))
# skip everything until we get to the coefficients
if line.startswith('Estimate'):
flag = True
return coeffs
def MakeFit(model, ys, ts, extrap=1):
"""Fit a model to the data and return the fitted values."""
coeffs = Regress(model, ys, ts)
fts = ts + [ts[-1] + 1]
fys = EvalFit(coeffs, fts)
return fts, fys
def Residuals(model, ys, ts):
"""Fit a model to the data and return the residuals."""
coeffs = Regress(model, ys, ts, print_flag=True)
fys = EvalFit(coeffs, ts)
residuals = [fy - y for fy, y in zip(fys, ys)]
return residuals
def EvalFit(coeffs, ts):
"""Evaluate a fitted model at a sequence of locations.
coeffs: a list of coefficients as returned by rpy2
ts: locations to evaluate the model
Returns a list of fitted values.
"""
betas = [GetEst(coeff) for coeff in reversed(coeffs)]
fys = [Horner(betas, t) for t in ts]
return fys
def Horner(betas, t):
"""Use Horner's method to evaluate a polynomial.
betas: coefficients in decreasing order of power.
t: where to evaluate
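    Example: Horner([2, 3, 1], t) evaluates 2*t**2 + 3*t + 1.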
"""
total = 0
for beta in betas:
total = total * t + beta
return total
def MakeErrorModel(model, ys, ts, n=100):
"""Makes a model that captures sample error and residual error.
model: string representation of the regression model
ys: dependent variable
ts: explanatory variable
n: number of simulations to run
Returns a pair of models, where each model is a pair of rows.
"""
# estimate mean and stddev of the residuals
residuals = Residuals(model, ys, ts)
mu, var = thinkstats.MeanVar(residuals)
sig = math.sqrt(var)
# make the best fit
fts, fys = MakeFit(model, ys, ts)
# resample residuals and generate hypothetical fits
fits = []
for i in range(n):
fake_ys = [fy + random.gauss(mu, sig) for fy in fys[:-1]]
_, fake_fys = MakeFit(model, fake_ys, ts)
fits.append(fake_fys)
# find the 90% CI in each column
columns = zip(*fits)
sample_error = MakeStddev(columns)
total_error = MakeStddev(columns, mu, var)
return fts, sample_error, total_error
def MakeStddev(columns, mu2=0, var2=0):
"""Finds a confidence interval for each column.
Returns two rows: the low end of the intervals and the high ends.
"""
stats = [thinkstats.MeanVar(ys) for ys in columns]
min_fys = [mu1 + mu2 - 2 * math.sqrt(var1 + var2) for mu1, var1 in stats]
max_fys = [mu1 + mu2 + 2 * math.sqrt(var1 + var2) for mu1, var1 in stats]
return min_fys, max_fys
def MakeIntervals(columns, low=5, high=95):
"""Finds a confidence interval for each column.
Returns two rows: the low end of the intervals and the high ends.
"""
cdfs = [Cdf.MakeCdfFromList(ys) for ys in columns]
min_fys = [cdf.Percentile(low) for cdf in cdfs]
max_fys = [cdf.Percentile(high) for cdf in cdfs]
return min_fys, max_fys
def AddResidualError(columns, mu, sig):
"""Adds Gaussian noise to the data in columns.
columns: list of columns, where each column is a set of y-values
for a given t-value
mu, sig: parameters of the noise
"""
return [[y + random.gauss(mu, sig) for y in col]
for col in columns]
def ReadData(filename):
"""Reads a CSV file of data from HERI scores.
Args:
filename: string filename
Returns:
list of (score, number) pairs
"""
fp = open(filename)
res = []
for line in fp:
try:
t = [float(x) for x in line.split()]
res.append(t)
except ValueError:
pass
return zip(*res)
def MakePlot(ts, ys, model):
"""Generates a plot with the data, a fitted model, and error bars."""
pyplot.clf()
# shift the times to start at 0 (but use the originals for plots)
shift = ts[0]
tshift = [t-shift for t in ts]
# plot the error models
fts, sample_error, total_error = MakeErrorModel(model, ys, tshift)
fts = [t+shift for t in fts]
pyplot.fill_between(fts, *total_error, color='0.8', alpha=0.5, linewidth=0)
pyplot.fill_between(fts, *sample_error, color='0.6', alpha=0.5, linewidth=0)
# plot the estimated fit
fts, fys = MakeFit(model, ys, tshift)
fts = [t+shift for t in fts]
pyplot.plot(fts, fys, color='red', linewidth=2, alpha=0.5)
def PlotResiduals(ts, ys, model='ys ~ ts'):
    # NOTE: a model argument is required so this call matches
    # Residuals(model, ys, ts); the linear model default is an assumption.
    residuals = Residuals(model, ys, ts)
pyplot.clf()
pyplot.plot(ts, residuals, 'bo-', linewidth=2)
myplot.Save(root='heri5',
formats=FORMATS,
title='',
xlabel='',
ylabel='Residuals',
axis=[1968, 2012, -6, 6])
def PrintSummary(res):
"""Prints results from r.lm (just the parts we want)."""
flag = False
lines = r.summary(res)
lines = str(lines)
for line in lines.split('\n'):
# skip everything until we get to coefficients
if line.startswith('Coefficients'):
flag = True
if flag:
print line
print
def PlotReligious(filename):
"""Make plots showing percentage None broken down by college type.
filename: string filename
"""
fp = open(filename)
reader = csv.reader(fp)
header = reader.next()
years = []
rows = []
for t in reader:
year = int(t[0])
data = t[1:6]
try:
data = [float(x) for x in data]
except ValueError:
continue
years.append(year)
rows.append(data)
cols = zip(*rows)
labels = header[1:]
PlotReligiousSubset(years, cols, labels, 0, 3)
PlotReligiousSubset(years, cols, labels, 3, 5)
def PlotReligiousSubset(years, cols, labels, i, j):
"""Helper function that factors out common plotting code.
years: sequence of years
cols: list of columns to plot
labels: list of labels (corresponding to cols)
i,j: slice indices of the columns to plot
"""
pyplot.clf()
options = dict(linewidth=3, markersize=0, alpha=0.7)
for col, label in zip(cols[i:j], labels[i:j]):
pyplot.plot(years, col, label=label, **options)
root = 'heri.religious.%d.%d' % (i, j)
myplot.Save(root=root,
formats=FORMATS,
xlabel='Year',
ylabel='% None',
title='Religious preference')
def PlotReligious2(filename):
"""Makes a plot of the number/fraction of students at religious colleges.
filename: string filename
"""
fp = open(filename)
reader = csv.reader(fp)
header = reader.next()
years = [int(x) for x in header[1:]]
labels = []
cols = []
for t in reader:
label = t[0]
if label == '':
break
col = [float(x)/1000000.0 for x in t[1:]]
labels.append(label)
cols.append(col)
PlotReligiousScale2(years, cols, labels, flag='raw')
cols = PercentTotal(cols)
PlotReligiousScale2(years, cols, labels, flag='percent')
def PercentTotal(cols):
"""Converts the data in cols to percentages of total.
Modifies the columns.
cols: sequence of columns
"""
totals = []
for row in zip(*cols):
print row
total = sum(row)
totals.append(total)
for col in cols:
for i in range(len(col)):
col[i] /= totals[i] / 100
return cols
def PlotReligiousScale2(years, cols, labels, flag):
"""Helper function that factors out common plotting code.
years: sequence of years
cols: list of columns to plot
labels: list of labels (corresponding to cols)
flag: string 'raw' or 'percent'
"""
pyplot.clf()
options = dict(linewidth=3, markersize=0, alpha=0.7)
for col, label in zip(cols, labels):
pyplot.plot(years, col, label=label, **options)
root = 'heri.religious2.%s' % flag
ylabel = dict(raw='Enrollment (millions)',
percent='Enrollment (percent of total)')[flag]
axis = dict(raw=[1977, 2010, 0, 16],
percent=[1977, 2010, 0, 100])[flag]
myplot.Save(root=root,
formats=FORMATS,
xlabel='Year',
ylabel=ylabel,
title='Enrollment by college type',
axis=axis)
def main(script):
# make plots showing enrollment at private, religious and non-religious
# colleges
#PlotReligious2('heri.religious2.csv')
#return
upper = 2014
ts, ys = ReadData('heri.0')
MakePlot(ts, ys, model='ys ~ ts')
options = dict(linewidth=3, markersize=0, alpha=0.7)
pyplot.plot(ts, ys, color='purple', label='Change in no religion',
**options)
myplot.Save(root='heri12.0',
formats=FORMATS,
ylabel='Percentage points',
loc=2,
axis=[1967, upper, -3, 3])
ts, ys = ReadData('heri.1')
MakePlot(ts, ys, model='ys ~ ts + t2')
pyplot.plot(ts, ys, 'bs-', label='No religion', **options)
# add the actual value from 2012
myplot.Plot([2012], [23.8], 'bs')
myplot.Save(root='heri12.1',
formats=FORMATS,
ylabel='Percent',
loc=2,
axis=[1967, upper, 0, 30])
ts, ys = ReadData('heri.2')
MakePlot(ts, ys, model='ys ~ ts + t2')
pyplot.plot(ts, ys, 'go-', label='No attendance', **options)
# add the actual value from 2012
myplot.Plot([2012], [100 - 73.2], 'gs')
myplot.Save(root='heri12.2',
formats=FORMATS,
ylabel='Percent',
loc=2,
axis=[1967, upper, 0, 30])
print (2011 - 1973) * 0.03548 - 0.36036
if __name__ == '__main__':
import sys
main(*sys.argv)
| AllenDowney/HeriReligion | archive/heri12.py | Python | mit | 11,607 | [
"Gaussian"
] | 31caac991f1c72ed279669699d3f3b4d209aacea84002f16dad5faa2ed5b7871 |
from rest_framework import status
from rest_framework.authentication import \
SessionAuthentication, \
TokenAuthentication
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.decorators import authentication_classes, permission_classes
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.views import exception_handler
from rest_framework.exceptions import NotAuthenticated
from django.views.generic.detail import SingleObjectMixin
from django.core.exceptions import PermissionDenied
from repository import models
def custom_exception_handler(exc, context):
# Call REST framework's default exception handler first,
# to get the standard error response.
if isinstance(exc, NotAuthenticated):
response = Response(
{'detail': 'Not authenticated'},
status=status.HTTP_401_UNAUTHORIZED,
exception=True
)
else:
response = exception_handler(exc, context)
return response
@api_view(['GET'])
@authentication_classes((SessionAuthentication, TokenAuthentication))
@permission_classes((IsAuthenticated,))
def repository_api_root(request):
"""
The entry endpoint of our API.
"""
return Response({
'compensations': reverse('compensation-list', request=request),
'panel-templates': reverse('panel-template-list', request=request),
'panel-variants': reverse('panel-variant-list', request=request),
'site-panels': reverse('site-panel-list', request=request),
'markers': reverse('marker-list', request=request),
'fluorochromes': reverse('fluorochrome-list', request=request),
'specimens': reverse('specimen-list', request=request),
'permissions': reverse('permission-list', request=request),
'users': reverse('user-list', request=request),
'projects': reverse('project-list', request=request),
'cell_subset_labels': reverse(
'cell-subset-label-list',
request=request
),
'create_samples': reverse('create-sample', request=request),
'samples': reverse('sample-list', request=request),
'sample_metadata': reverse('sample-metadata-list', request=request),
'sample_collections': reverse(
'sample-collection-list', request=request),
'sample_collection_members': reverse(
'sample-collection-member-list', request=request),
'sites': reverse('site-list', request=request),
'subject_groups': reverse('subject-group-list', request=request),
'subjects': reverse('subject-list', request=request),
'visit_types': reverse('visit-type-list', request=request),
'stimulations': reverse('stimulation-list', request=request),
'workers': reverse('worker-list', request=request),
'subprocess_categories': reverse(
'subprocess-category-list', request=request),
'subprocess_implementations': reverse(
'subprocess-implementation-list', request=request),
'subprocess_inputs': reverse('subprocess-input-list', request=request),
'process_requests': reverse('process-request-list', request=request),
'process_request_stage2_create': reverse(
'process-request-stage2-create',
request=request
),
'process_request_inputs': reverse(
'process-request-input-list', request=request),
'assigned_process_requests': reverse(
'assigned-process-request-list', request=request),
'viable_process_requests': reverse(
'viable-process-request-list', request=request),
'clusters': reverse('cluster-list', request=request),
'cluster-labels': reverse('cluster-label-list', request=request),
'sample_clusters': reverse('sample-cluster-list', request=request),
'sample_cluster_components': reverse(
'sample-cluster-component-list',
request=request
)
})
class LoginRequiredMixin(object):
"""
View mixin to verify a user is logged in.
"""
authentication_classes = (SessionAuthentication, TokenAuthentication)
permission_classes = (IsAuthenticated,)
class AdminRequiredMixin(object):
"""
View mixin to verify a user is an administrator.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated, IsAdminUser)
class PermissionRequiredMixin(SingleObjectMixin):
"""
View mixin to verify a user has permission to a resource.
"""
def get_object(self, *args, **kwargs):
# TODO: see if we can check HTTP method (GET, PUT, etc.) to reduce
# duplicate code for modifying resources
obj = super(PermissionRequiredMixin, self).get_object(*args, **kwargs)
if hasattr(self, 'request'):
request = self.request
else:
raise PermissionDenied
if isinstance(obj, models.ProtectedModel):
if isinstance(obj, models.Project):
user_sites = models.Site.objects.get_sites_user_can_view(
request.user, obj)
if not obj.has_view_permission(request.user) and not (
user_sites.count() > 0):
raise PermissionDenied
elif not obj.has_view_permission(request.user):
raise PermissionDenied
return obj
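# Illustrative usage (the view and queryset names here are assumptions, not
# taken from this project): a detail view would combine the mixins, e.g.
#   class SampleDetail(LoginRequiredMixin, PermissionRequiredMixin,
#                      generics.RetrieveAPIView):
#       queryset = models.Sample.objects.all()
# so that get_object() raises PermissionDenied for objects the requesting
# user is not allowed to view.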
| whitews/ReFlow | repository/api_utils.py | Python | bsd-3-clause | 5,518 | [
"VisIt"
] | fa1aa7eeadfc3fc43222a974a11f4ad8d5157e108cdc8b88531b7cbb155a94bc |
# Autodetecting setup.py script for building the Python extensions
#
__version__ = "$Revision$"
import sys, os, imp, re, optparse
from glob import glob
from platform import machine as platform_machine
import sysconfig
from distutils import log
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.spawn import find_executable
# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
"""Add the directory 'dir' to the list 'dirlist' (at the front) if
1) 'dir' is not already in 'dirlist'
2) 'dir' actually exists, and is a directory."""
if dir is not None and os.path.isdir(dir) and dir not in dirlist:
dirlist.insert(0, dir)
def macosx_sdk_root():
"""
Return the directory of the current OSX SDK,
or '/' if no SDK was specified.
"""
cflags = sysconfig.get_config_var('CFLAGS')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
return sysroot
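# Illustrative: with CFLAGS containing '-isysroot /Developer/SDKs/MacOSX10.6.sdk'
# this returns '/Developer/SDKs/MacOSX10.6.sdk'; without an -isysroot flag it
# falls back to '/'.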
def is_macosx_sdk_path(path):
"""
Returns True if 'path' can be located in an OSX SDK
"""
return (path.startswith('/usr/') and not path.startswith('/usr/local')) or path.startswith('/System/')
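# Illustrative: '/usr/include' and '/System/Library/Frameworks' are treated as
# paths that can be resolved inside the SDK, while '/usr/local/include' and
# '/opt/local/include' are not.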
def find_file(filename, std_dirs, paths):
"""Searches for the directory where a given file is located,
and returns a possibly-empty list of additional directories, or None
if the file couldn't be found at all.
'filename' is the name of a file, such as readline.h or libcrypto.a.
'std_dirs' is the list of standard system directories; if the
file is found in one of them, no additional directives are needed.
'paths' is a list of additional locations to check; if the file is
found in one of them, the resulting list will contain the directory.
"""
if sys.platform == 'darwin':
# Honor the MacOSX SDK setting when one was specified.
# An SDK is a directory with the same structure as a real
# system, but with only header files and libraries.
sysroot = macosx_sdk_root()
# Check the standard locations
for dir in std_dirs:
f = os.path.join(dir, filename)
if sys.platform == 'darwin' and is_macosx_sdk_path(dir):
f = os.path.join(sysroot, dir[1:], filename)
if os.path.exists(f): return []
# Check the additional directories
for dir in paths:
f = os.path.join(dir, filename)
if sys.platform == 'darwin' and is_macosx_sdk_path(dir):
f = os.path.join(sysroot, dir[1:], filename)
if os.path.exists(f):
return [dir]
# Not found anywhere
return None
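# Illustrative return values: [] when the file sits in one of std_dirs (no
# extra -I/-L directives needed), ['/some/extra/dir'] when it is only found
# under 'paths', and None when it cannot be found at all.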
def find_library_file(compiler, libname, std_dirs, paths):
result = compiler.find_library_file(std_dirs + paths, libname)
if result is None:
return None
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
# Check whether the found file is in one of the standard directories
dirname = os.path.dirname(result)
for p in std_dirs:
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
if sys.platform == 'darwin' and is_macosx_sdk_path(p):
if os.path.join(sysroot, p[1:]) == dirname:
return [ ]
if p == dirname:
return [ ]
# Otherwise, it must have been in one of the additional directories,
# so we have to figure out which one.
for p in paths:
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
if sys.platform == 'darwin' and is_macosx_sdk_path(p):
if os.path.join(sysroot, p[1:]) == dirname:
return [ p ]
if p == dirname:
return [p]
else:
assert False, "Internal error: Path not found in std_dirs or paths"
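# Like find_file() above: [] means the library was found in a standard
# directory, [p] names the extra directory that must be added to the link
# path, and None means the compiler could not locate it.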
def module_enabled(extlist, modname):
"""Returns whether the module 'modname' is present in the list
of extensions 'extlist'."""
extlist = [ext for ext in extlist if ext.name == modname]
return len(extlist)
def find_module_file(module, dirlist):
"""Find a module in a set of possible folders. If it is not found
return the unadorned filename"""
list = find_file(module, [], dirlist)
if not list:
return module
if len(list) > 1:
log.info("WARNING: multiple copies of %s found"%module)
return os.path.join(list[0], module)
class PyBuildExt(build_ext):
def __init__(self, dist):
build_ext.__init__(self, dist)
self.failed = []
self.cross_compile = os.environ.get('CROSS_COMPILE_TARGET') == 'yes'
def build_extensions(self):
# Detect which modules should be compiled
missing = self.detect_modules()
# Remove modules that are present on the disabled list
extensions = [ext for ext in self.extensions
if ext.name not in disabled_module_list]
# move ctypes to the end, it depends on other modules
ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
if "_ctypes" in ext_map:
ctypes = extensions.pop(ext_map["_ctypes"])
extensions.append(ctypes)
self.extensions = extensions
# Fix up the autodetected modules, prefixing all the source files
# with Modules/ and adding Python's include directory to the path.
(srcdir,) = sysconfig.get_config_vars('srcdir')
if not srcdir:
# Maybe running on Windows but not using CYGWIN?
raise ValueError("No source directory; cannot proceed.")
srcdir = os.path.abspath(srcdir)
moddirlist = [os.path.join(srcdir, 'Modules')]
# Platform-dependent module source and include directories
incdirlist = []
platform = self.get_platform()
if platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
# Mac OS X also includes some mac-specific modules
macmoddir = os.path.join(srcdir, 'Mac/Modules')
moddirlist.append(macmoddir)
incdirlist.append(os.path.join(srcdir, 'Mac/Include'))
# Fix up the paths for scripts, too
self.distribution.scripts = [os.path.join(srcdir, filename)
for filename in self.distribution.scripts]
# Python header files
headers = [sysconfig.get_config_h_filename()]
headers += glob(os.path.join(sysconfig.get_path('platinclude'), "*.h"))
for ext in self.extensions[:]:
ext.sources = [ find_module_file(filename, moddirlist)
for filename in ext.sources ]
if ext.depends is not None:
ext.depends = [find_module_file(filename, moddirlist)
for filename in ext.depends]
else:
ext.depends = []
# re-compile extensions if a header file has been changed
ext.depends.extend(headers)
# platform specific include directories
ext.include_dirs.extend(incdirlist)
# If a module has already been built statically,
# don't build it here
if ext.name in sys.builtin_module_names:
self.extensions.remove(ext)
# Parse Modules/Setup and Modules/Setup.local to figure out which
# modules are turned on in the file.
remove_modules = []
for filename in ('Modules/Setup', 'Modules/Setup.local'):
input = text_file.TextFile(filename, join_lines=1)
while 1:
line = input.readline()
if not line: break
line = line.split()
remove_modules.append(line[0])
input.close()
for ext in self.extensions[:]:
if ext.name in remove_modules:
self.extensions.remove(ext)
# When you run "make CC=altcc" or something similar, you really want
# those environment variables passed into the setup.py phase. Here's
# a small set of useful ones.
compiler = os.environ.get('CC')
args = {}
# unfortunately, distutils doesn't let us provide separate C and C++
# compilers
if compiler is not None:
(ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
self.compiler.set_executables(**args)
build_ext.build_extensions(self)
longest = max([len(e.name) for e in self.extensions])
if self.failed:
longest = max(longest, max([len(name) for name in self.failed]))
def print_three_column(lst):
lst.sort(key=str.lower)
# guarantee zip() doesn't drop anything
while len(lst) % 3:
lst.append("")
for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
print "%-*s %-*s %-*s" % (longest, e, longest, f,
longest, g)
if missing:
print
print ("Python build finished, but the necessary bits to build "
"these modules were not found:")
print_three_column(missing)
print ("To find the necessary bits, look in setup.py in"
" detect_modules() for the module's name.")
print
if self.failed:
failed = self.failed[:]
print
print "Failed to build these modules:"
print_three_column(failed)
print
def build_extension(self, ext):
if ext.name == '_ctypes':
if not self.configure_ctypes(ext):
return
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsError), why:
self.announce('WARNING: building of extension "%s" failed: %s' %
(ext.name, sys.exc_info()[1]))
self.failed.append(ext.name)
return
# Import check will not work when cross-compiling.
if os.environ.has_key('PYTHONXCPREFIX'):
self.announce(
'WARNING: skipping import check for cross-compiled: "%s"' %
ext.name)
return
# Workaround for Mac OS X: The Carbon-based modules cannot be
# reliably imported into a command-line Python
if 'Carbon' in ext.extra_link_args:
self.announce(
'WARNING: skipping import check for Carbon-based "%s"' %
ext.name)
return
if self.get_platform() == 'darwin' and (
sys.maxint > 2**32 and '-arch' in ext.extra_link_args):
# Don't bother doing an import check when an extension was
# build with an explicit '-arch' flag on OSX. That's currently
# only used to build 32-bit only extensions in a 4-way
# universal build and loading 32-bit code into a 64-bit
# process will fail.
self.announce(
'WARNING: skipping import check for "%s"' %
ext.name)
return
# Workaround for Cygwin: Cygwin currently has fork issues when many
# modules have been imported
if self.get_platform() == 'cygwin':
self.announce('WARNING: skipping import check for Cygwin-based "%s"'
% ext.name)
return
ext_filename = os.path.join(
self.build_lib,
self.get_ext_filename(self.get_ext_fullname(ext.name)))
try:
imp.load_dynamic(ext.name, ext_filename)
except ImportError, why:
self.failed.append(ext.name)
self.announce('*** WARNING: renaming "%s" since importing it'
' failed: %s' % (ext.name, why), level=3)
assert not self.inplace
basename, tail = os.path.splitext(ext_filename)
newname = basename + "_failed" + tail
if os.path.exists(newname):
os.remove(newname)
os.rename(ext_filename, newname)
# XXX -- This relies on a Vile HACK in
# distutils.command.build_ext.build_extension(). The
# _built_objects attribute is stored there strictly for
# use here.
# If there is a failure, _built_objects may not be there,
# so catch the AttributeError and move on.
try:
for filename in self._built_objects:
os.remove(filename)
except AttributeError:
self.announce('unable to remove files (ignored)')
except:
exc_type, why, tb = sys.exc_info()
self.announce('*** WARNING: importing extension "%s" '
'failed with %s: %s' % (ext.name, exc_type, why),
level=3)
self.failed.append(ext.name)
def get_platform(self):
# Get value of sys.platform
for platform in ['cygwin', 'beos', 'darwin', 'atheos', 'osf1']:
if sys.platform.startswith(platform):
return platform
return sys.platform
def add_multiarch_paths(self):
# Debian/Ubuntu multiarch support.
# https://wiki.ubuntu.com/MultiarchSpec
if not find_executable('dpkg-architecture'):
return
tmpfile = os.path.join(self.build_temp, 'multiarch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system(
'dpkg-architecture -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
tmpfile)
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
multiarch_path_component = fp.readline().strip()
add_dir_to_list(self.compiler.library_dirs,
'/usr/lib/' + multiarch_path_component)
add_dir_to_list(self.compiler.include_dirs,
'/usr/include/' + multiarch_path_component)
finally:
os.unlink(tmpfile)
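# Illustrative: on amd64 Debian/Ubuntu 'dpkg-architecture -qDEB_HOST_MULTIARCH'
# prints 'x86_64-linux-gnu', so '/usr/lib/x86_64-linux-gnu' and
# '/usr/include/x86_64-linux-gnu' are prepended to the search paths.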
def detect_modules(self):
# Ensure that /usr/local is always used
if not self.cross_compile:
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value was passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
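# Illustrative: for arg_name '-I', an env value of '--foo -I/usr/local/include'
# becomes ' foo -I/usr/local/include', so the parser below collects
# '/usr/local/include' while the mangled leftover 'foo' is simply ignored as a
# positional argument.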
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK') \
and not self.cross_compile:
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
# (PYTHONFRAMEWORK is set) to avoid linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
try:
have_unicode = unicode
except NameError:
have_unicode = 0
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
lib_dirs = self.compiler.library_dirs
inc_dirs = self.compiler.include_dirs
if not self.cross_compile:
lib_dirs += [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
inc_dirs += ['/usr/include']
else:
cross_compile_lib = os.environ.get('CROSS_COMPILE_LIB','').strip()
if len(cross_compile_lib) > 0:
lib_dirs += cross_compile_lib.split(':')
cross_compile_inc = os.environ.get('CROSS_COMPILE_INC','').strip()
if len(cross_compile_inc) > 0:
inc_dirs += cross_compile_inc.split(':')
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
config_h_vars = sysconfig.parse_config_h(open(config_h))
platform = self.get_platform()
srcdir = sysconfig.get_config_var('srcdir')
# Check for AtheOS which has libraries in non-standard locations
if platform == 'atheos':
lib_dirs += ['/system/libs', '/atheos/autolnk/lib']
lib_dirs += os.getenv('LIBRARY_PATH', '').split(os.pathsep)
inc_dirs += ['/system/include', '/atheos/autolnk/include']
inc_dirs += os.getenv('C_INCLUDE_PATH', '').split(os.pathsep)
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
if platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses directories
# with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if platform in ['darwin', 'beos']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# Some modules that are normally always on:
#exts.append( Extension('_weakref', ['_weakref.c']) )
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# fast string operations implemented in C
exts.append( Extension('strop', ['stropmodule.c']) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=math_libs) )
exts.append( Extension('datetime', ['datetimemodule.c', 'timemodule.c'],
libraries=math_libs) )
# fast iterator tools implemented in C
exts.append( Extension("itertools", ["itertoolsmodule.c"]) )
# code that will be builtins in the future, but conflict with the
# current builtins
exts.append( Extension('future_builtins', ['future_builtins.c']) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# high-performance collections
exts.append( Extension("_collections", ["_collectionsmodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# operator.add() and similar goodies
exts.append( Extension('operator', ['operator.c']) )
# Python 3.1 _io library
exts.append( Extension("_io",
["_io/bufferedio.c", "_io/bytesio.c", "_io/fileio.c",
"_io/iobase.c", "_io/_iomodule.c", "_io/stringio.c", "_io/textio.c"],
depends=["_io/_iomodule.h"], include_dirs=["Modules/_io"]))
# _functools
exts.append( Extension("_functools", ["_functoolsmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# profilers (_lsprof is for cProfile.py)
exts.append( Extension('_hotshot', ['_hotshot.c']) )
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
if have_unicode:
exts.append( Extension('unicodedata', ['unicodedata.c']) )
else:
missing.append('unicodedata')
# access to ISO C locale support
data = open('pyconfig.h').read()
m = re.search(r"#s*define\s+WITH_LIBINTL\s+1\s*", data)
if m is not None:
locale_libs = ['intl']
else:
locale_libs = []
if platform == 'darwin':
locale_extra_link_args = ['-framework', 'CoreFoundation']
else:
locale_extra_link_args = []
exts.append( Extension('_locale', ['_localemodule.c'],
libraries=locale_libs,
extra_link_args=locale_extra_link_args) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# cStringIO and cPickle
exts.append( Extension('cStringIO', ['cStringIO.c']) )
exts.append( Extension('cPickle', ['cPickle.c']) )
# Memory-mapped files (also works on Win32).
if platform not in ['atheos']:
exts.append( Extension('mmap', ['mmapmodule.c']) )
else:
missing.append('mmap')
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
# George Neville-Neil's timing module:
# Deprecated in PEP 4 http://www.python.org/peps/pep-0004.html
# http://mail.python.org/pipermail/python-dev/2006-January/060023.html
#exts.append( Extension('timing', ['timingmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# Disabled on 64-bit platforms
if sys.maxint != 9223372036854775807L:
# Operations on images
exts.append( Extension('imageop', ['imageop.c']) )
else:
missing.extend(['imageop'])
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Determine if readline is already linked against curses or tinfo.
if do_readline and find_executable('ldd'):
fp = os.popen("ldd %s" % do_readline)
ldd_output = fp.readlines()
ret = fp.close()
if ret is None or ret >> 8 == 0:
for ln in ldd_output:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
if 'tinfo' in ln: # termcap interface split out from ncurses
readline_termcap_library = 'tinfo'
break
elif self.cross_compile:
readline_termcap_library = os.environ.get('TERMCAP_LIBRARY')
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if dep_target and dep_target.split('.') < ['10', '5']:
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
crypt_libdir = ['../obj/local/'+os.environ['ARCH']]
if self.compiler.find_library_file(crypt_libdir, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], library_dirs=crypt_libdir, libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
ssl_incs = ['../openssl/include']
ssl_libs = ['../openssl/obj/local/'+os.environ['ARCH']]
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if sys.platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
incfile = open(name, 'r')
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = eval(m.group(1))
except IOError, msg:
print "IOError while reading opensshv.h:", msg
pass
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and True)
#openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print ("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
if COMPILED_WITH_PYDEBUG or not have_usable_openssl:
# The _sha module implements the SHA1 hash algorithm.
exts.append( Extension('_sha', ['shamodule.c']) )
# The _md5 module implements the RSA Data Security, Inc. MD5
# Message-Digest Algorithm, described in RFC 1321. The
# necessary files md5.c and md5.h are included here.
exts.append( Extension('_md5',
sources = ['md5module.c', 'md5.c'],
depends = ['md5.h']) )
min_sha2_openssl_ver = 0x00908000
if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver:
# OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
exts.append( Extension('_sha256', ['sha256module.c']) )
exts.append( Extension('_sha512', ['sha512module.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module anydbm.py provides an
# implementation independent wrapper for these; dumbdbm.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (4, 8)
min_db_ver = (4, 1)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
# Use this function to filter out known bad configurations.
if (4, 6) == db_ver[:2]:
# BerkeleyDB 4.6.x is not stable on many architectures.
arch = platform_machine()
if arch not in ('i386', 'i486', 'i586', 'i686',
'x86_64', 'ia64'):
return False
return True
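# Illustrative: (4, 7) is accepted, (5, 0) is rejected (above max_db_ver),
# and (4, 6) is rejected on architectures such as ARM where that release is
# known to be unstable.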
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if sys.platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print "db: looking for db.h in", f
if os.path.exists(f):
f = open(f).read()
m = re.search(r"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(r"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(r"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print "db.h:", db_ver, "patch", db_patch,
print "being ignored (4.6.x must be >= 4.6.21)"
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print "db.h: found", db_ver, "in", d
else:
# we already found a header for this library version
if db_setup_debug: print "db.h: ignoring", d
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print "db.h: no version number version in", d
db_found_vers = db_ver_inc_map.keys()
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if sys.platform != 'darwin':
db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check)
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print "db lib: ", dblib, "not found"
except db_found:
if db_setup_debug:
print "bsddb using BerkeleyDB lib:", db_ver, dblib
print "bsddb lib dir:", dblib_dir, " inc dir:", db_incdir
db_incs = [db_incdir]
dblibs = [dblib]
# We add the runtime_library_dirs argument because the
# BerkeleyDB lib we're linking against often isn't in the
# system dynamic library search path. This is usually
# correct and most trouble free, but may cause problems in
# some unusual system configurations (e.g. the directory
# is on an NFS server that goes away).
exts.append(Extension('_bsddb', ['_bsddb.c'],
depends = ['bsddb.h'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
libraries=dblibs))
else:
if db_setup_debug: print "db: no appropriate library found"
db_incs = None
dblibs = []
dblib_dir = None
missing.append('_bsddb')
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
for d in inc_dirs + sqlite_inc_paths:
f = os.path.join(d, "sqlite3.h")
if sys.platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print "sqlite: found %s"%f
incf = open(f).read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"(.*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print "%s/sqlite3.h: version %s"%(d, sqlite_version)
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print "%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION)
elif sqlite_setup_debug:
print "sqlite: %s had no SQLITE_VERSION"%(f,)
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
sqlite_incdir = '../jni/sqlite3'
sqlite_libdir = ['../obj/local/'+os.environ['ARCH']]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if sys.platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Comment this out if you want the sqlite3 module to be able to load extensions.
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if sys.platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
runtime_library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
# Look for Berkeley db 1.85. Note that it is built as a different
# module name so it can be included even when later versions are
# available. A very restrictive search is performed to avoid
# accidentally building this module with a later version of the
# underlying db library. Many BSD-ish Unixes incorporate db 1.85
# symbols into libc and place the include file in /usr/include.
#
# If the better bsddb library can be built (db_incs is defined)
# we do not build this one. Otherwise this build will pick up
# the more recent berkeleydb's db.h file first in the include path
# when attempting to compile and it will fail.
f = "/usr/include/db.h"
if sys.platform == 'darwin':
if is_macosx_sdk_path(f):
sysroot = macosx_sdk_root()
f = os.path.join(sysroot, f[1:])
if os.path.exists(f) and not db_incs:
data = open(f).read()
m = re.search(r"#s*define\s+HASHVERSION\s+2\s*", data)
if m is not None:
# bingo - old version used hash file format version 2
### XXX this should be fixed to not be platform-dependent
### but I don't have direct access to an osf1 platform and
### seemed to be muffing the search somehow
libraries = platform == "osf1" and ['db'] or None
if libraries is not None:
exts.append(Extension('bsddb185', ['bsddbmodule.c'],
libraries=libraries))
else:
exts.append(Extension('bsddb185', ['bsddbmodule.c']))
else:
missing.append('bsddb185')
else:
missing.append('bsddb185')
dbm_order = ['gdbm']
# The standard Unix dbm module:
if platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others don't
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
else:
ndbm_libs = []
print "building dbm using ndbm"
dbmext = Extension('dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
print "building dbm using gdbm"
dbmext = Extension(
'dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
print "building dbm using gdbm"
dbmext = Extension(
'dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if db_incs is not None:
print "building dbm using bdb"
dbmext = Extension('dbm', ['dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('gdbm', ['gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('gdbm')
# Unix-only modules
if platform not in ['win32']:
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
if platform not in ['atheos']:
exts.append( Extension('resource', ['resource.c']) )
else:
missing.append('resource')
# Sun yellow pages. Some systems have the functions in libc.
if (platform not in ['cygwin', 'atheos', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
if curses_library.startswith('ncurses'):
if curses_library == 'ncursesw':
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif curses_library == 'curses' and platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = [os.path.join(os.environ['NDKPLATFORM'], 'usr/include')]
zlib_libdir = [os.path.join(os.environ['NDKPLATFORM'], 'usr/lib')]
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
fp = open(zlib_h)
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(zlib_libdir, 'z')):
if sys.platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
library_dirs=zlib_libdir,
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if sys.platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('bz2', ['bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('bz2')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
if have_unicode:
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
else:
missing.append('_multibytecodec')
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
missing.append('_codecs_%s' % loc)
# Dynamic loading module
if sys.maxint == 0x7fffffff:
# This requires sizeof(int) == sizeof(long) == sizeof(char*)
dl_inc = find_file('dlfcn.h', [], inc_dirs)
if (dl_inc is not None) and (platform not in ['atheos']):
exts.append( Extension('dl', ['dlmodule.c']) )
else:
missing.append('dl')
else:
missing.append('dl')
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif platform.startswith('openbsd'):
macros = dict()
libraries = []
elif platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=macros.items(),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
else:
missing.append('linuxaudiodev')
if (platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8')
or platform.startswith("gnukfreebsd")):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
else:
missing.append('sunaudiodev')
if platform == 'darwin':
# _scproxy
exts.append(Extension("_scproxy", [os.path.join(srcdir, "Mac/Modules/_scproxy.c")],
extra_link_args= [
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation'
]))
if platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
if int(os.uname()[2].split('.')[0]) >= 8:
# We're on Mac OS X 10.4 or later, the compiler should
# support '-Wno-deprecated-declarations'. This will
# suppress deprecation warnings for the Carbon extensions,
# these extensions wrap the Carbon APIs and even those
# parts that are deprecated.
carbon_extra_compile_args = ['-Wno-deprecated-declarations']
else:
carbon_extra_compile_args = []
# Mac OS X specific modules.
def macSrcExists(name1, name2=''):
if not name1:
return None
names = (name1,)
if name2:
names = (name1, name2)
path = os.path.join(srcdir, 'Mac', 'Modules', *names)
return os.path.exists(path)
def addMacExtension(name, kwds, extra_srcs=[]):
dirname = ''
if name[0] == '_':
dirname = name[1:].lower()
cname = name + '.c'
cmodulename = name + 'module.c'
# Check for NNN.c, NNNmodule.c, _nnn/NNN.c, _nnn/NNNmodule.c
if macSrcExists(cname):
srcs = [cname]
elif macSrcExists(cmodulename):
srcs = [cmodulename]
elif macSrcExists(dirname, cname):
# XXX(nnorwitz): If all the names ended with module, we
# wouldn't need this condition. ibcarbon is the only one.
srcs = [os.path.join(dirname, cname)]
elif macSrcExists(dirname, cmodulename):
srcs = [os.path.join(dirname, cmodulename)]
else:
raise RuntimeError("%s not found" % name)
# Here's the whole point: add the extension with sources
exts.append(Extension(name, srcs + extra_srcs, **kwds))
# Core Foundation
core_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'CoreFoundation'],
}
addMacExtension('_CF', core_kwds, ['cf/pycfbridge.c'])
addMacExtension('autoGIL', core_kwds)
# Carbon
carbon_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'Carbon'],
}
CARBON_EXTS = ['ColorPicker', 'gestalt', 'MacOS', 'Nav',
'OSATerminology', 'icglue',
# All these are in subdirs
'_AE', '_AH', '_App', '_CarbonEvt', '_Cm', '_Ctl',
'_Dlg', '_Drag', '_Evt', '_File', '_Folder', '_Fm',
'_Help', '_Icn', '_IBCarbon', '_List',
'_Menu', '_Mlte', '_OSA', '_Res', '_Qd', '_Qdoffs',
'_Scrap', '_Snd', '_TE',
]
for name in CARBON_EXTS:
addMacExtension(name, carbon_kwds)
# Workaround for a bug in the version of gcc shipped with Xcode 3.
# The _Win extension should build just like the other Carbon extensions, but
# this actually results in a hard crash of the linker.
#
if '-arch ppc64' in cflags and '-arch ppc' in cflags:
win_kwds = {'extra_compile_args': carbon_extra_compile_args + ['-arch', 'i386', '-arch', 'ppc'],
'extra_link_args': ['-framework', 'Carbon', '-arch', 'i386', '-arch', 'ppc'],
}
addMacExtension('_Win', win_kwds)
else:
addMacExtension('_Win', carbon_kwds)
# Application Services & QuickTime
app_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework','ApplicationServices'],
}
addMacExtension('_Launch', app_kwds)
addMacExtension('_CG', app_kwds)
exts.append( Extension('_Qt', ['qt/_Qtmodule.c'],
extra_compile_args=carbon_extra_compile_args,
extra_link_args=['-framework', 'QuickTime',
'-framework', 'Carbon']) )
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
return missing
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# The _tkinter module, using frameworks. Since frameworks are quite
# different, the UNIX search logic is not sharable.
from os.path import join, exists
framework_dirs = [
'/Library/Frameworks',
'/System/Library/Frameworks/',
join(os.getenv('HOME'), '/Library/Frameworks')
]
sysroot = macosx_sdk_root()
# Find the directory that contains the Tcl.framework and Tk.framework
# bundles.
# XXX distutils should support -F!
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if is_macosx_sdk_path(F):
if not exists(join(sysroot, F[1:], fw + '.framework')):
break
else:
if not exists(join(F, fw + '.framework')):
break
else:
# ok, F is now a directory with both frameworks. Continue
# building
break
else:
# Tk and Tcl frameworks not found. Normal "unix" tkinter search
# will now resume.
return 0
# For 8.4a2, we must add -I options that point inside the Tcl and Tk
# frameworks. In a later release we should hopefully be able to pass
# the -F option to gcc, which specifies a framework lookup path.
#
include_dirs = [
join(F, fw + '.framework', H)
for fw in 'Tcl', 'Tk'
for H in 'Headers', 'Versions/Current/PrivateHeaders'
]
# For 8.4a2, the X11 headers are not included. Rather than include a
# complicated search, this is a hard-coded path. It could bail out
# if X11 libs are not found...
include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
# All existing framework builds of Tcl/Tk don't support 64-bit
# architectures.
cflags = sysconfig.get_config_vars('CFLAGS')[0]
archs = re.findall('-arch\s+(\w+)', cflags)
if is_macosx_sdk_path(F):
fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(os.path.join(sysroot, F[1:]),))
else:
fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(F,))
detected_archs = []
for ln in fp:
a = ln.split()[-1]
if a in archs:
detected_archs.append(ln.split()[-1])
fp.close()
for a in detected_archs:
frameworks.append('-arch')
frameworks.append(a)
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
include_dirs = include_dirs,
libraries = [],
extra_compile_args = frameworks[2:],
extra_link_args = frameworks,
)
self.extensions.append(ext)
return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
# The _tkinter module.
# Rather than complicate the code below, detecting and building
# AquaTk is a separate method. Only one Tkinter will be built on
# Darwin - either AquaTk, if it is found, or X11 based Tk.
platform = self.get_platform()
if (platform == 'darwin' and
self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
return
# Assume we haven't found any of the libraries or include files
# The versions with dots are used on Unix, and the versions without
# dots on Windows, for detection by cygwin.
tcllib = tklib = tcl_includes = tk_includes = None
for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
'8.2', '82', '8.1', '81', '8.0', '80']:
tklib = self.compiler.find_library_file(lib_dirs,
'tk' + version)
tcllib = self.compiler.find_library_file(lib_dirs,
'tcl' + version)
if tklib and tcllib:
# Exit the loop when we've found the Tcl/Tk libraries
break
# Now check for the header files
if tklib and tcllib:
# Check for the include files on Debian and {Free,Open}BSD, where
# they're put in /usr/include/{tcl,tk}X.Y
dotversion = version
if '.' not in dotversion and "bsd" in sys.platform.lower():
# OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
# but the include subdirs are named like .../include/tcl8.3.
dotversion = dotversion[:-1] + '.' + dotversion[-1]
tcl_include_sub = []
tk_include_sub = []
for dir in inc_dirs:
tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
tk_include_sub += [dir + os.sep + "tk" + dotversion]
tk_include_sub += tcl_include_sub
tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)
if (tcllib is None or tklib is None or
tcl_includes is None or tk_includes is None):
self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
return
# OK... everything seems to be present for Tcl/Tk.
include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
for dir in tcl_includes + tk_includes:
if dir not in include_dirs:
include_dirs.append(dir)
# Check for various platform-specific directories
if platform == 'sunos5':
include_dirs.append('/usr/openwin/include')
added_lib_dirs.append('/usr/openwin/lib')
elif os.path.exists('/usr/X11R6/include'):
include_dirs.append('/usr/X11R6/include')
added_lib_dirs.append('/usr/X11R6/lib64')
added_lib_dirs.append('/usr/X11R6/lib')
elif os.path.exists('/usr/X11R5/include'):
include_dirs.append('/usr/X11R5/include')
added_lib_dirs.append('/usr/X11R5/lib')
else:
# Assume default location for X11
include_dirs.append('/usr/X11/include')
added_lib_dirs.append('/usr/X11/lib')
# If Cygwin, then verify that X is installed before proceeding
if platform == 'cygwin':
x11_inc = find_file('X11/Xlib.h', [], include_dirs)
if x11_inc is None:
return
# Check for BLT extension
if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT8.0'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT8.0')
elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT')
# Add the Tcl/Tk libraries
libs.append('tk'+ version)
libs.append('tcl'+ version)
if platform in ['aix3', 'aix4']:
libs.append('ld')
# Finally, link with the X11 libraries (not appropriate on cygwin)
if platform != "cygwin":
libs.append('X11')
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)] + defs,
include_dirs = include_dirs,
libraries = libs,
library_dirs = added_lib_dirs,
)
self.extensions.append(ext)
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
# XXX handle these, but how to detect?
# *** Uncomment and edit for PIL (TkImaging) extension only:
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
# *** Uncomment and edit for TOGL extension only:
# -DWITH_TOGL togl.c \
# *** Uncomment these for TOGL extension only:
# -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
# Darwin (OS X) uses preconfigured files, in
# the Modules/_ctypes/libffi_osx directory.
srcdir = sysconfig.get_config_var('srcdir')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi_osx'))
sources = [os.path.join(ffi_srcdir, p)
for p in ['ffi.c',
'x86/darwin64.S',
'x86/x86-darwin.S',
'x86/x86-ffi_darwin.c',
'x86/x86-ffi64.c',
'powerpc/ppc-darwin.S',
'powerpc/ppc-darwin_closure.S',
'powerpc/ppc-ffi_darwin.c',
'powerpc/ppc64-darwin_closure.S',
]]
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_srcdir, 'include'),
os.path.join(ffi_srcdir, 'powerpc')]
ext.include_dirs.extend(include_dirs)
ext.sources.extend(sources)
return True
def configure_ctypes(self, ext):
if not self.use_system_libffi:
if sys.platform == 'darwin':
return self.configure_ctypes_darwin(ext)
srcdir = sysconfig.get_config_var('srcdir')
ffi_builddir = os.path.join(self.build_temp, 'libffi')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi'))
ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')
from distutils.dep_util import newer_group
config_sources = [os.path.join(ffi_srcdir, fname)
for fname in os.listdir(ffi_srcdir)
if os.path.isfile(os.path.join(ffi_srcdir, fname))]
if self.force or newer_group(config_sources,
ffi_configfile):
from distutils.dir_util import mkpath
mkpath(ffi_builddir)
config_args = []
# Pass empty CFLAGS because we'll just append the resulting
# CFLAGS to Python's; -g or -O2 is to be avoided.
if self.cross_compile:
cmd = "cd %s && env CFLAGS='' %s/configure --host=%s --build=%s %s" \
% (ffi_builddir, ffi_srcdir,
os.environ.get('HOSTARCH'),
os.environ.get('BUILDARCH'),
" ".join(config_args))
else:
cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
% (ffi_builddir, ffi_srcdir, " ".join(config_args))
res = os.system(cmd)
if res or not os.path.exists(ffi_configfile):
print "Failed to configure _ctypes module"
return False
fficonfig = {}
with open(ffi_configfile) as f:
exec f in fficonfig
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_builddir, 'include'),
ffi_builddir,
os.path.join(ffi_srcdir, 'src')]
extra_compile_args = fficonfig['ffi_cflags'].split()
ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
fficonfig['ffi_sources'])
ext.include_dirs.extend(include_dirs)
ext.extra_compile_args.extend(extra_compile_args)
return True
def detect_ctypes(self, inc_dirs, lib_dirs):
self.use_system_libffi = False
include_dirs = []
extra_compile_args = []
extra_link_args = []
sources = ['_ctypes/_ctypes.c',
'_ctypes/callbacks.c',
'_ctypes/callproc.c',
'_ctypes/stgdict.c',
'_ctypes/cfield.c']
depends = ['_ctypes/ctypes.h']
if sys.platform == 'darwin':
sources.append('_ctypes/malloc_closure.c')
sources.append('_ctypes/darwin/dlfcn_simple.c')
extra_compile_args.append('-DMACOSX')
include_dirs.append('_ctypes/darwin')
# XXX Is this still needed?
## extra_link_args.extend(['-read_only_relocs', 'warning'])
elif sys.platform == 'sunos5':
# XXX This shouldn't be necessary; it appears that some
# of the assembler code is non-PIC (i.e. it has relocations
            # when it shouldn't). The proper fix would be to rewrite
# the assembler code to be PIC.
# This only works with GCC; the Sun compiler likely refuses
# this option. If you want to compile ctypes with the Sun
# compiler, please research a proper solution, instead of
# finding some -z option for the Sun compiler.
extra_link_args.append('-mimpure-text')
elif sys.platform.startswith('hp-ux'):
extra_link_args.append('-fPIC')
ext = Extension('_ctypes',
include_dirs=include_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
libraries=[],
sources=sources,
depends=depends)
ext_test = Extension('_ctypes_test',
sources=['_ctypes/_ctypes_test.c'])
self.extensions.extend([ext, ext_test])
if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
return
if sys.platform == 'darwin':
# OS X 10.5 comes with libffi.dylib; the include files are
# in /usr/include/ffi
inc_dirs.append('/usr/include/ffi')
ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
if not ffi_inc or ffi_inc[0] == '':
ffi_inc = find_file('ffi.h', [], inc_dirs)
if ffi_inc is not None:
ffi_h = ffi_inc[0] + '/ffi.h'
fp = open(ffi_h)
while 1:
line = fp.readline()
if not line:
ffi_inc = None
break
if line.startswith('#define LIBFFI_H'):
break
ffi_lib = None
if ffi_inc is not None:
for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
if (self.compiler.find_library_file(lib_dirs, lib_name)):
ffi_lib = lib_name
break
if ffi_inc and ffi_lib:
ext.include_dirs.extend(ffi_inc)
ext.libraries.append(ffi_lib)
self.use_system_libffi = True
class PyBuildInstall(install):
# Suppress the warning about installation into the lib_dynload
# directory, which is not in sys.path when running Python during
# installation:
def initialize_options (self):
install.initialize_options(self)
self.warn_dir=0
class PyBuildInstallLib(install_lib):
# Do exactly what install_lib does but make sure correct access modes get
    # set on installed directories and files. All installed files will get
    # mode 644 unless they are a shared library, in which case they will get
# mode 755. All installed directories will get mode 755.
so_ext = sysconfig.get_config_var("SO")
def install(self):
outfiles = install_lib.install(self)
self.set_file_modes(outfiles, 0644, 0755)
self.set_dir_modes(self.install_dir, 0755)
return outfiles
def set_file_modes(self, files, defaultMode, sharedLibMode):
if not self.is_chmod_supported(): return
if not files: return
for filename in files:
if os.path.islink(filename): continue
mode = defaultMode
if filename.endswith(self.so_ext): mode = sharedLibMode
log.info("changing mode of %s to %o", filename, mode)
if not self.dry_run: os.chmod(filename, mode)
def set_dir_modes(self, dirname, mode):
if not self.is_chmod_supported(): return
os.path.walk(dirname, self.set_dir_modes_visitor, mode)
def set_dir_modes_visitor(self, mode, dirname, names):
if os.path.islink(dirname): return
log.info("changing mode of %s to %o", dirname, mode)
if not self.dry_run: os.chmod(dirname, mode)
def is_chmod_supported(self):
return hasattr(os, 'chmod')
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
# turn off warnings when deprecated modules are imported
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
setup(# PyPI Metadata (PEP 301)
name = "Python",
version = sys.version.split()[0],
url = "http://www.python.org/%s" % sys.version[:3],
maintainer = "Guido van Rossum and the Python community",
maintainer_email = "python-dev@python.org",
description = "A high-level object-oriented programming language",
long_description = SUMMARY.strip(),
license = "PSF license",
classifiers = filter(None, CLASSIFIERS.split("\n")),
platforms = ["Many"],
# Build info
cmdclass = {'build_ext':PyBuildExt, 'install':PyBuildInstall,
'install_lib':PyBuildInstallLib},
# The struct module is defined here, because build_ext won't be
# called unless there's at least one extension module defined.
ext_modules=[Extension('_struct', ['_struct.c'])],
# Scripts to install
scripts = ['Tools/scripts/pydoc', 'Tools/scripts/idle',
'Tools/scripts/2to3',
'Lib/smtpd.py']
)
# --install-platlib
if __name__ == '__main__':
main()
| jyio/botbrew | cookbook/python2.7/setup.py | Python | mit | 92,206 | [
"VisIt"
] | 945e3f929c808cdf0016e4b36fa191fb7ebf18b322a624481251e244ba7cfacd |
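A small standalone sketch of the BSD include-directory fixup used in detect_tkinter() above, where library names such as libtcl83.a map to include subdirectories such as .../include/tcl8.3; the sample version strings are illustrative only.
def to_dotted(version):
    # Mirrors the detect_tkinter() logic: '83' -> '8.3'; dotted versions pass through.
    if '.' not in version:
        version = version[:-1] + '.' + version[-1]
    return version
assert to_dotted('83') == '8.3'
assert to_dotted('8.5') == '8.5'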
from twilio.rest import TwilioRestClient
# To find these visit https://www.twilio.com/user/account
ACCOUNT_SID = "AC388d08c36de5ff26c2fd8acacbb8aad8"
AUTH_TOKEN = "b308a23efb875b24021dedfd16b760cf"
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
def send_message(message_body, phone):
message = client.messages.create(
body=message_body+" You can reach them at 412-961-5899", # Message body, if any
to=phone,
from_="+16307565499",
)
| Shashank-Ojha/MakeBank | send_messages.py | Python | mit | 472 | [
"VisIt"
] | 4fbca312740d6c49912050dabdd5f28c72cfd52e915ef3c20c80955ef9d156c8 |
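A minimal usage sketch for send_messages.py above, assuming the module is importable from the same directory; the message text and recipient number are placeholders.
from send_messages import send_message
# Sends an SMS through the TwilioRestClient configured in send_messages.py;
# the module appends its own contact footer to the body.
send_message("Your MakeBank request was processed.", "+15551234567")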
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
class resourcemanagerCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'create_folder': ('folder', ),
'create_project': ('project', ),
'create_tag_binding': ('tag_binding', 'validate_only', ),
'create_tag_key': ('tag_key', 'validate_only', ),
'create_tag_value': ('tag_value', 'validate_only', ),
'delete_folder': ('name', ),
'delete_project': ('name', ),
'delete_tag_binding': ('name', ),
'delete_tag_key': ('name', 'validate_only', 'etag', ),
'delete_tag_value': ('name', 'validate_only', 'etag', ),
'get_folder': ('name', ),
'get_iam_policy': ('resource', 'options', ),
'get_organization': ('name', ),
'get_project': ('name', ),
'get_tag_key': ('name', ),
'get_tag_value': ('name', ),
'list_folders': ('parent', 'page_size', 'page_token', 'show_deleted', ),
'list_projects': ('parent', 'page_token', 'page_size', 'show_deleted', ),
'list_tag_bindings': ('parent', 'page_size', 'page_token', ),
'list_tag_keys': ('parent', 'page_size', 'page_token', ),
'list_tag_values': ('parent', 'page_size', 'page_token', ),
'move_folder': ('name', 'destination_parent', ),
'move_project': ('name', 'destination_parent', ),
'search_folders': ('page_size', 'page_token', 'query', ),
'search_organizations': ('page_size', 'page_token', 'query', ),
'search_projects': ('query', 'page_token', 'page_size', ),
'set_iam_policy': ('resource', 'policy', ),
'test_iam_permissions': ('resource', 'permissions', ),
'undelete_folder': ('name', ),
'undelete_project': ('name', ),
'update_folder': ('folder', 'update_mask', ),
'update_project': ('project', 'update_mask', ),
'update_tag_key': ('tag_key', 'update_mask', 'validate_only', ),
'update_tag_value': ('tag_value', 'update_mask', 'validate_only', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=resourcemanagerCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the resourcemanager client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
| googleapis/python-resource-manager | scripts/fixup_resourcemanager_v3_keywords.py | Python | apache-2.0 | 7,811 | [
"VisIt"
] | 62daa26695a30519b00a9c298c43c7259044f8c99effc8f82a85302bbfdea5ac |
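A short sketch of the rewrite performed by the transformer above, assuming the script is importable as fixup_resourcemanager_v3_keywords; the project name is a placeholder and the printed result is approximate.
import libcst as cst
from fixup_resourcemanager_v3_keywords import resourcemanagerCallTransformer
src = 'client.get_project("projects/example-proj", timeout=30.0)\n'
updated = cst.parse_module(src).visit(resourcemanagerCallTransformer())
print(updated.code)
# Roughly: client.get_project(request={'name': "projects/example-proj"}, timeout=30.0)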
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.special import gammaln
from scipy.optimize import fmin_powell, minimize_scalar
from skbio.stats import subsample_counts
from skbio.util._decorator import experimental
from skbio.diversity._base import (_validate_counts_vector,
_validate_otu_ids_and_tree)
@experimental(as_of="0.4.0")
def berger_parker_d(counts):
r"""Calculate Berger-Parker dominance.
Berger-Parker dominance is defined as the fraction of the sample that
belongs to the most abundant OTU:
.. math::
d = \frac{N_{max}}{N}
where :math:`N_{max}` is defined as the number of individuals in the most
abundant OTU (or any of the most abundant OTUs in the case of ties), and
:math:`N` is defined as the total number of individuals in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Berger-Parker dominance.
Notes
-----
Berger-Parker dominance is defined in [1]_. The implementation here is
based on the description given in the SDR-IV online manual [2]_.
References
----------
.. [1] Berger & Parker (1970). SDR-IV online help.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
return counts.max() / counts.sum()
@experimental(as_of="0.4.0")
def brillouin_d(counts):
r"""Calculate Brillouin index of alpha diversity.
This is calculated as follows:
.. math::
HB = \frac{\ln N!-\sum^s_{i=1}{\ln n_i!}}{N}
where :math:`N` is defined as the total number of individuals in the
sample, :math:`s` is the number of OTUs, and :math:`n_i` is defined as the
number of individuals in the :math:`i^{\text{th}}` OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Brillouin index.
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
nz = counts[counts.nonzero()]
n = nz.sum()
return (gammaln(n + 1) - gammaln(nz + 1).sum()) / n
@experimental(as_of="0.4.0")
def dominance(counts):
r"""Calculate dominance.
Dominance is defined as
.. math::
\sum{p_i^2}
where :math:`p_i` is the proportion of the entire community that OTU
:math:`i` represents.
Dominance can also be defined as 1 - Simpson's index. It ranges between
0 and 1.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Dominance.
See Also
--------
simpson
Notes
-----
The implementation here is based on the description given in [1]_.
References
----------
.. [1] http://folk.uio.no/ohammer/past/diversity.html
"""
counts = _validate_counts_vector(counts)
freqs = counts / counts.sum()
return (freqs * freqs).sum()
@experimental(as_of="0.4.0")
def doubles(counts):
"""Calculate number of double occurrences (doubletons).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Doubleton count.
"""
counts = _validate_counts_vector(counts)
return (counts == 2).sum()
@experimental(as_of="0.4.0")
def enspie(counts):
r"""Calculate ENS_pie alpha diversity measure.
ENS_pie is equivalent to ``1 / dominance``:
.. math::
ENS_{pie} = \frac{1}{\sum_{i=1}^s{p_i^2}}
where :math:`s` is the number of OTUs and :math:`p_i` is the proportion of
the community represented by OTU :math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
ENS_pie alpha diversity measure.
See Also
--------
dominance
Notes
-----
ENS_pie is defined in [1]_.
References
----------
.. [1] Chase and Knight (2013). "Scale-dependent effect sizes of ecological
drivers on biodiversity: why standardised sampling is not enough".
Ecology Letters, Volume 16, Issue Supplement s1, pgs 17-26.
"""
counts = _validate_counts_vector(counts)
return 1 / dominance(counts)
@experimental(as_of="0.4.0")
def equitability(counts, base=2):
"""Calculate equitability (Shannon index corrected for number of OTUs).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
base : scalar, optional
Logarithm base to use in the calculations.
Returns
-------
double
Measure of equitability.
See Also
--------
shannon
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
numerator = shannon(counts, base)
denominator = np.log(observed_otus(counts)) / np.log(base)
return numerator / denominator
@experimental(as_of="0.4.0")
def esty_ci(counts):
r"""Calculate Esty's CI.
Esty's CI is defined as
.. math::
F_1/N \pm z\sqrt{W}
where :math:`F_1` is the number of singleton OTUs, :math:`N` is the total
number of individuals (sum of abundances for all OTUs), and :math:`z` is a
constant that depends on the targeted confidence and based on the normal
distribution.
:math:`W` is defined as
.. math::
\frac{F_1(N-F_1)+2NF_2}{N^3}
where :math:`F_2` is the number of doubleton OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
tuple
Esty's confidence interval as ``(lower_bound, upper_bound)``.
Notes
-----
Esty's CI is defined in [1]_. :math:`z` is hardcoded for a 95% confidence
interval.
References
----------
.. [1] Esty, W. W. (1983). "A normal limit law for a nonparametric
estimator of the coverage of a random sample". Ann Statist 11: 905-912.
"""
counts = _validate_counts_vector(counts)
f1 = singles(counts)
f2 = doubles(counts)
n = counts.sum()
z = 1.959963985
W = (f1 * (n - f1) + 2 * n * f2) / (n ** 3)
return f1 / n - z * np.sqrt(W), f1 / n + z * np.sqrt(W)
@experimental(as_of="0.4.0-dev")
def faith_pd(counts, otu_ids, tree, validate=True):
""" Compute Faith's phylogenetic diversity metric (PD)
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
otu_ids: list, np.array
Vector of OTU ids corresponding to tip names in ``tree``. Must be the
same length as ``counts``.
tree: skbio.TreeNode
Tree relating the OTUs in otu_ids. The set of tip names in the tree can
be a superset of ``otu_ids``, but not a subset.
validate: bool, optional
If `False`, validation of the input won't be performed. This step can
be slow, so if validation is run elsewhere it can be disabled here.
However, invalid input data can lead to invalid results, so this step
        should not be bypassed altogether.
Returns
-------
float
The phylogenetic diversity (PD) of the samples.
Raises
------
ValueError
If ``counts`` and ``otu_ids`` are not equal in length.
MissingNodeError
If an OTU id is provided that does not correspond to a tip in the
tree.
Notes
-----
Faith's phylogenetic diversity, often referred to as PD, was originally
described in [1]_.
This implementation differs from that in PyCogent (and therefore QIIME
versions less than 2.0.0) by imposing a few additional restrictions on the
inputs. First, the input tree must be rooted. In PyCogent, if an unrooted
tree was provided that had a single trifurcating node (a newick convention
for unrooted trees) that node was considered the root of the tree. Next,
all OTU IDs must be tips in the tree. PyCogent would silently ignore OTU
    IDs that were not present in the tree. To reproduce Faith PD results from
PyCogent with scikit-bio, ensure that your PyCogent Faith PD calculations
are performed on a rooted tree and that all OTU IDs are present in the
tree.
References
----------
.. [1] Faith, D. P. Conservation evaluation and phylogenetic diversity.
Biol. Conserv. (1992).
"""
if validate:
counts = _validate_counts_vector(counts)
_validate_otu_ids_and_tree(counts, otu_ids, tree)
observed_otus = {o: c for o, c in zip(otu_ids, counts) if c >= 1}
observed_nodes = tree.observed_node_counts(observed_otus)
result = sum(o.length for o in observed_nodes if o.length is not None)
return result
@experimental(as_of="0.4.0")
def fisher_alpha(counts):
r"""Calculate Fisher's alpha, a metric of diversity.
Fisher's alpha is estimated by solving the following equation for
:math:`\alpha`:
.. math::
S=\alpha\ln(1+\frac{N}{\alpha})
where :math:`S` is the number of OTUs and :math:`N` is the
total number of individuals in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Fisher's alpha.
Raises
------
RuntimeError
If the optimizer fails to converge (error > 1.0).
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_. Uses ``scipy.optimize.minimize_scalar`` to find
Fisher's alpha.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
n = counts.sum()
s = observed_otus(counts)
def f(alpha):
return (alpha * np.log(1 + (n / alpha)) - s) ** 2
# Temporarily silence RuntimeWarnings (invalid and division by zero) during
# optimization in case invalid input is provided to the objective function
# (e.g. alpha=0).
orig_settings = np.seterr(divide='ignore', invalid='ignore')
try:
alpha = minimize_scalar(f).x
finally:
np.seterr(**orig_settings)
if f(alpha) > 1.0:
raise RuntimeError("Optimizer failed to converge (error > 1.0), so "
"could not compute Fisher's alpha.")
return alpha
@experimental(as_of="0.4.0")
def goods_coverage(counts):
r"""Calculate Good's coverage of counts.
Good's coverage estimator is defined as
.. math::
1-\frac{F_1}{N}
where :math:`F_1` is the number of singleton OTUs and :math:`N` is the
total number of individuals (sum of abundances for all OTUs).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Good's coverage estimator.
"""
counts = _validate_counts_vector(counts)
f1 = singles(counts)
N = counts.sum()
return 1 - (f1 / N)
@experimental(as_of="0.4.0")
def heip_e(counts):
r"""Calculate Heip's evenness measure.
Heip's evenness is defined as:
.. math::
\frac{(e^H-1)}{(S-1)}
where :math:`H` is the Shannon-Wiener entropy of counts (using logarithm
base :math:`e`) and :math:`S` is the number of OTUs in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Heip's evenness measure.
See Also
--------
shannon
Notes
-----
The implementation here is based on the description in [1]_.
References
----------
.. [1] Heip, C. 1974. A new index measuring evenness. J. Mar. Biol. Ass.
UK., 54, 555-557.
"""
counts = _validate_counts_vector(counts)
return ((np.exp(shannon(counts, base=np.e)) - 1) /
(observed_otus(counts) - 1))
@experimental(as_of="0.4.0")
def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
"""Calculate Kempton-Taylor Q index of alpha diversity.
Estimates the slope of the cumulative abundance curve in the interquantile
range. By default, uses lower and upper quartiles, rounding inwards.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
lower_quantile : float, optional
Lower bound of the interquantile range. Defaults to lower quartile.
upper_quantile : float, optional
Upper bound of the interquantile range. Defaults to upper quartile.
Returns
-------
double
Kempton-Taylor Q index of alpha diversity.
Notes
-----
The index is defined in [1]_. The implementation here is based on the
description given in the SDR-IV online manual [2]_.
The implementation provided here differs slightly from the results given in
Magurran 1998. Specifically, we have 14 in the numerator rather than 15.
Magurran recommends counting half of the OTUs with the same # counts as the
point where the UQ falls and the point where the LQ falls, but the
    justification for this is unclear (e.g. if there were a very large # of OTUs
that just overlapped one of the quantiles, the results would be
considerably off). Leaving the calculation as-is for now, but consider
changing.
References
----------
.. [1] Kempton, R. A. and Taylor, L. R. (1976) Models and statistics for
species diversity. Nature, 262, 818-820.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
n = len(counts)
lower = int(np.ceil(n * lower_quantile))
upper = int(n * upper_quantile)
sorted_counts = np.sort(counts)
return (upper - lower) / np.log(sorted_counts[upper] /
sorted_counts[lower])
@experimental(as_of="0.4.0")
def margalef(counts):
r"""Calculate Margalef's richness index.
Margalef's D is defined as:
.. math::
D = \frac{(S - 1)}{\ln N}
where :math:`S` is the number of OTUs and :math:`N` is the total number of
individuals in the sample.
Assumes log accumulation.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Margalef's richness index.
Notes
-----
Based on the description in [1]_.
References
----------
.. [1] Magurran, A E 2004. Measuring biological diversity. Blackwell. pp.
76-77.
"""
counts = _validate_counts_vector(counts)
return (observed_otus(counts) - 1) / np.log(counts.sum())
@experimental(as_of="0.4.0")
def mcintosh_d(counts):
r"""Calculate McIntosh dominance index D.
McIntosh dominance index D is defined as:
.. math::
D = \frac{N - U}{N - \sqrt{N}}
where :math:`N` is the total number of individuals in the sample and
:math:`U` is defined as:
.. math::
U = \sqrt{\sum{{n_i}^2}}
where :math:`n_i` is the number of individuals in the :math:`i^{\text{th}}`
OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
McIntosh dominance index D.
See Also
--------
mcintosh_e
Notes
-----
The index was proposed in [1]_. The implementation here is based on the
description given in the SDR-IV online manual [2]_.
References
----------
.. [1] McIntosh, R. P. 1967 An index of diversity and the relation of
certain concepts to diversity. Ecology 48, 1115-1126.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
u = np.sqrt((counts * counts).sum())
n = counts.sum()
return (n - u) / (n - np.sqrt(n))
@experimental(as_of="0.4.0")
def mcintosh_e(counts):
r"""Calculate McIntosh's evenness measure E.
McIntosh evenness measure E is defined as:
.. math::
        E = \frac{\sqrt{\sum{n_i^2}}}{\sqrt{(N-S+1)^2 + S - 1}}
where :math:`n_i` is the number of individuals in the :math:`i^{\text{th}}`
OTU, :math:`N` is the total number of individuals, and :math:`S` is the
number of OTUs in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
McIntosh evenness measure E.
See Also
--------
mcintosh_d
Notes
-----
The implementation here is based on the description given in [1]_, **NOT**
the one in the SDR-IV online manual, which is wrong.
References
----------
.. [1] Heip & Engels (1974) Comparing Species Diversity and Evenness
Indices. p 560.
"""
counts = _validate_counts_vector(counts)
numerator = np.sqrt((counts * counts).sum())
n = counts.sum()
s = observed_otus(counts)
denominator = np.sqrt((n - s + 1) ** 2 + s - 1)
return numerator / denominator
@experimental(as_of="0.4.0")
def menhinick(counts):
r"""Calculate Menhinick's richness index.
Menhinick's richness index is defined as:
.. math::
D_{Mn} = \frac{S}{\sqrt{N}}
where :math:`S` is the number of OTUs and :math:`N` is the total number of
individuals in the sample.
Assumes square-root accumulation.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Menhinick's richness index.
Notes
-----
Based on the description in [1]_.
References
----------
.. [1] Magurran, A E 2004. Measuring biological diversity. Blackwell. pp.
76-77.
"""
counts = _validate_counts_vector(counts)
return observed_otus(counts) / np.sqrt(counts.sum())
@experimental(as_of="0.4.0")
def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
r"""Calculate Michaelis-Menten fit to rarefaction curve of observed OTUs.
The Michaelis-Menten equation is defined as:
.. math::
S=\frac{nS_{max}}{n+B}
where :math:`n` is the number of individuals and :math:`S` is the number of
OTUs. This function estimates the :math:`S_{max}` parameter.
The fit is made to datapoints for :math:`n=1,2,...,N`, where :math:`N` is
the total number of individuals (sum of abundances for all OTUs).
:math:`S` is the number of OTUs represented in a random sample of :math:`n`
individuals.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
num_repeats : int, optional
The number of times to perform rarefaction (subsampling without
replacement) at each value of :math:`n`.
params_guess : tuple, optional
Initial guess of :math:`S_{max}` and :math:`B`. If ``None``, default
guess for :math:`S_{max}` is :math:`S` (as :math:`S_{max}` should
be >= :math:`S`) and default guess for :math:`B` is ``round(N / 2)``.
Returns
-------
S_max : double
Estimate of the :math:`S_{max}` parameter in the Michaelis-Menten
equation.
See Also
--------
skbio.stats.subsample_counts
Notes
-----
There is some controversy about how to do the fitting. The ML model given
in [1]_ is based on the assumption that error is roughly proportional to
magnitude of observation, reasonable for enzyme kinetics but not reasonable
for rarefaction data. Here we just do a nonlinear curve fit for the
parameters using least-squares.
References
----------
.. [1] Raaijmakers, J. G. W. 1987 Statistical analysis of the
Michaelis-Menten equation. Biometrics 43, 793-803.
"""
counts = _validate_counts_vector(counts)
n_indiv = counts.sum()
if params_guess is None:
S_max_guess = observed_otus(counts)
B_guess = int(round(n_indiv / 2))
params_guess = (S_max_guess, B_guess)
# observed # of OTUs vs # of individuals sampled, S vs n
xvals = np.arange(1, n_indiv + 1)
ymtx = np.empty((num_repeats, len(xvals)), dtype=int)
for i in range(num_repeats):
ymtx[i] = np.asarray([observed_otus(subsample_counts(counts, n))
for n in xvals], dtype=int)
yvals = ymtx.mean(0)
# Vectors of actual vals y and number of individuals n.
def errfn(p, n, y):
return (((p[0] * n / (p[1] + n)) - y) ** 2).sum()
# Return S_max.
return fmin_powell(errfn, params_guess, ftol=1e-5, args=(xvals, yvals),
disp=False)[0]
@experimental(as_of="0.4.0")
def observed_otus(counts):
"""Calculate the number of distinct OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Distinct OTU count.
"""
counts = _validate_counts_vector(counts)
return (counts != 0).sum()
@experimental(as_of="0.4.0")
def osd(counts):
"""Calculate observed OTUs, singles, and doubles.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
osd : tuple
Observed OTUs, singles, and doubles.
See Also
--------
observed_otus
singles
doubles
Notes
-----
This is a convenience function used by many of the other measures that rely
on these three measures.
"""
counts = _validate_counts_vector(counts)
return observed_otus(counts), singles(counts), doubles(counts)
@experimental(as_of="0.4.0")
def robbins(counts):
r"""Calculate Robbins' estimator for the probability of unobserved outcomes.
Robbins' estimator is defined as:
.. math::
\frac{F_1}{n+1}
where :math:`F_1` is the number of singleton OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Robbins' estimate.
Notes
-----
Robbins' estimator is defined in [1]_. The estimate computed here is for
:math:`n-1` counts, i.e. the x-axis is off by 1.
References
----------
.. [1] Robbins, H. E (1968). Ann. of Stats. Vol 36, pp. 256-257.
"""
counts = _validate_counts_vector(counts)
return singles(counts) / counts.sum()
@experimental(as_of="0.4.0")
def shannon(counts, base=2):
r"""Calculate Shannon entropy of counts, default in bits.
Shannon-Wiener diversity index is defined as:
.. math::
H = -\sum_{i=1}^s\left(p_i\log_2 p_i\right)
where :math:`s` is the number of OTUs and :math:`p_i` is the proportion of
the community represented by OTU :math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
base : scalar, optional
Logarithm base to use in the calculations.
Returns
-------
double
Shannon diversity index H.
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_ except that the default logarithm base used here is 2
instead of :math:`e`.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
freqs = counts / counts.sum()
nonzero_freqs = freqs[freqs.nonzero()]
return -(nonzero_freqs * np.log(nonzero_freqs)).sum() / np.log(base)
@experimental(as_of="0.4.0")
def simpson(counts):
r"""Calculate Simpson's index.
Simpson's index is defined as ``1 - dominance``:
.. math::
1 - \sum{p_i^2}
where :math:`p_i` is the proportion of the community represented by OTU
:math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Simpson's index.
See Also
--------
dominance
Notes
-----
The implementation here is ``1 - dominance`` as described in [1]_. Other
references (such as [2]_) define Simpson's index as ``1 / dominance``.
References
----------
.. [1] http://folk.uio.no/ohammer/past/diversity.html
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
return 1 - dominance(counts)
@experimental(as_of="0.4.0")
def simpson_e(counts):
r"""Calculate Simpson's evenness measure E.
Simpson's E is defined as
.. math::
E=\frac{1 / D}{S_{obs}}
where :math:`D` is dominance and :math:`S_{obs}` is the number of observed
OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Simpson's evenness measure E.
See Also
--------
dominance
enspie
simpson
Notes
-----
The implementation here is based on the description given in [1]_.
References
----------
.. [1] http://www.tiem.utk.edu/~gross/bioed/bealsmodules/simpsonDI.html
"""
counts = _validate_counts_vector(counts)
return enspie(counts) / observed_otus(counts)
@experimental(as_of="0.4.0")
def singles(counts):
"""Calculate number of single occurrences (singletons).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Singleton count.
"""
counts = _validate_counts_vector(counts)
return (counts == 1).sum()
@experimental(as_of="0.4.0")
def strong(counts):
r"""Calculate Strong's dominance index.
Strong's dominance index is defined as:
.. math::
D_w = max_i[(\frac{b_i}{N})-\frac{i}{S}]
where :math:`b_i` is the sequential cumulative totaling of the
:math:`i^{\text{th}}` OTU abundance values ranked from largest to smallest,
:math:`N` is the total number of individuals in the sample, and
:math:`S` is the number of OTUs in the sample. The expression in brackets
is computed for all OTUs, and :math:`max_i` denotes the maximum value in
brackets for any OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Strong's dominance index (Dw).
Notes
-----
Strong's dominance index is defined in [1]_. The implementation here is
based on the description given in the SDR-IV online manual [2]_.
References
----------
    .. [1] Strong, W. L., 2002 Assessing species abundance unevenness within and
between plant communities. Community Ecology, 3, 237-246.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
n = counts.sum()
s = observed_otus(counts)
i = np.arange(1, len(counts) + 1)
sorted_sum = np.sort(counts)[::-1].cumsum()
return (sorted_sum / n - (i / s)).max()
| SamStudio8/scikit-bio | skbio/diversity/alpha/_base.py | Python | bsd-3-clause | 27,424 | [
"scikit-bio"
] | c0a117045d6ab6d17e31bfc86e29dbd52959ef6cb64df0c8b8e88da239269f65 |
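A quick usage sketch for the alpha-diversity functions above, assuming scikit-bio is installed and exposes them via skbio.diversity.alpha; the counts vector is illustrative.
import numpy as np
from skbio.diversity.alpha import (berger_parker_d, observed_otus,
                                   shannon, simpson)
counts = np.array([10, 4, 4, 1, 1, 0])
print(observed_otus(counts))    # 5 distinct OTUs
print(berger_parker_d(counts))  # 0.5: the most abundant OTU holds half the sample
print(shannon(counts))          # Shannon entropy in bits
print(simpson(counts))          # 1 - dominance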
#!/usr/bin/env python3
#
# Script to convert SCM AMV trajectory to xyz format trajectory
# by Patrick Melix
# 2020/10
#
# You can import the module and then call .main() or use it as a script
import sys, os, glob
from ase import io
def main(argv):
inFile = argv[0]
outFile = argv[1]
data = []
nAtoms = None
with open(inFile) as f:
for line in f.readlines():
if line.strip() == '':
continue
if 'Geometry' in line:
data.append([])
data[-1].append(line)
with open(outFile,'w') as f:
for frame in data:
f.write(str(len(frame)-1)+'\n')
for i, entry in enumerate(frame):
if i == 0:
f.write("'"+entry.strip()+"'\n")
else:
f.write(entry)
if __name__ == "__main__":
    if len(sys.argv) < 3 or '-h' in sys.argv[1]:
print("Usage: amv2xyz.py <infile> <outfile>")
sys.exit(0)
main(sys.argv[1:])
| patrickmelix/Python4ChemistryTools | amv2xyz.py | Python | mit | 993 | [
"ASE"
] | 9bfafa1d219f6f15f54c979da134e4944e448729cd9ddb550c16240e05cc0712 |
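A tiny end-to-end demo of amv2xyz.main() above using a made-up single-frame input; the atom lines are placeholders, not real SCM AMV output.
import amv2xyz
with open('demo.amv', 'w') as f:
    f.write('Geometry 1\n')
    f.write('  C 0.000 0.000 0.000\n')
    f.write('  O 0.000 0.000 1.128\n')
amv2xyz.main(['demo.amv', 'demo.xyz'])
print(open('demo.xyz').read())
# 2
# 'Geometry 1'
#   C 0.000 0.000 0.000
#   O 0.000 0.000 1.128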
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import math
import numpy as np
from hyperspy.component import Component
sqrt2pi = math.sqrt(2 * math.pi)
sigma2fwhm = 2 * math.sqrt(2 * math.log(2))
def _estimate_gaussian_parameters(signal, x1, x2, only_current):
axis = signal.axes_manager.signal_axes[0]
i1, i2 = axis.value_range_to_indices(x1, x2)
X = axis.axis[i1:i2]
if only_current is True:
data = signal()[i1:i2]
X_shape = (len(X),)
i = 0
centre_shape = (1,)
else:
i = axis.index_in_array
data_gi = [slice(None), ] * len(signal.data.shape)
data_gi[axis.index_in_array] = slice(i1, i2)
data = signal.data[data_gi]
X_shape = [1, ] * len(signal.data.shape)
X_shape[axis.index_in_array] = data.shape[i]
centre_shape = list(data.shape)
centre_shape[i] = 1
centre = np.sum(X.reshape(X_shape) * data, i) / np.sum(data, i)
sigma = np.sqrt(np.abs(np.sum((X.reshape(X_shape) - centre.reshape(
centre_shape)) ** 2 * data, i) / np.sum(data, i)))
height = data.max(i)
return centre, height, sigma
class Gaussian(Component):
"""Normalized gaussian function component
.. math::
f(x) = \\frac{a}{\\sqrt{2\\pi c^{2}}}exp\\left[-\\frac{\\left(x-b\\right)^{2}}{2c^{2}}\\right]
    +------------+-----------+
    | Parameter  | Attribute |
    +------------+-----------+
    +------------+-----------+
    | a          | A         |
    +------------+-----------+
    | b          | centre    |
    +------------+-----------+
    | c          | sigma     |
    +------------+-----------+
    For convenience, the `fwhm` attribute can be used to get and set
    the full-width at half-maximum.
See also
--------
hyperspy._components.gaussianhf.GaussianHF
"""
def __init__(self, A=1., sigma=1., centre=0.):
Component.__init__(self, ['A', 'sigma', 'centre'])
self.A.value = A
self.sigma.value = sigma
self.centre.value = centre
self._position = self.centre
# Boundaries
self.A.bmin = 0.
self.A.bmax = None
self.sigma.bmin = None
self.sigma.bmax = None
self.isbackground = False
self.convolved = True
# Gradients
self.A.grad = self.grad_A
self.sigma.grad = self.grad_sigma
self.centre.grad = self.grad_centre
def function(self, x):
A = self.A.value
s = self.sigma.value
c = self.centre.value
return A * (1 / (s * sqrt2pi)) * np.exp(-(x - c)**2 / (2 * s**2))
def grad_A(self, x):
return self.function(x) / self.A.value
def grad_sigma(self, x):
d2 = (x - self.centre.value)**2
s2 = self.sigma.value**2
A = self.A.value
return (d2 * A * np.exp(-d2 / (2 * s2))) / (sqrt2pi * s2**2) - \
(np.exp(-d2 / (2 * s2)) * A) / (sqrt2pi * s2)
def grad_centre(self, x):
d = x - self.centre.value
s = self.sigma.value
A = self.A.value
return (d * np.exp(-d**2 / (2 * s**2)) * A) / (sqrt2pi * s**3)
def estimate_parameters(self, signal, x1, x2, only_current=False):
"""Estimate the gaussian by calculating the momenta.
Parameters
----------
signal : Signal instance
x1 : float
Defines the left limit of the spectral range to use for the
estimation.
x2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
bool
Notes
-----
Adapted from http://www.scipy.org/Cookbook/FittingData
Examples
--------
>>> g = hs.model.components.Gaussian()
>>> x = np.arange(-10, 10, 0.01)
>>> data = np.zeros((32, 32, 2000))
>>> data[:] = g.function(x).reshape((1, 1, 2000))
>>> s = hs.signals.Spectrum(data)
>>> s.axes_manager._axes[-1].offset = -10
>>> s.axes_manager._axes[-1].scale = 0.01
>>> g.estimate_parameters(s, -10, 10, False)
"""
super(Gaussian, self)._estimate_parameters(signal)
binned = signal.metadata.Signal.binned
axis = signal.axes_manager.signal_axes[0]
centre, height, sigma = _estimate_gaussian_parameters(signal, x1, x2,
only_current)
if only_current is True:
self.centre.value = centre
self.sigma.value = sigma
self.A.value = height * sigma * sqrt2pi
if binned is True:
self.A.value /= axis.scale
return True
else:
if self.A.map is None:
self._create_arrays()
self.A.map['values'][:] = height * sigma * sqrt2pi
if binned is True:
self.A.map['values'] /= axis.scale
self.A.map['is_set'][:] = True
self.sigma.map['values'][:] = sigma
self.sigma.map['is_set'][:] = True
self.centre.map['values'][:] = centre
self.centre.map['is_set'][:] = True
self.fetch_stored_values()
return True
@property
def fwhm(self):
return self.sigma.value * sigma2fwhm
@fwhm.setter
def fwhm(self, value):
self.sigma.value = value / sigma2fwhm
| to266/hyperspy | hyperspy/_components/gaussian.py | Python | gpl-3.0 | 6,181 | [
"Gaussian"
] | 1abffc2cdad8d37a79e9cc3784e84b426f1f3588d41da5a5fa94052af4671b23 |
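A brief sketch following the estimate_parameters docstring above; the component values are illustrative, and fwhm and sigma are related by sigma2fwhm = 2*sqrt(2*ln 2).
import numpy as np
import hyperspy.api as hs
g = hs.model.components.Gaussian(A=10., sigma=0.5, centre=0.)
x = np.arange(-5, 5, 0.01)
y = g.function(x)     # area-normalised Gaussian scaled by A
g.fwhm = 1.0          # convenience setter; stores sigma = fwhm / sigma2fwhm
print(g.sigma.value)  # ~0.4247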
"""
db_admin_util v0.01
ga-bitbot database administration utility
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
__appversion__ = "0.01a"
print "ga-bitbot database admin tool v%s\n"%__appversion__
import json
import sys
# try to connect to the xml server
# to make sure it's not running.
import gene_server_config
import xmlrpclib
builtin_db_hash = '0db45d2a4141101bdfe48e3314cfbca3'
__server__ = gene_server_config.__server__
__port__ = str(gene_server_config.__port__)
gene_server_running = False
try:
#make sure the port number matches the server.
server = xmlrpclib.Server('http://' + __server__ + ":" + __port__)
server.save()
gene_server_running = True
except:
pass
print 'enter h for help.'
status = "no changes to commit"
commit_enable = True
if gene_server_running == True:
print "Warning: gene_server must not be running to use this utility."
print "committing changes will be disabled for this session."
status = "committing disabled"
commit_enable = False
#sys.exit()
#load the db
f = open('../config/gene_server_db_library.json','r')
d = f.read()
gene_library = json.loads(d)
f.close()
signed_package_library = {}
if gene_library.has_key('signed_package_library'):
signed_package_library = gene_library.pop('signed_package_library')
while 1:
ui = raw_input('\nstatus: ' + status + '\n'+'?:')
if ui == 'h':
print "commands"
print "---------------------------------------"
print "h help"
print "a list all databases"
#TODO: print "l link UNDEFINED db to the local gene_def.json"
print "d select a database to delete"
#TODO: print "o open another library to merge from"
#TODO: print "m merge selected databases"
print "c commit changes"
print "e export gene_def"
print "s stats"
print "r raw library dump"
print "zp delete all genes from the library"
print "q quit"
elif ui == 'zp':
print "deleting all records"
for key in gene_library.keys():
gene_library[key]['gene_best'] = [[],[],[],[]]
gene_library[key]['gene_high_scores'] = [[],[],[],[]]
status = "uncommited changes"
elif ui == 'r':
filename = '../report/gene_server_db_dump.csv'
print "export raw library dump (csv format) to " + filename
f = open(filename,'w')
gkeys = gene_library[gene_library.keys()[0]]['gene_high_scores'][0][0].keys()
header = 'library,type,' + ",".join(gkeys) + '\n'
f.write(header)
for key in gene_library.keys():
for l in gene_library[key]['gene_best']:
for d in l:
f.write(key + ',gene_best')
for agk in gkeys:
f.write("," + str(d[agk]))
f.write('\n')
for l in gene_library[key]['gene_high_scores']:
for d in l:
f.write(key + ',gene_high_scores')
for agk in gkeys:
f.write("," + str(d[agk]))
f.write('\n')
f.close()
elif ui == 's':
print "stats: markup"
print "-----"
for key in gene_library.keys():
print key
#count the number of records
record_count = 0
total = 0
mn = 999
mx = -999
for l in gene_library[key]['gene_best']:
for d in l:
record_count += 1
total += d['markup']
if d['markup'] < mn:
mn = d['markup']
if d['markup'] > mx:
mx = d['markup']
avg = float(total)/(record_count + 0.00001)
print 'bob',mn,avg,mx,record_count
record_count = 0
total = 0
mn = 999
mx = -999
for l in gene_library[key]['gene_high_scores']:
for d in l:
record_count += 1
total += d['markup']
if d['markup'] < mn:
mn = d['markup']
if d['markup'] > mx:
mx = d['markup']
avg = float(total)/(record_count + 0.00001)
print 'hs ',mn,avg,mx,record_count
print "-"*20
elif ui == 'a' or ui == 'd' or ui == 'e':
index = 0
for key in gene_library.keys():
#count the number of records
record_count = 0
for l in gene_library[key]['gene_best']:
record_count += len(l)
for l in gene_library[key]['gene_high_scores']:
record_count += len(l)
print "["+str(index)+"]","database:",key,"\trecord count:", record_count
index += 1
details = ""
try:
gd = json.loads(gene_library[key]['gene_def'])
if 'version' in gd.keys():
details += "\t\tversion:" + str(gd['version'])
if 'name' in gd.keys():
details += " name: " + gd['name']
if 'description' in gd.keys():
details += "\tdescription: " + gd['description']
except:
if gene_library[key]['gene_def'] == "UNDEFINED":
details += "## Built-in UNDEFINED gene_def database ##"
else:
details += "Invalid gene_def :",gene_library[key]['gene_def'][:30],"..."
print details
print ""
if ui == 'd' or ui == 'e':
index = -999
while not index in range(len(gene_library.keys())):
print "Enter the database index number 0 ...",len(gene_library.keys()) - 1
try:
index = int(raw_input('[n]?:'))
except:
pass
if ui == 'd':
if builtin_db_hash != gene_library.keys()[index]:
print "deleting database ["+str(index)+"]",gene_library.keys()[index]
gene_library.pop(gene_library.keys()[index])
status = "uncommited changes"
else:
print "can not delete built-in UNDEFINED database"
if ui == 'e':
print "exporting database gene_def ["+str(index)+"]",gene_library.keys()[index]
filename = '../config/gene_def_'+gene_library.keys()[index]+'.json'
print "writing file " + filename
f = open(filename,'w')
f.write(gene_library[gene_library.keys()[index]]['gene_def'])
f.close()
elif ui == 'c':
if commit_enable == True:
print "commiting changes"
#save the db
f = open('../config/gene_server_db_library.json','w')
f.write(json.dumps(gene_library))
f.close()
status = "no changes to commit"
else:
print "commiting disabled for this session."
elif ui == 'q':
if status == "no changes to commit" or status == "committing disabled":
sys.exit()
else:
print "changes have not been commited. quit without commiting?"
confirm_exit = ""
while confirm_exit != 'y' and confirm_exit != 'n':
confirm_exit = raw_input('(y/n): ')
if confirm_exit == 'y':
sys.exit()
else:
print "unknown command"
print "---------------------------------------"
| Pascal66/ga-bitbot | tools/db_admin_util.py | Python | gpl-3.0 | 8,303 | [
"Brian"
] | 02ecd24bba17d45679a337c19003209856fea7e5577a9e3b0d3bcee4a708718a |
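An inferred (not authoritative) sketch of the gene_server_db_library.json layout that the admin tool above expects, reconstructed from how the 'a', 's' and 'zp' commands access it; all keys and values shown are illustrative.
example_library = {
    "signed_package_library": {},                      # popped before editing
    "0db45d2a4141101bdfe48e3314cfbca3": {              # built-in UNDEFINED database
        "gene_def": "UNDEFINED",                       # or a JSON string with version/name/description
        "gene_best": [[], [], [], []],                 # four lists of gene dicts
        "gene_high_scores": [[{"markup": 0.02}], [], [], []],
    },
}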
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
reader = chigger.exodus.ExodusReader('../input/pipe.e')
tube = chigger.filters.TubeFilter()
pipe = chigger.exodus.ExodusResult(reader, variable='u', cmap='viridis', filters=[tube])
window = chigger.RenderWindow(pipe, size=[300,300], test=True)
window.write('tube.png')
window.start()
| nuclear-wizard/moose | python/chigger/tests/tube/tube.py | Python | lgpl-2.1 | 669 | [
"MOOSE"
] | 12ebb884ba3e7d303cb94d4f064b67bcca5221fd9938cdcd94c0264adc09e5d2 |
__package__ = "DIRAC.AccountingSystem.Client.Types" | avedaee/DIRAC | AccountingSystem/Client/Types/__init__.py | Python | gpl-3.0 | 51 | [
"DIRAC"
] | 98cfb8c1e3d95988677a1a90f1c627ddccff78afd2817d98d4b69c23757d8809 |
# Copyright (C) 2019 Alejandro Molina Sanchez - Henrique PC Miranda
# All rights reserved.
#
# This file is part of yambopy
#
# Tutorial File of Yambopy Tasks. BSE flow
#
#import argparse
#import os
#import shutil
from yambopy.flow import YambopyFlow, P2yTask, YamboTask
#from schedulerpy import Scheduler
from yambopy import yambopyenv
# Set list of task and dictionary of yambo variables
tasks = []
yamboin_dict = dict()
# Set origin of SAVE folder
p2y_task = P2yTask.from_folder('nscf_flow/t2')
print(p2y_task)
# Coulomb-cutoff and RIM dictionary
cutoffdict = dict(CUTBox = [0,0,10],CUTGeo='box z',RandQpts=1000000,RandGvec=[1,'RL'])
# Parallel Environment dictionary
paradict = dict(X_all_q_ROLEs="q",X_all_q_CPU="2")
# BSE variables dictionary
bsedict = dict(BEnSteps=1000,
FFTGvecs=[10,'Ry'],
BEnRange=[[0,5],'eV'],
BndsRnXs=[1,60],
NGsBlkXs=[1,'Ry'],
BSENGexx=[10,'Ry'],
BSENGBlk=[1,'Ry'],
BSEBands=[7,10])
# Merge all dict variables
yamboin_dict = {**yamboin_dict,**cutoffdict,**paradict,**bsedict}
# Set Yambo task (BSE in this case)
# yamboin_args >> Add arguments (ExtendOut, WRbsWF, EvalKerr, etc.)
bse_task = YamboTask.from_runlevel([p2y_task],'-r -o b -b -k sex -y d -V all',yamboin_dict,yamboin_args=['WRbsWF'])
# Introduce each task in the list of task
tasks.append(bse_task)
# Set the Yambo flow
yambo_flow = YambopyFlow.from_tasks('bse_flow',tasks)
print(yambo_flow)
# Create the Yambo flow
yambo_flow.create(agressive=True)
# Run the Yambo flow
yambo_flow.run()
print(yambo_flow)
| henriquemiranda/yambo-py | tutorial/bn/flow-bse.py | Python | bsd-3-clause | 1,626 | [
"Yambo"
] | 3d8325de9ec038c1bd9f95e04d13007dc760e7129940b40e37181c98f5590c07 |
"""
Augmenters that blur images.
List of augmenters:
* :class:`GaussianBlur`
* :class:`AverageBlur`
* :class:`MedianBlur`
* :class:`BilateralBlur`
* :class:`MotionBlur`
* :class:`MeanShiftBlur`
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from scipy import ndimage
import cv2
import six.moves as sm
import imgaug as ia
from imgaug.imgaug import _normalize_cv2_input_arr_
from . import meta
from . import convolutional as iaa_convolutional
from .. import parameters as iap
from .. import dtypes as iadt
# TODO add border mode, cval
def blur_gaussian_(image, sigma, ksize=None, backend="auto", eps=1e-3):
"""Blur an image using gaussian blurring in-place.
This operation *may* change the input image in-place.
**Supported dtypes**:
if (backend="auto"):
* ``uint8``: yes; fully tested (1)
* ``uint16``: yes; tested (1)
* ``uint32``: yes; tested (2)
* ``uint64``: yes; tested (2)
* ``int8``: yes; tested (1)
* ``int16``: yes; tested (1)
* ``int32``: yes; tested (1)
* ``int64``: yes; tested (2)
* ``float16``: yes; tested (1)
* ``float32``: yes; tested (1)
* ``float64``: yes; tested (1)
* ``float128``: no
* ``bool``: yes; tested (1)
- (1) Handled by ``cv2``. See ``backend="cv2"``.
- (2) Handled by ``scipy``. See ``backend="scipy"``.
if (backend="cv2"):
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (2)
* ``uint64``: no (3)
* ``int8``: yes; tested (4)
* ``int16``: yes; tested
* ``int32``: yes; tested (5)
* ``int64``: no (6)
* ``float16``: yes; tested (7)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (8)
* ``bool``: yes; tested (1)
- (1) Mapped internally to ``float32``. Otherwise causes
``TypeError: src data type = 0 is not supported``.
- (2) Causes ``TypeError: src data type = 6 is not supported``.
- (3) Causes ``cv2.error: OpenCV(3.4.5) (...)/filter.cpp:2957:
error: (-213:The function/feature is not implemented)
Unsupported combination of source format (=4), and buffer
format (=5) in function 'getLinearRowFilter'``.
- (4) Mapped internally to ``int16``. Otherwise causes
``cv2.error: OpenCV(3.4.5) (...)/filter.cpp:2957: error:
(-213:The function/feature is not implemented) Unsupported
combination of source format (=1), and buffer format (=5)
in function 'getLinearRowFilter'``.
- (5) Mapped internally to ``float64``. Otherwise causes
``cv2.error: OpenCV(3.4.5) (...)/filter.cpp:2957: error:
(-213:The function/feature is not implemented) Unsupported
combination of source format (=4), and buffer format (=5)
in function 'getLinearRowFilter'``.
- (6) Causes ``cv2.error: OpenCV(3.4.5) (...)/filter.cpp:2957:
error: (-213:The function/feature is not implemented)
Unsupported combination of source format (=4), and buffer
format (=5) in function 'getLinearRowFilter'``.
- (7) Mapped internally to ``float32``. Otherwise causes
``TypeError: src data type = 23 is not supported``.
- (8) Causes ``TypeError: src data type = 13 is not supported``.
if (backend="scipy"):
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: yes; tested
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: yes; tested
* ``float16``: yes; tested (1)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (2)
* ``bool``: yes; tested (3)
- (1) Mapped internally to ``float32``. Otherwise causes
``RuntimeError: array type dtype('float16') not supported``.
- (2) Causes ``RuntimeError: array type dtype('float128') not
supported``.
- (3) Mapped internally to ``float32``. Otherwise too inaccurate.
Parameters
----------
image : numpy.ndarray
The image to blur. Expected to be of shape ``(H, W)`` or ``(H, W, C)``.
sigma : number
Standard deviation of the gaussian blur. Larger numbers result in
more large-scale blurring, which is overall slower than small-scale
blurring.
ksize : None or int, optional
Size in height/width of the gaussian kernel. This argument is only
understood by the ``cv2`` backend. If it is set to ``None``, an
appropriate value for `ksize` will automatically be derived from
`sigma`. The value is chosen tighter for larger sigmas to avoid as
much as possible very large kernel sizes and therey improve
performance.
backend : {'auto', 'cv2', 'scipy'}, optional
Backend library to use. If ``auto``, then the likely best library
will be automatically picked per image. That is usually equivalent
to ``cv2`` (OpenCV) and it will fall back to ``scipy`` for datatypes
not supported by OpenCV.
eps : number, optional
A threshold used to decide whether `sigma` can be considered zero.
Returns
-------
numpy.ndarray
The blurred image. Same shape and dtype as the input.
(Input image *might* have been altered in-place.)
"""
if image.size == 0:
return image
if sigma < eps:
return image
iadt.gate_dtypes_strs(
{image.dtype},
allowed="bool uint8 uint16 uint32 "
"int8 int16 int32 int64 "
"uint64 "
"float16 float32 float64",
disallowed="float128",
augmenter=None
)
dts_not_supported_by_cv2 = iadt._convert_dtype_strs_to_types(
"uint32 uint64 int64 float128"
)
backend_to_use = backend
if backend == "auto":
backend_to_use = (
"cv2"
if image.dtype not in dts_not_supported_by_cv2
else "scipy")
elif backend == "cv2":
assert image.dtype not in dts_not_supported_by_cv2, (
"Requested 'cv2' backend, but provided %s input image, which "
"cannot be handled by that backend. Choose a different "
"backend or set backend to 'auto' or use a different "
"datatype." % (
image.dtype.name,))
elif backend == "scipy":
# can handle all dtypes that were allowed in gate_dtypes()
pass
if backend_to_use == "scipy":
image = _blur_gaussian_scipy_(image, sigma, ksize)
else:
image = _blur_gaussian_cv2(image, sigma, ksize)
return image
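# Illustrative sketch (an editorial addition, not part of the library API):
# a minimal example of calling blur_gaussian_() with backend="auto"; the
# array shape and sigma below are arbitrary assumptions for the demo.
def _demo_blur_gaussian_():
    # uint8 input takes the cv2 code path; uint32/uint64/int64 would fall
    # back to the scipy path, as described in the docstring above
    image = np.zeros((64, 64, 3), dtype=np.uint8)
    image[24:40, 24:40, :] = 255
    return blur_gaussian_(image.copy(), sigma=2.0, backend="auto")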
# Added in 0.5.0.
def _blur_gaussian_scipy_(image, sigma, ksize):
dtype = image.dtype
if dtype.kind == "b":
# We convert bool to float32 here, because gaussian_filter()
# seems to only return True when the underlying value is
# approximately 1.0, not when it is above 0.5. So we do that
# here manually. cv2 does not support bool for gaussian blur.
image = image.astype(np.float32, copy=False)
elif dtype == iadt._FLOAT16_DTYPE:
image = image.astype(np.float32, copy=False)
# gaussian_filter() has no ksize argument
# TODO it does have a truncate argument that truncates at x
# standard deviations -- maybe can be used similarly to ksize
if ksize is not None:
ia.warn(
"Requested 'scipy' backend or picked it automatically by "
"backend='auto' n blur_gaussian_(), but also provided "
"'ksize' argument, which is not understood by that "
"backend and will be ignored.")
# Note that while gaussian_filter can be applied to all channels
# at the same time, that should not be done here, because then
# the blurring would also happen across channels (e.g. red values
# might be mixed with blue values in RGB)
if image.ndim == 2:
image[:, :] = ndimage.gaussian_filter(image[:, :], sigma,
mode="mirror")
else:
nb_channels = image.shape[2]
for channel in sm.xrange(nb_channels):
image[:, :, channel] = ndimage.gaussian_filter(
image[:, :, channel], sigma, mode="mirror")
if dtype.kind == "b":
image = image > 0.5
elif dtype != image.dtype:
image = iadt.restore_dtypes_(image, dtype)
return image
# Added in 0.5.0.
def _blur_gaussian_cv2(image, sigma, ksize):
dtype = image.dtype
if dtype.kind == "b":
image = image.astype(np.float32, copy=False)
elif dtype == iadt._FLOAT16_DTYPE:
image = image.astype(np.float32, copy=False)
elif dtype == iadt._INT8_DTYPE:
image = image.astype(np.int16, copy=False)
elif dtype == iadt._INT32_DTYPE:
image = image.astype(np.float64, copy=False)
# ksize here is derived from the equation to compute sigma based
# on ksize, see
# https://docs.opencv.org/3.1.0/d4/d86/group__imgproc__filter.html
# -> cv::getGaussianKernel()
# example values:
# sig = 0.1 -> ksize = -1.666
# sig = 0.5 -> ksize = 0.9999
# sig = 1.0 -> ksize = 1.0
# sig = 2.0 -> ksize = 11.0
# sig = 3.0 -> ksize = 17.666
# ksize = ((sig - 0.8)/0.3 + 1)/0.5 + 1
if ksize is None:
ksize = _compute_gaussian_blur_ksize(sigma)
else:
assert ia.is_single_integer(ksize), (
"Expected 'ksize' argument to be a number, "
"got %s." % (type(ksize),))
ksize = ksize + 1 if ksize % 2 == 0 else ksize
image_warped = image
if ksize > 0:
# works with >512 channels
# normalization not required here
# dst seems to not help here
image_warped = cv2.GaussianBlur(
image,
(ksize, ksize),
sigmaX=sigma,
sigmaY=sigma,
borderType=cv2.BORDER_REFLECT_101
)
if image_warped.ndim == 2 and image.ndim == 3:
image_warped = image_warped[..., np.newaxis]
if dtype.kind == "b":
image_warped = image_warped > 0.5
elif dtype != image.dtype:
image_warped = iadt.restore_dtypes_(image_warped, dtype)
return image_warped
def _compute_gaussian_blur_ksize(sigma):
if sigma < 3.0:
ksize = 3.3 * sigma # 99% of weight
elif sigma < 5.0:
ksize = 2.9 * sigma # 97% of weight
else:
ksize = 2.6 * sigma # 95% of weight
# we use 5x5 here as the minimum size as that simplifies
# comparisons with gaussian_filter() in the tests
# TODO reduce this to 3x3
ksize = int(max(ksize, 5))
if ksize % 2 == 0:
ksize += 1
return ksize
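# Illustrative sketch (an editorial addition, not part of the library API):
# prints the sigma -> ksize mapping produced by the heuristic above; the
# sigma values are arbitrary examples.
def _demo_compute_gaussian_blur_ksize():
    for sigma in (0.5, 1.0, 2.0, 3.0, 5.0):
        # every returned kernel size is odd and at least 5
        print(sigma, _compute_gaussian_blur_ksize(sigma))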
def blur_avg_(image, k):
"""Blur an image in-place by computing averages over local neighbourhoods.
This operation *may* change the input image in-place.
The padding behaviour around the image borders is cv2's
``BORDER_REFLECT_101``.
Added in 0.5.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (4)
* ``int64``: no (5)
* ``float16``: yes; tested (6)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no
* ``bool``: yes; tested (7)
- (1) rejected by ``cv2.blur()``
- (2) loss of resolution in ``cv2.blur()`` (result is ``int32``)
- (3) ``int8`` is mapped internally to ``int16``, ``int8`` itself
leads to cv2 error "Unsupported combination of source format
(=1), and buffer format (=4) in function 'getRowSumFilter'" in
``cv2``
- (4) results too inaccurate
- (5) loss of resolution in ``cv2.blur()`` (result is ``int32``)
- (6) ``float16`` is mapped internally to ``float32``
- (7) ``bool`` is mapped internally to ``float32``
Parameters
----------
image : numpy.ndarray
The image to blur. Expected to be of shape ``(H, W)`` or ``(H, W, C)``.
k : int or tuple of int
Kernel size to use. A single ``int`` will lead to a ``k x k``
kernel. Otherwise a ``tuple`` of two ``int`` ``(height, width)``
is expected.
Returns
-------
numpy.ndarray
The blurred image. Same shape and dtype as the input.
(Input image *might* have been altered in-place.)
"""
if isinstance(k, tuple):
k_height, k_width = k
else:
k_height, k_width = k, k
shape = image.shape
if 0 in shape:
return image
if k_height <= 0 or k_width <= 0 or (k_height, k_width) == (1, 1):
return image
iadt.gate_dtypes_strs(
{image.dtype},
allowed="bool uint8 uint16 int8 int16 float16 float32 float64",
disallowed="uint32 uint64 int32 int64 float128"
)
input_dtype = image.dtype
if image.dtype in {iadt._BOOL_DTYPE, iadt._FLOAT16_DTYPE}:
image = image.astype(np.float32, copy=False)
elif image.dtype == iadt._INT8_DTYPE:
image = image.astype(np.int16, copy=False)
input_ndim = len(shape)
if input_ndim == 2 or shape[-1] <= 512:
image = _normalize_cv2_input_arr_(image)
image_aug = cv2.blur(
image,
(k_width, k_height),
dst=image
)
# cv2.blur() removes channel axis for single-channel images
if input_ndim == 3 and image_aug.ndim == 2:
image_aug = image_aug[..., np.newaxis]
else:
# TODO this is quite inefficient
# handling more than 512 channels in cv2.blur()
channels = [
cv2.blur(
_normalize_cv2_input_arr_(image[..., c]),
(k_width, k_height)
)
for c in sm.xrange(shape[-1])
]
image_aug = np.stack(channels, axis=-1)
if input_dtype.kind == "b":
image_aug = image_aug > 0.5
elif input_dtype in {iadt._INT8_DTYPE, iadt._FLOAT16_DTYPE}:
image_aug = iadt.restore_dtypes_(image_aug, input_dtype)
return image_aug
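# Illustrative sketch (an editorial addition, not part of the library API):
# shows blur_avg_() with an asymmetric (height, width) kernel; the test
# image content is an arbitrary assumption for the demo.
def _demo_blur_avg_():
    image = np.zeros((32, 32, 3), dtype=np.uint8)
    image[::4, :, :] = 255  # horizontal stripes
    # k is (height, width): (1, 7) averages along x only, (7, 1) along y only
    return blur_avg_(image.copy(), (7, 1))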
def blur_mean_shift_(image, spatial_window_radius, color_window_radius):
"""Apply a pyramidic mean shift filter to the input image in-place.
This produces an output image that has similarity with one modified by
a bilateral filter. That is different from mean shift *segmentation*,
which averages the colors in segments found by mean shift clustering.
This function is a thin wrapper around ``cv2.pyrMeanShiftFiltering``.
.. note::
This function does *not* change the image's colorspace to ``RGB``
before applying the mean shift filter. A non-``RGB`` colorspace will
hence influence the results.
.. note::
This function is quite slow.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Not supported by ``cv2.pyrMeanShiftFiltering``.
Parameters
----------
image : ndarray
``(H,W)`` or ``(H,W,1)`` or ``(H,W,3)`` image to blur.
Images with no or one channel will be temporarily tiled to have
three channels.
spatial_window_radius : number
Spatial radius for pixels that are assumed to be similar.
color_window_radius : number
Color radius for pixels that are assumed to be similar.
Returns
-------
ndarray
Blurred input image. Same shape and dtype as the input.
(Input image *might* have been altered in-place.)
"""
if 0 in image.shape[0:2]:
return image
# opencv method only supports uint8
iadt.allow_only_uint8({image.dtype})
shape_is_hw = (image.ndim == 2)
shape_is_hw1 = (image.ndim == 3 and image.shape[-1] == 1)
shape_is_hw3 = (image.ndim == 3 and image.shape[-1] == 3)
assert shape_is_hw or shape_is_hw1 or shape_is_hw3, (
"Expected (H,W) or (H,W,1) or (H,W,3) image, "
"got shape %s." % (image.shape,))
# opencv method only supports (H,W,3), so we have to tile here for (H,W)
# and (H,W,1)
if shape_is_hw:
image = np.tile(image[..., np.newaxis], (1, 1, 3))
elif shape_is_hw1:
image = np.tile(image, (1, 1, 3))
spatial_window_radius = max(spatial_window_radius, 0)
color_window_radius = max(color_window_radius, 0)
image = _normalize_cv2_input_arr_(image)
image = cv2.pyrMeanShiftFiltering(
image,
sp=spatial_window_radius,
sr=color_window_radius,
dst=image)
if shape_is_hw:
image = image[..., 0]
elif shape_is_hw1:
image = image[..., 0:1]
return image
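# Illustrative sketch (an editorial addition, not part of the library API):
# a basic call of blur_mean_shift_() on a single-channel image; the radii are
# arbitrary assumptions. Internally the image is tiled to three channels, so
# the output keeps the input shape.
def _demo_blur_mean_shift_():
    rng = np.random.RandomState(0)
    image = rng.randint(0, 255, size=(64, 64)).astype(np.uint8)
    blurred = blur_mean_shift_(image.copy(), spatial_window_radius=10,
                               color_window_radius=20)
    assert blurred.shape == image.shape
    return blurred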
# TODO offer different values for sigma on x/y-axis, supported by cv2 but not
# by scipy
# TODO add channelwise flag - channelwise=False would be supported by scipy
class GaussianBlur(meta.Augmenter):
"""Augmenter to blur images using gaussian kernels.
**Supported dtypes**:
See ``~imgaug.augmenters.blur.blur_gaussian_(backend="auto")``.
Parameters
----------
sigma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the gaussian kernel.
Values in the range ``0.0`` (no blur) to ``3.0`` (strong blur) are
common.
* If a single ``float``, that value will always be used as the
standard deviation.
* If a tuple ``(a, b)``, then a random value from the interval
``[a, b]`` will be picked per image.
* If a list, then a random value will be sampled per image from
that list.
* If a ``StochasticParameter``, then ``N`` samples will be drawn
from that parameter per ``N`` input images.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.GaussianBlur(sigma=1.5)
Blur all images using a gaussian kernel with a standard deviation of
``1.5``.
>>> aug = iaa.GaussianBlur(sigma=(0.0, 3.0))
Blur images using a gaussian kernel with a random standard deviation
sampled uniformly (per image) from the interval ``[0.0, 3.0]``.
"""
def __init__(self, sigma=(0.0, 3.0),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(GaussianBlur, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.sigma = iap.handle_continuous_param(
sigma, "sigma", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
# epsilon value to estimate whether sigma is sufficiently above 0 to
# apply the blur
self.eps = 1e-3
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
samples = self.sigma.draw_samples((nb_images,),
random_state=random_state)
for image, sig in zip(images, samples):
image[...] = blur_gaussian_(image, sigma=sig, eps=self.eps)
return batch
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.sigma]
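# Illustrative sketch (an editorial addition, not part of the library API):
# the docstring above mentions that sigma may be a StochasticParameter; this
# hedged example shows that variant explicitly. Shapes and values are
# arbitrary assumptions.
def _demo_gaussian_blur_with_param():
    aug = GaussianBlur(sigma=iap.Uniform(0.0, 3.0), seed=1)
    images = np.zeros((4, 32, 32, 3), dtype=np.uint8)
    # one sigma is sampled per image from the given parameter
    return aug.augment_images(images)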
class AverageBlur(meta.Augmenter):
"""Blur an image by computing simple means over neighbourhoods.
The padding behaviour around the image borders is cv2's
``BORDER_REFLECT_101``.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (4)
* ``int64``: no (5)
* ``float16``: yes; tested (6)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no
* ``bool``: yes; tested (7)
- (1) rejected by ``cv2.blur()``
- (2) loss of resolution in ``cv2.blur()`` (result is ``int32``)
- (3) ``int8`` is mapped internally to ``int16``, ``int8`` itself
leads to cv2 error "Unsupported combination of source format
(=1), and buffer format (=4) in function 'getRowSumFilter'" in
``cv2``
- (4) results too inaccurate
- (5) loss of resolution in ``cv2.blur()`` (result is ``int32``)
- (6) ``float16`` is mapped internally to ``float32``
- (7) ``bool`` is mapped internally to ``float32``
Parameters
----------
k : int or tuple of int or tuple of tuple of int or imgaug.parameters.StochasticParameter or tuple of StochasticParameter, optional
Kernel size to use.
* If a single ``int``, then that value will be used for the height
and width of the kernel.
* If a tuple of two ``int`` s ``(a, b)``, then the kernel size will
be sampled from the interval ``[a..b]``.
* If a tuple of two tuples of ``int`` s ``((a, b), (c, d))``,
then per image a random kernel height will be sampled from the
interval ``[a..b]`` and a random kernel width will be sampled
from the interval ``[c..d]``.
* If a ``StochasticParameter``, then ``N`` samples will be drawn
from that parameter per ``N`` input images, each representing
the kernel size for the n-th image.
* If a tuple ``(a, b)``, where either ``a`` or ``b`` is a tuple,
then ``a`` and ``b`` will be treated according to the rules
above. This leads to different values for height and width of
the kernel.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.AverageBlur(k=5)
Blur all images using a kernel size of ``5x5``.
>>> aug = iaa.AverageBlur(k=(2, 5))
Blur images using a varying kernel size, which is sampled (per image)
uniformly from the interval ``[2..5]``.
>>> aug = iaa.AverageBlur(k=((5, 7), (1, 3)))
Blur images using a varying kernel size, whose height is sampled
(per image) uniformly from the interval ``[5..7]`` and whose width is
sampled (per image) uniformly from ``[1..3]``.
"""
def __init__(self, k=(1, 7),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(AverageBlur, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
# TODO replace this by iap.handle_discrete_kernel_size()
self.mode = "single"
if ia.is_single_number(k):
self.k = iap.Deterministic(int(k))
elif ia.is_iterable(k):
assert len(k) == 2, (
"Expected iterable 'k' to contain exactly 2 entries, "
"got %d." % (len(k),))
if all([ia.is_single_number(ki) for ki in k]):
self.k = iap.DiscreteUniform(int(k[0]), int(k[1]))
elif all([isinstance(ki, iap.StochasticParameter) for ki in k]):
self.mode = "two"
self.k = (k[0], k[1])
else:
k_tuple = [None, None]
if ia.is_single_number(k[0]):
k_tuple[0] = iap.Deterministic(int(k[0]))
elif (ia.is_iterable(k[0])
and all([ia.is_single_number(ki) for ki in k[0]])):
k_tuple[0] = iap.DiscreteUniform(int(k[0][0]),
int(k[0][1]))
else:
raise Exception(
"k[0] expected to be int or tuple of two ints, "
"got %s" % (type(k[0]),))
if ia.is_single_number(k[1]):
k_tuple[1] = iap.Deterministic(int(k[1]))
elif (ia.is_iterable(k[1])
and all([ia.is_single_number(ki) for ki in k[1]])):
k_tuple[1] = iap.DiscreteUniform(int(k[1][0]),
int(k[1][1]))
else:
raise Exception(
"k[1] expected to be int or tuple of two ints, "
"got %s" % (type(k[1]),))
self.mode = "two"
self.k = k_tuple
elif isinstance(k, iap.StochasticParameter):
self.k = k
else:
raise Exception(
"Expected int, tuple/list with 2 entries or "
"StochasticParameter. Got %s." % (type(k),))
self.k = iap._wrap_leafs_of_param_in_prefetchers(
self.k, iap._NB_PREFETCH
)
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
if self.mode == "single":
samples = self.k.draw_samples((nb_images,),
random_state=random_state)
samples = (samples, samples)
else:
rss = random_state.duplicate(2)
samples = (
self.k[0].draw_samples((nb_images,), random_state=rss[0]),
self.k[1].draw_samples((nb_images,), random_state=rss[1]),
)
gen = enumerate(zip(images, samples[0], samples[1]))
for i, (image, ksize_h, ksize_w) in gen:
batch.images[i] = blur_avg_(image, (ksize_h, ksize_w))
return batch
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.k]
class MedianBlur(meta.Augmenter):
"""Blur an image by computing median values over neighbourhoods.
Median blurring can be used to remove small dirt from images.
At larger kernel sizes, its effects have some similarity with Superpixels.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
k : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
Kernel size.
* If a single ``int``, then that value will be used for the
height and width of the kernel. Must be an odd value.
* If a tuple of two ints ``(a, b)``, then the kernel size will be
an odd value sampled from the interval ``[a..b]``. ``a`` and
``b`` must both be odd values.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then ``N`` samples will be drawn
from that parameter per ``N`` input images, each representing
the kernel size for the nth image. Expected to be discrete. If
a sampled value is not odd, then that value will be increased
by ``1``.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.MedianBlur(k=5)
Blur all images using a kernel size of ``5x5``.
>>> aug = iaa.MedianBlur(k=(3, 7))
Blur images using varying kernel sizes, which are sampled uniformly from
the interval ``[3..7]``. Only odd values will be sampled, i.e. ``3``
or ``5`` or ``7``.
"""
def __init__(self, k=(1, 7),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(MedianBlur, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
# TODO replace this by iap.handle_discrete_kernel_size()
self.k = iap.handle_discrete_param(
k, "k", value_range=(1, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
if ia.is_single_integer(k):
assert k % 2 != 0, (
"Expected k to be odd, got %d. Add or subtract 1." % (
int(k),))
elif ia.is_iterable(k):
assert all([ki % 2 != 0 for ki in k]), (
"Expected all values in iterable k to be odd, but at least "
"one was not. Add or subtract 1 to/from that value.")
self.k = iap._wrap_leafs_of_param_in_prefetchers(
self.k, iap._NB_PREFETCH
)
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
samples = self.k.draw_samples((nb_images,), random_state=random_state)
for i, (image, ksize) in enumerate(zip(images, samples)):
has_zero_sized_axes = (image.size == 0)
if ksize > 1 and not has_zero_sized_axes:
ksize = ksize + 1 if ksize % 2 == 0 else ksize
if image.ndim == 2 or image.shape[-1] <= 512:
image_aug = cv2.medianBlur(
_normalize_cv2_input_arr_(image), ksize)
# cv2.medianBlur() removes channel axis for single-channel
# images
if image_aug.ndim == 2:
image_aug = image_aug[..., np.newaxis]
else:
# TODO this is quite inefficient
# handling more than 512 channels in cv2.medianBlur()
channels = [
cv2.medianBlur(
_normalize_cv2_input_arr_(image[..., c]), ksize)
for c in sm.xrange(image.shape[-1])
]
image_aug = np.stack(channels, axis=-1)
batch.images[i] = image_aug
return batch
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.k]
# TODO tests
class BilateralBlur(meta.Augmenter):
"""Blur/Denoise an image using a bilateral filter.
Bilateral filters blur homogeneous and textured areas, while trying to
preserve edges.
See
http://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#bilateralfilter
for more information regarding the parameters.
**Supported dtypes**:
* ``uint8``: yes; not tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
d : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
Diameter of each pixel neighborhood with value range ``[1 .. inf)``.
High values for `d` lead to significantly worse performance. Values
equal or less than ``10`` seem to be good. Use ``<5`` for real-time
applications.
* If a single ``int``, then that value will be used for the
diameter.
* If a tuple of two ``int`` s ``(a, b)``, then the diameter will
be a value sampled from the interval ``[a..b]``.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then ``N`` samples will be drawn
from that parameter per ``N`` input images, each representing
the diameter for the n-th image. Expected to be discrete.
sigma_color : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Filter sigma in the color space with value range ``[1, inf)``. A
large value of the parameter means that farther colors within the
pixel neighborhood (see `sigma_space`) will be mixed together,
resulting in larger areas of semi-equal color.
* If a single ``int``, then that value will be used for the
diameter.
* If a tuple of two ``int`` s ``(a, b)``, then the diameter will
be a value sampled from the interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then ``N`` samples will be drawn
from that parameter per ``N`` input images, each representing
the diameter for the n-th image. Expected to be discrete.
sigma_space : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Filter sigma in the coordinate space with value range ``[1, inf)``. A
large value of the parameter means that farther pixels will influence
each other as long as their colors are close enough (see
`sigma_color`).
* If a single ``int``, then that value will be used for the
diameter.
* If a tuple of two ``int`` s ``(a, b)``, then the diameter will
be a value sampled from the interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then ``N`` samples will be drawn
from that parameter per ``N`` input images, each representing
the diameter for the n-th image. Expected to be discrete.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.BilateralBlur(
>>> d=(3, 10), sigma_color=(10, 250), sigma_space=(10, 250))
Blur all images using a bilateral filter with a `max distance` sampled
uniformly from the interval ``[3, 10]`` and wide ranges for `sigma_color`
and `sigma_space`.
"""
def __init__(self, d=(1, 9), sigma_color=(10, 250), sigma_space=(10, 250),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
# pylint: disable=invalid-name
super(BilateralBlur, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.d = iap.handle_discrete_param(
d, "d", value_range=(1, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.sigma_color = iap.handle_continuous_param(
sigma_color, "sigma_color", value_range=(1, None),
tuple_to_uniform=True, list_to_choice=True)
self.sigma_space = iap.handle_continuous_param(
sigma_space, "sigma_space", value_range=(1, None),
tuple_to_uniform=True, list_to_choice=True)
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
# pylint: disable=invalid-name
if batch.images is None:
return batch
images = batch.images
# Make sure that all images have 3 channels
assert all([image.shape[2] == 3 for image in images]), (
"BilateralBlur can currently only be applied to images with 3 "
"channels. Got channels: %s" % (
[image.shape[2] for image in images],))
nb_images = len(images)
rss = random_state.duplicate(3)
samples_d = self.d.draw_samples((nb_images,), random_state=rss[0])
samples_sigma_color = self.sigma_color.draw_samples(
(nb_images,), random_state=rss[1])
samples_sigma_space = self.sigma_space.draw_samples(
(nb_images,), random_state=rss[2])
gen = enumerate(zip(images, samples_d, samples_sigma_color,
samples_sigma_space))
for i, (image, di, sigma_color_i, sigma_space_i) in gen:
has_zero_sized_axes = (image.size == 0)
if di != 1 and not has_zero_sized_axes:
batch.images[i] = cv2.bilateralFilter(
_normalize_cv2_input_arr_(image),
di, sigma_color_i, sigma_space_i)
return batch
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.d, self.sigma_color, self.sigma_space]
# TODO add k sizing via float/percentage
class MotionBlur(iaa_convolutional.Convolve):
"""Blur images in a way that fakes camera or object movements.
**Supported dtypes**:
See :class:`~imgaug.augmenters.convolutional.Convolve`.
Parameters
----------
k : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
Kernel size to use.
* If a single ``int``, then that value will be used for the height
and width of the kernel.
* If a tuple of two ``int`` s ``(a, b)``, then the kernel size
will be sampled from the interval ``[a..b]``.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then ``N`` samples will be drawn
from that parameter per ``N`` input images, each representing
the kernel size for the n-th image.
angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Angle of the motion blur in degrees (clockwise, relative to top center
direction).
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the interval
``[a, b]`` will be uniformly sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Forward/backward direction of the motion blur. Lower values towards
``-1.0`` will point the motion blur towards the back (with angle
provided via `angle`). Higher values towards ``1.0`` will point the
motion blur forward. A value of ``0.0`` leads to a uniformly (but
still angled) motion blur.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the interval
``[a, b]`` will be uniformly sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Interpolation order to use when rotating the kernel according to
`angle`.
See :func:`~imgaug.augmenters.geometric.Affine.__init__`.
Recommended to be ``0`` or ``1``, with ``0`` being faster, but less
continuous/smooth as `angle` is changed, particularly around multiple
of ``45`` degrees.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.MotionBlur(k=15)
Apply motion blur with a kernel size of ``15x15`` pixels to images.
>>> aug = iaa.MotionBlur(k=15, angle=[-45, 45])
Apply motion blur with a kernel size of ``15x15`` pixels and a blur angle
of either ``-45`` or ``45`` degrees (randomly picked per image).
"""
def __init__(self, k=(3, 7), angle=(0, 360), direction=(-1.0, 1.0), order=1,
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
# TODO allow (1, None) and set to identity matrix if k == 1
k_param = iap.handle_discrete_param(
k, "k", value_range=(3, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
angle_param = iap.handle_continuous_param(
angle, "angle", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
direction_param = iap.handle_continuous_param(
direction, "direction", value_range=(-1.0-1e-6, 1.0+1e-6),
tuple_to_uniform=True, list_to_choice=True)
matrix_gen = _MotionBlurMatrixGenerator(k_param, angle_param,
direction_param, order)
super(MotionBlur, self).__init__(
matrix_gen,
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
# Added in 0.4.0.
class _MotionBlurMatrixGenerator(object):
# Added in 0.4.0.
def __init__(self, k, angle, direction, order):
self.k = k
self.angle = angle
self.direction = direction
self.order = order
# Added in 0.4.0.
def __call__(self, _image, nb_channels, random_state):
# avoid cyclic import between blur and geometric
from . import geometric as iaa_geometric
# force discrete for k_sample via int() in case of stochastic
# parameter
k_sample = int(
self.k.draw_sample(random_state=random_state))
angle_sample = self.angle.draw_sample(
random_state=random_state)
direction_sample = self.direction.draw_sample(
random_state=random_state)
k_sample = k_sample if k_sample % 2 != 0 else k_sample + 1
direction_sample = np.clip(direction_sample, -1.0, 1.0)
direction_sample = (direction_sample + 1.0) / 2.0
matrix = np.zeros((k_sample, k_sample), dtype=np.float32)
matrix[:, k_sample//2] = np.linspace(
float(direction_sample),
1.0 - float(direction_sample),
num=k_sample)
rot = iaa_geometric.Affine(rotate=angle_sample, order=self.order)
matrix = (
rot.augment_image(
(matrix * 255).astype(np.uint8)
).astype(np.float32) / 255.0
)
return [matrix/np.sum(matrix)] * nb_channels
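# Illustrative sketch (an editorial addition, not part of the library API):
# a standalone reconstruction of the unrotated motion-blur kernel built in
# __call__ above; the rotation via Affine is omitted here. k and direction
# are arbitrary example values.
def _demo_motion_blur_matrix(k=5, direction=0.0):
    d = (np.clip(direction, -1.0, 1.0) + 1.0) / 2.0
    matrix = np.zeros((k, k), dtype=np.float32)
    # weights run down the centre column; direction skews them forward/backward
    matrix[:, k // 2] = np.linspace(float(d), 1.0 - float(d), num=k)
    return matrix / np.sum(matrix)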
# TODO add a per_channel flag?
# TODO make spatial_radius a fraction of the input image size?
class MeanShiftBlur(meta.Augmenter):
"""Apply a pyramidic mean shift filter to each image.
See also :func:`blur_mean_shift_` for details.
This augmenter expects input images of shape ``(H,W)`` or ``(H,W,1)``
or ``(H,W,3)``.
.. note::
This augmenter is quite slow.
Added in 0.4.0.
**Supported dtypes**:
See :func:`~imgaug.augmenters.blur.blur_mean_shift_`.
Parameters
----------
spatial_radius : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Spatial radius for pixels that are assumed to be similar.
* If ``number``: Exactly that value will be used for all images.
* If ``tuple`` ``(a, b)``: A random value will be uniformly
sampled per image from the interval ``[a, b)``.
* If ``list``: A random value will be sampled from that ``list``
per image.
* If ``StochasticParameter``: The parameter will be queried once
per batch for ``(N,)`` values with ``N`` denoting the number of
images.
color_radius : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Color radius for pixels that are assumed to be similar.
* If ``number``: Exactly that value will be used for all images.
* If ``tuple`` ``(a, b)``: A random value will be uniformly
sampled per image from the interval ``[a, b)``.
* If ``list``: A random value will be sampled from that ``list``
per image.
* If ``StochasticParameter``: The parameter will be queried once
per batch for ``(N,)`` values with ``N`` denoting the number of
images.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.MeanShiftBlur()
Create a mean shift blur augmenter.
"""
# Added in 0.4.0.
def __init__(self, spatial_radius=(5.0, 40.0), color_radius=(5.0, 40.0),
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(MeanShiftBlur, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.spatial_window_radius = iap.handle_continuous_param(
spatial_radius, "spatial_radius",
value_range=(0.01, None), tuple_to_uniform=True,
list_to_choice=True)
self.color_window_radius = iap.handle_continuous_param(
color_radius, "color_radius",
value_range=(0.01, None), tuple_to_uniform=True,
list_to_choice=True)
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is not None:
samples = self._draw_samples(batch, random_state)
for i, image in enumerate(batch.images):
batch.images[i] = blur_mean_shift_(
image,
spatial_window_radius=samples[0][i],
color_window_radius=samples[1][i]
)
return batch
# Added in 0.4.0.
def _draw_samples(self, batch, random_state):
nb_rows = batch.nb_rows
return (
self.spatial_window_radius.draw_samples((nb_rows,),
random_state=random_state),
self.color_window_radius.draw_samples((nb_rows,),
random_state=random_state)
)
# Added in 0.4.0.
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.spatial_window_radius, self.color_window_radius]
| aleju/ImageAugmenter | imgaug/augmenters/blur.py | Python | mit | 52,323 | [
"Gaussian"
] | 46b5f833e6c26196f2291090878ba9832965f6cd22257846fcbc844a163037a5 |
"""CatBoost coding"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'Jan Motl'
class CatBoostEncoder(BaseEstimator, util.TransformerWithTargetMixin):
"""CatBoost coding for categorical features.
Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.
This is very similar to leave-one-out encoding, but calculates the
values "on-the-fly". Consequently, the values naturally vary
during the training phase and it is not necessary to add random noise.
Beware, the training data have to be randomly permuted. E.g.:
# Random permutation
perm = np.random.permutation(len(X))
X = X.iloc[perm].reset_index(drop=True)
y = y.iloc[perm].reset_index(drop=True)
This is necessary because some data sets are sorted based on the target
value and this coder encodes the features on-the-fly in a single pass.
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
handle_unknown: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
sigma: float
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
sigma gives the standard deviation (spread or "width") of the normal distribution.
a: float
additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] Transforming categorical features to numerical features, from
https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/
.. [2] CatBoost: unbiased boosting with categorical features, from
https://arxiv.org/abs/1706.09516
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1):
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.verbose = verbose
self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X
self.cols = cols
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self._mean = None
self.random_state = random_state
self.sigma = sigma
self.feature_names = None
self.a = a
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# unite the input into pandas types
X, y = util.convert_inputs(X, y)
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.use_default_cols:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
categories = self._fit(
X, y,
cols=self.cols
)
self.mapping = categories
X_temp = self.transform(X, y, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
except KeyError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples] when transform by leave one out
None, when transform without target information (such as transform test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# unite the input into pandas types
X, y = util.convert_inputs(X, y)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
if not list(self.cols):
return X
X = self._transform(
X, y,
mapping=self.mapping
)
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
def _fit(self, X_in, y, cols=None):
X = X_in.copy(deep=True)
if cols is None:
cols = X.columns.values
self._mean = y.mean()
return {col: self._fit_column_map(X[col], y) for col in cols}
def _fit_column_map(self, series, y):
category = pd.Categorical(series)
categories = category.categories
codes = category.codes.copy()
codes[codes == -1] = len(categories)
categories = np.append(categories, np.nan)
return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)]))
result = y.groupby(codes).agg(['sum', 'count'])
return result.rename(return_map)
def _transform(self, X_in, y, mapping=None):
"""
The model uses a single column of floats to represent the means of the target variables.
"""
X = X_in.copy(deep=True)
random_state_ = check_random_state(self.random_state)
# Prepare the data
if y is not None:
# Convert bools to numbers (the target must be summable)
y = y.astype('double')
for col, colmap in mapping.items():
level_notunique = colmap['count'] > 1
unique_train = colmap.index
unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train], dtype=unique_train.dtype)
is_nan = X_in[col].isnull()
is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object))
if self.handle_unknown == 'error' and is_unknown_value.any():
raise ValueError('Columns to be encoded can not contain new values')
if y is None: # Replace level with its mean target; if level occurs only once, use global mean
level_means = ((colmap['sum'] + self._mean) / (colmap['count'] + self.a)).where(level_notunique, self._mean)
X[col] = X[col].map(level_means)
else:
# Simulation of CatBoost implementation, which calculates leave-one-out on the fly.
# The nice thing about this is that it helps to prevent overfitting. The bad thing
# is that CatBoost uses many iterations over the data. But we run just one iteration.
# Still, it works better than leave-one-out without any noise.
# See:
# https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/
# Cumsum does not work nicely with None (while cumcount does).
# As a workaround, we cast the grouping column as string.
# See: issue #209
temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount'])
X[col] = (temp['cumsum'] - y + self._mean) / (temp['cumcount'] + self.a)
if self.handle_unknown == 'value':
if X[col].dtype.name == 'category':
X[col] = X[col].astype(float)
X.loc[is_unknown_value, col] = self._mean
elif self.handle_unknown == 'return_nan':
X.loc[is_unknown_value, col] = np.nan
if self.handle_missing == 'value':
X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean
elif self.handle_missing == 'return_nan':
X.loc[is_nan, col] = np.nan
if self.sigma is not None and y is not None:
X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0])
return X
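# Illustrative sketch (an editorial addition, not part of the encoder's API):
# a standalone reproduction of the running statistic computed in _transform
# above, on toy data, so the (cumsum - y + prior) / (cumcount + a) step is
# easier to follow.
@staticmethod
def _demo_running_statistic():
    x = pd.Series(['a', 'a', 'b', 'a'])
    y = pd.Series([1.0, 0.0, 1.0, 1.0])
    prior, a = y.mean(), 1.0
    temp = y.groupby(x.astype(str)).agg(['cumsum', 'cumcount'])
    # each row is encoded only from earlier rows of its own category
    return (temp['cumsum'] - y + prior) / (temp['cumcount'] + a)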
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns
-------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
"""
if not isinstance(self.feature_names, list):
raise ValueError('Must fit data first. Affected feature names are not known before.')
else:
return self.feature_names
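# Illustrative sketch (an editorial addition, not part of the library API):
# end-to-end use mirroring the class docstring, including the random
# permutation it recommends for training data; the toy frame is an
# arbitrary assumption.
def _demo_catboost_encoder():
    X = pd.DataFrame({'color': ['red', 'blue', 'red', 'green', 'blue', 'red']})
    y = pd.Series([1, 0, 1, 0, 1, 1])
    perm = np.random.RandomState(0).permutation(len(X))
    X = X.iloc[perm].reset_index(drop=True)
    y = y.iloc[perm].reset_index(drop=True)
    enc = CatBoostEncoder(cols=['color']).fit(X, y)
    return enc.transform(X)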
| wdm0006/categorical_encoding | category_encoders/cat_boost.py | Python | bsd-3-clause | 11,562 | [
"Gaussian"
] | 7b2bc746f0819e309e0204bef9d7ba2849d330d92d1ad0ed0241dda75ec515c6 |
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.2"
__license__ = """
Copyright (c) 2010-2012 Kurt McKee <contactme@kurtmckee.org>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>"]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# If you want feedparser to automatically parse microformat content embedded
# in entry contents, set this to 1
PARSE_MICROFORMATS = 1
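# --- Illustrative note (editor addition): overriding the switches above ---
# These settings are plain module-level globals; an embedding application
# normally rebinds them before calling parse(). The values below are placeholders.
def _configure_feedparser_example():
    global USER_AGENT, RESOLVE_RELATIVE_URIS
    USER_AGENT = 'MyAggregator/1.0 +http://aggregator.example/'
    RESOLVE_RELATIVE_URIS = 0   # keep URIs exactly as published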
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&amp;')
data = data.replace('>', '&gt;')
data = data.replace('<', '&lt;')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing, content sanitizing, and
# microformat support (at least while feedparser depends on BeautifulSoup).
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
# resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# BeautifulSoup is used to extract microformat content from HTML
# feedparser is tested using BeautifulSoup 3.2.0
# http://www.crummy.com/software/BeautifulSoup/
try:
import BeautifulSoup
except ImportError:
BeautifulSoup = None
PARSE_MICROFORMATS = False
try:
# the utf_32 codec was introduced in Python 2.6; it's necessary to
# check this as long as feedparser supports Python 2.4 and 2.5
codecs.lookup('utf_32')
except LookupError:
_UTF32_AVAILABLE = False
else:
_UTF32_AVAILABLE = True
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError("object doesn't have key 'category'")
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
#warnings.warn("To avoid breaking existing software while "
# "fixing issue 310, a temporary mapping has been created "
# "from `updated` to `published` if `updated` doesn't "
# "exist. This fallback will be removed in a future version "
# "of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
#warnings.warn("To avoid breaking existing software while "
# "fixing issue 310, a temporary mapping has been created "
# "from `updated_parsed` to `published_parsed` if "
# "`updated_parsed` doesn't exist. This fallback will be "
# "removed in a future version of feedparser.",
# DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("object has no attribute '%s'" % key)
def __hash__(self):
return id(self)
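# --- Illustrative sketch (editor addition): the key aliasing implemented above ---
def _feedparserdict_aliasing_example():
    d = FeedParserDict()
    d['channel'] = FeedParserDict(title=u'Example')   # keymap stores this under 'feed'
    assert d['feed'].title == u'Example'              # canonical key
    assert d.channel.title == u'Example'              # alias resolved via __getattr__
    assert 'items' not in d                           # 'items' aliases 'entries', not set yet
    return d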
_cp1252 = {
128: unichr(8364), # euro sign
130: unichr(8218), # single low-9 quotation mark
131: unichr( 402), # latin small letter f with hook
132: unichr(8222), # double low-9 quotation mark
133: unichr(8230), # horizontal ellipsis
134: unichr(8224), # dagger
135: unichr(8225), # double dagger
136: unichr( 710), # modifier letter circumflex accent
137: unichr(8240), # per mille sign
138: unichr( 352), # latin capital letter s with caron
139: unichr(8249), # single left-pointing angle quotation mark
140: unichr( 338), # latin capital ligature oe
142: unichr( 381), # latin capital letter z with caron
145: unichr(8216), # left single quotation mark
146: unichr(8217), # right single quotation mark
147: unichr(8220), # left double quotation mark
148: unichr(8221), # right double quotation mark
149: unichr(8226), # bullet
150: unichr(8211), # en dash
151: unichr(8212), # em dash
152: unichr( 732), # small tilde
153: unichr(8482), # trade mark sign
154: unichr( 353), # latin small letter s with caron
155: unichr(8250), # single right-pointing angle quotation mark
156: unichr( 339), # latin small ligature oe
158: unichr( 382), # latin small letter z with caron
159: unichr( 376), # latin capital letter y with diaeresis
}
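# --- Illustrative sketch (editor addition): how the _cp1252 map above is applied ---
def _cp1252_translate_example():
    # unicode.translate() accepts an {ordinal: unicode} map, which is exactly the
    # shape of _cp1252; stray cp1252 code points become their Unicode equivalents.
    s = u'\x93smart quotes\x94 and an ellipsis\x85'
    return s.translate(_cp1252)   # u'\u201csmart quotes\u201d and an ellipsis\u2026'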
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
#try:
if not isinstance(uri, unicode):
uri = uri.decode('utf-8', 'ignore')
uri = urlparse.urljoin(base, uri)
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
#except:
# uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
# return urlparse.urljoin(base, uri)
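# --- Illustrative sketch (editor addition): what _urljoin does ---
def _urljoin_example():
    # urlparse.urljoin plus the _urifixer normalization and a utf-8 decode;
    # the URLs below are placeholders.
    return _urljoin(u'http://example.com/feed/', u'../img/logo.png')
    # -> u'http://example.com/img/logo.png'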
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
v = v.replace('&amp;', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
# uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
# No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
# called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete CDATA block.
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
if loweruri.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
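# Illustration (editor addition): lookslikehtml() returns 1 only when the string
# contains a closing tag or an entity AND every tag/entity it contains is known
# safe HTML; otherwise it returns None. For example (hypothetical inputs):
#     lookslikehtml(u'Fish &amp; chips <b>tonight</b>')  ->  1
#     lookslikehtml(u'3 < 5, & 5 > 3')                    ->  None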
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'&lt;&gt;', u'')
author = author.replace(u'<>', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', u'alternate')
if attrsD['rel'] == u'self':
attrsD.setdefault('type', u'application/atom+xml')
else:
attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
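    # Illustrative sketch (not part of the original module): feed() below routes
    # XML-style short tags through _shorttag_replace(), so void elements keep
    # the short form while any other element is expanded into an open/close pair:
    #
    #     '<br/>'   ->  '<br />'
    #     '<div/>'  ->  '<div></div>'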
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
pass
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
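    # Illustrative sketch: normalize_attrs() lowercases attribute names, also
    # lowercases the values of 'rel' and 'type', and sorts the pairs:
    #
    #     self.normalize_attrs([('HREF', '/x'), ('REL', 'Tag')])
    #     # -> [('href', '/x'), ('rel', 'tag')]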
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, unicode):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
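    # Illustrative sketch (assuming the module-level _cp1252 table follows the
    # standard Windows-1252 mapping): references in the 128-159 range are
    # rewritten to their Unicode code points, anything else is kept verbatim:
    #
    #     self.handle_charref('146')   # appends '&#x2019;' (right single quote)
    #     self.handle_charref('8217')  # appends '&#8217;'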
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
self.handle_data('<')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if isinstance(data, unicode):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if isinstance(s, basestring):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
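    # Illustrative sketch of the folding rule above (RFC 2426 section 2.6): the
    # first physical line carries up to 75 characters, every continuation line
    # starts with a single space and carries up to 74 more:
    #
    #     self.vcardFold('NOTE:' + 'x' * 200)
    #     # -> three physical lines; lines 2 and 3 begin with a space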
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple:
return []
elif iPropertyType == self.STRING:
return ''
elif iPropertyType == self.DATE:
return None
elif iPropertyType == self.URI:
return ''
elif iPropertyType == self.NODE:
return None
else:
return None
arValues = []
for elmResult in arResults:
sValue = None
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a':
sValue = elmResult.get('href')
elif sNodeName == 'img':
sValue = elmResult.get('src')
elif sNodeName == 'object':
sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
def findVCards(self, elmRoot, bAgentParsing=0):
sVCards = ''
if not bAgentParsing:
arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
else:
arCards = [elmRoot]
for elmCard in arCards:
arLines = []
def processSingleString(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
return sValue or u''
def processSingleURI(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
if sValue:
sContentType = ''
sEncoding = ''
sValueKey = ''
if sValue.startswith('data:'):
sEncoding = ';ENCODING=b'
sContentType = sValue.split(';')[0].split('/').pop()
sValue = sValue.split(',', 1).pop()
else:
elmValue = self.getPropertyValue(elmCard, sProperty)
if elmValue:
if sProperty != 'url':
sValueKey = ';VALUE=uri'
sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
sContentType = sContentType.upper()
if sContentType == 'OCTET-STREAM':
sContentType = ''
if sContentType:
sContentType = ';TYPE=' + sContentType.upper()
arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
def processTypeValue(sProperty, arDefaultType, arForceType=None):
arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
for elmResult in arResults:
arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
if arForceType:
arType = self.unique(arForceType + arType)
if not arType:
arType = arDefaultType
sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
# AGENT
# must do this before all other properties because it is destructive
# (removes nested class="vcard" nodes so they don't interfere with
# this vcard's other properties)
arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
for elmAgent in arAgent:
if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
sAgentValue = self.findVCards(elmAgent, 1) + '\n'
sAgentValue = sAgentValue.replace('\n', '\\n')
sAgentValue = sAgentValue.replace(';', '\\;')
if sAgentValue:
arLines.append(self.vcardFold('AGENT:' + sAgentValue))
# Completely remove the agent element from the parse tree
elmAgent.extract()
else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
if sAgentValue:
arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
# FN (full name)
sFN = processSingleString('fn')
# N (name)
elmName = self.getPropertyValue(elmCard, 'n')
if elmName:
sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
sGivenName + ';' +
','.join(arAdditionalNames) + ';' +
','.join(arHonorificPrefixes) + ';' +
','.join(arHonorificSuffixes)))
elif sFN:
# implied "N" optimization
# http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
arNames = self.normalize(sFN).split()
if len(arNames) == 2:
bFamilyNameFirst = (arNames[0].endswith(',') or
len(arNames[1]) == 1 or
((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
if bFamilyNameFirst:
arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
else:
arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
# SORT-STRING
sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
if sSortString:
arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
# NICKNAME
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
if arNickname:
arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
# PHOTO
processSingleURI('photo')
# BDAY
dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
if dtBday:
arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
# ADR (address)
arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
for elmAdr in arAdr:
arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
if not arType:
arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
sPostOfficeBox + ';' +
sExtendedAddress + ';' +
sStreetAddress + ';' +
sLocality + ';' +
sRegion + ';' +
sPostalCode + ';' +
sCountryName))
# LABEL
processTypeValue('label', ['intl','postal','parcel','work'])
# TEL (phone number)
processTypeValue('tel', ['voice'])
# EMAIL
processTypeValue('email', ['internet'], ['internet'])
# MAILER
processSingleString('mailer')
# TZ (timezone)
processSingleString('tz')
# GEO (geographical information)
elmGeo = self.getPropertyValue(elmCard, 'geo')
if elmGeo:
sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
# TITLE
processSingleString('title')
# ROLE
processSingleString('role')
# LOGO
processSingleURI('logo')
# ORG (organization)
elmOrg = self.getPropertyValue(elmCard, 'org')
if elmOrg:
sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
if not sOrganizationName:
# implied "organization-name" optimization
# http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
if sOrganizationName:
arLines.append(self.vcardFold('ORG:' + sOrganizationName))
else:
arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
# CATEGORY
arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
if arCategory:
arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
# NOTE
processSingleString('note')
# REV
processSingleString('rev')
# SOUND
processSingleURI('sound')
# UID
processSingleString('uid')
# URL
processSingleURI('url')
# CLASS
processSingleString('class')
# KEY
processSingleURI('key')
if arLines:
arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
# XXX - this is super ugly; properly fix this with issue 148
for i, s in enumerate(arLines):
if not isinstance(s, unicode):
arLines[i] = s.decode('utf-8', 'ignore')
sVCards += u'\n'.join(arLines) + u'\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if 'href' not in attrsD:
return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1:
return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
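    # Illustrative sketch: a link counts as a likely enclosure when its MIME
    # type looks like media or its extension is in known_binary_extensions:
    #
    #     <a href="http://example.org/episode.mp3">            -> 1 (extension)
    #     <a href="http://example.org/clip" type="video/mp4">  -> 1 (MIME type)
    #     <a href="http://example.org/about.html">             -> 0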
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href:
continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
if segments:
tag = segments.pop()
else:
# there are no tags
continue
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', u'').split()
xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
if not BeautifulSoup:
return
try:
p = _MicroformatsParser(htmlSource, baseURI, encoding)
except UnicodeEncodeError:
# sgmllib throws this exception when performing lookups of tags
# with non-ASCII characters in them.
return
p.vcard = p.findVCards(p.document)
p.findTags()
p.findEnclosures()
p.findXFN()
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
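# Illustrative sketch (assuming sgmllib is importable): relative URIs found in
# the whitelisted (tag, attribute) pairs above are rewritten against the base:
#
#     _resolveRelativeURIs(u'<a href="/about">about</a>',
#                          u'http://example.org/feed', 'utf-8', u'text/html')
#     # -> roughly u'<a href="http://example.org/about">about</a>'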
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
try:
return _urljoin(base, rel or u'')
except ValueError:
return u''
if not base:
return rel or u''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
try:
uri = _urljoin(base, rel)
except ValueError:
return u''
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
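# Illustrative sketch (assuming the default ACCEPTABLE_URI_SCHEMES, which
# includes 'http' but not 'javascript'):
#
#     _makeSafeAbsoluteURI(u'http://example.org/feed', u'/img/logo.png')
#     # -> roughly u'http://example.org/img/logo.png'
#     _makeSafeAbsoluteURI(u'http://example.org/feed', u'javascript:alert(1)')
#     # -> u''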
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics'])
mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
            if filter(lambda n_v: n_v[0].startswith('xlink:'), attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
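    # Illustrative sketch: only whitelisted properties survive, and shorthand
    # properties such as 'border' are kept only when every keyword passes the
    # checks above:
    #
    #     p = _HTMLSanitizer('utf-8', u'text/html')
    #     p.sanitize_style(u'color: red; position: fixed')
    #     # -> roughly u'color: red;' ('position' is not an acceptable property)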
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
htmlSource = htmlSource.replace('<![CDATA[', '<![CDATA[')
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = isinstance(data, unicode)
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
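# Illustrative sketch (assuming sgmllib is importable and TIDY_MARKUP is left
# at its default of False): scripts, their bodies, and non-whitelisted
# attributes are dropped, whitelisted markup passes through:
#
#     _sanitizeHTML(u'<p onclick="evil()">hi<script>alert(1)</script></p>',
#                   'utf-8', u'text/html')
#     # -> roughly u'<p>hi</p>'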
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - the server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
        # If both conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
if request_headers is supplied it is a dictionary of HTTP request headers
that will override the values generated by FeedParser.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, basestring) \
and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, unicode):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
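# Illustrative sketch (internal helper; the argument values are assumptions):
# the same call shape handles URLs, local filenames, and literal strings, and
# always returns something with a read() method:
#
#     stream = _open_resource(u'http://example.org/feed.xml',
#                             None, None, USER_AGENT, None, [], {})
#     raw = stream.read()
#     stream.close()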
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = u''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
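# Illustrative sketch (assumed etag/modified values): the request built above
# carries locale-independent, RFC 1123-formatted conditional headers:
#
#     req = _build_urllib2_request('http://example.org/feed.xml', USER_AGENT,
#                                  '"abc123"', time.gmtime(0), None, None, {})
#     # If-None-Match: "abc123"
#     # If-Modified-Since: Thu, 01 Jan 1970 00:00:00 GMT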
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
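# Illustrative sketch: callers can plug in extra formats. The handler below is
# hypothetical (not part of this module); it accepts 'YYYY.MM.DD' by delegating
# to _parse_date_w3dtf (defined later in this file) and returns None otherwise:
#
#     def _parse_date_dotted(dateString):
#         m = re.match(r'(\d{4})\.(\d{2})\.(\d{2})$', dateString)
#         if not m:
#             return None
#         return _parse_date_w3dtf('%s-%s-%s' % m.groups())
#     registerDateHandler(_parse_date_dotted)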
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
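# Illustrative sketch: both the compact and the extended ISO 8601 spellings
# map to the same calendar date (missing time parts default to 00:00:00):
#
#     _parse_date_iso8601(u'20040105')     # -> 9-tuple for 2004-01-05
#     _parse_date_iso8601(u'2004-01-05')   # -> 9-tuple for 2004-01-05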
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
# Modified to also support MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (which basically means allowing a space as a date/time/timezone separator)
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
'|(?P<julian>\d\d\d)))?')
__tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
+ __tzd_re)
__datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString):
return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0:
return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
# Define the strings used by the RFC822 datetime parser
_rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
_rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# Only the first three letters of the month name matter
_rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
# The year may be 2 or 4 digits; capture the century if it exists
_rfc822_year = "(?P<year>(?:\d{2})?\d{2})"
_rfc822_day = "(?P<day> *\d{1,2})"
_rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)
_rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?"
_rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
_rfc822_tznames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# The timezone may be prefixed by 'Etc/'
_rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)
_rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames))
_rfc822_match = re.compile(
"(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
).match
def _parse_date_rfc822(dt):
"""Parse RFC 822 dates and times, with one minor
difference: years may be 4DIGIT or 2DIGIT.
http://tools.ietf.org/html/rfc822#section-5"""
try:
m = _rfc822_match(dt.lower()).groupdict(0)
except AttributeError:
return None
# Calculate a date and timestamp
for k in ('year', 'day', 'hour', 'minute', 'second'):
m[k] = int(m[k])
m['month'] = _rfc822_months.index(m['month']) + 1
# If the year is 2 digits, assume everything in the 90's is the 1990's
if m['year'] < 100:
m['year'] += (1900, 2000)[m['year'] < 90]
stamp = datetime.datetime(*[m[i] for i in
('year', 'month', 'day', 'hour', 'minute', 'second')])
# Use the timezone information to calculate the difference between
# the given date and timestamp and Universal Coordinated Time
tzhour = 0
tzmin = 0
if m['tz'] and m['tz'].startswith('gmt'):
# Handle GMT and GMT+hh:mm timezone syntax (the trailing
# timezone info will be handled by the next `if` block)
m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
if not m['tz']:
pass
elif m['tz'].startswith('+'):
tzhour = int(m['tz'][1:3])
tzmin = int(m['tz'][3:])
elif m['tz'].startswith('-'):
tzhour = int(m['tz'][1:3]) * -1
tzmin = int(m['tz'][3:]) * -1
else:
tzhour = _rfc822_tznames[m['tz']]
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in UTC
return (stamp - delta).utctimetuple()
registerDateHandler(_parse_date_rfc822)
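# Illustrative sketch: 2-digit and 4-digit years are both accepted, and the
# timezone token is used to shift the result to UTC:
#
#     _parse_date_rfc822(u'Thu, 01 Jan 1970 00:00:00 GMT')
#     # -> time tuple for 1970-01-01 00:00:00 UTC
#     _parse_date_rfc822(u'01 Jan 04 19:48:21 +0530')
#     # -> time tuple for 2004-01-01 14:18:21 UTC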
def _parse_date_asctime(dt):
"""Parse asctime-style dates"""
dayname, month, day, remainder = dt.split(None, 3)
# Convert month and day into zero-padded integers
month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
day = '%02i ' % (int(day),)
dt = month + day + remainder
return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
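# Dispatcher sketch: every handler registered above is tried until one yields
# a 9-tuple, so both of these (illustrative) inputs succeed:
#     _parse_date('2003-12-31T10:14:55Z')              # W3CDTF handler
#     _parse_date('Wed, 31 Dec 2003 10:14:55 GMT')     # RFC 822 handler
# Each call returns a UTC time.struct_time; unparseable input returns None.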
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to 'utf-8' if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). iconv_codec can help a lot;
you should definitely install it if you can.
http://cjkpython.i18n.org/
'''
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
charset = params.get('charset', '').replace("'", "")
if not isinstance(charset, unicode):
charset = charset.decode('utf-8', 'ignore')
return content_type, charset
sniffed_xml_encoding = u''
xml_encoding = u''
true_encoding = u''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]):
# In all forms of EBCDIC, these four bytes correspond
# to the string '<?xm'; try decoding using CP037
sniffed_xml_encoding = u'cp037'
xml_data = xml_data.decode('cp037').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]):
# UTF-16BE
sniffed_xml_encoding = u'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
# UTF-16BE with BOM
sniffed_xml_encoding = u'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]):
# UTF-16LE
sniffed_xml_encoding = u'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
# UTF-16LE with BOM
sniffed_xml_encoding = u'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]):
# UTF-32BE
sniffed_xml_encoding = u'utf-32be'
if _UTF32_AVAILABLE:
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]):
# UTF-32LE
sniffed_xml_encoding = u'utf-32le'
if _UTF32_AVAILABLE:
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
# UTF-32BE with BOM
sniffed_xml_encoding = u'utf-32be'
if _UTF32_AVAILABLE:
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
# UTF-32LE with BOM
sniffed_xml_encoding = u'utf-32le'
if _UTF32_AVAILABLE:
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
# UTF-8 with BOM
sniffed_xml_encoding = u'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data)
except UnicodeDecodeError:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
if sniffed_xml_encoding and (xml_encoding in (u'iso-10646-ucs-2', u'ucs-2', u'csunicode', u'iso-10646-ucs-4', u'ucs-4', u'csucs4', u'utf-16', u'utf-32', u'utf_16', u'utf_32', u'utf16', u'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = (u'application/xml', u'application/xml-dtd', u'application/xml-external-parsed-entity')
text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith(u'application/') and http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith(u'text/')) and http_content_type.endswith(u'+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or u'us-ascii'
elif http_content_type.startswith(u'text/'):
true_encoding = http_encoding or u'us-ascii'
elif http_headers and 'content-type' not in http_headers:
true_encoding = xml_encoding or u'iso-8859-1'
else:
true_encoding = xml_encoding or u'utf-8'
# some feeds claim to be gb2312 but are actually gb18030.
# apparently MSIE and Firefox both do the following switch:
if true_encoding.lower() == u'gb2312':
true_encoding = u'gb18030'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
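# Sketch of the precedence rules implemented above (header values are
# illustrative; 'data' stands for the raw document bytes):
#     _getCharacterEncoding({'content-type': u'application/xml; charset=utf-16'}, data)
#         -> HTTP charset wins, true_encoding == u'utf-16'
#     _getCharacterEncoding({'content-type': u'text/xml'}, data)
#         -> XML declaration ignored, true_encoding falls back to u'us-ascii'
#     _getCharacterEncoding({}, data)
#         -> no Content-Type at all, the XML declaration wins, else u'utf-8'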
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
encoding = 'utf-8'
data = data[3:]
elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
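# Illustrative round trip for the helper above: given ISO-8859-1 bytes whose
# declaration reads encoding='iso-8859-1',
#     _toUTF8(data, 'iso-8859-1')
# returns UTF-8 bytes with the declaration rewritten to encoding='utf-8'; a
# document with no <?xml ...?> declaration gets one prepended instead.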
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head,data = data[:start+1], data[start+1:]
entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
entity_results=entity_pattern.findall(head)
head = entity_pattern.sub(_s2bytes(''), head)
doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
doctype_results = doctype_pattern.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if doctype.lower().count(_s2bytes('netscape')):
version = u'rss091n'
else:
version = None
# only allow in 'safe' inline entity definitions
replacement=_s2bytes('')
if len(doctype_results)==1 and entity_results:
safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
if safe_entities:
replacement=_s2bytes('<!DOCTYPE feed [\n <!ENTITY') + _s2bytes('>\n <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
data = doctype_pattern.sub(replacement, head) + data
return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)])
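# Hedged usage sketch ('raw_bytes' is a placeholder for the undecoded feed):
#     version, data, entities = _stripDoctype(raw_bytes)
# 'version' is u'rss091n' only when the DOCTYPE mentions Netscape, 'data' is
# the document with the DOCTYPE removed, and 'entities' maps any 'safe'
# inline entity names to their replacement text (an empty dict otherwise).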
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception as e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error) as e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error as e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error as e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', u'')
if not isinstance(etag, unicode):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', u'')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, unicode):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
# ensure that baseuri is an absolute uri using an acceptable URI scheme
contentloc = http_headers.get('content-location', u'')
href = result.get('href', u'')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, unicode) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
# if server sent 304, we're done
if getattr(f, 'code', 0) == 304:
result['version'] = u''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if data is None:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = use_strict_parser = 1
break
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
proposed_encoding = unicode(chardet.detect(data)['encoding'], 'ascii', 'ignore')
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = use_strict_parser = 1
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and (u'utf-8' not in tried_encodings):
proposed_encoding = u'utf-8'
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except UnicodeDecodeError:
pass
else:
known_encoding = use_strict_parser = 1
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and (u'windows-1252' not in tried_encodings):
proposed_encoding = u'windows-1252'
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except UnicodeDecodeError:
pass
else:
known_encoding = use_strict_parser = 1
# if still no luck and we haven't tried iso-8859-2 yet, try that.
if (not known_encoding) and (u'iso-8859-2' not in tried_encodings):
proposed_encoding = u'iso-8859-2'
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
except UnicodeDecodeError:
pass
else:
known_encoding = use_strict_parser = 1
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = u''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
result['version'], data, entities = _stripDoctype(data)
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXParseException as e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
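# Typical use of the public entry point above (URL, local file path, or raw
# string all work; the names below are placeholders):
#     d = parse('http://example.org/feed.xml', etag=old_etag, modified=old_modified)
#     d['bozo']                       # 1 if the feed was ill-formed in some way
#     d['encoding']                   # encoding actually used to parse the data
#     d['feed'].get('title')          # feed-level metadata
#     [e.get('title') for e in d['entries']]
# Passing the previous etag/modified values lets a 304 response return early
# with result['version'] == u'' and a debug_message instead of re-parsing.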
| krishna11888/ai | third_party/pattern/pattern/web/feed/feedparser.py | Python | gpl-2.0 | 167,919 | [
"NetCDF",
"VisIt"
] | c942727ad968e416cc04eaece06a6193fee1665d9cbf5340d263d34f9577e9c5 |
# -*- coding: utf-8 -*-
import socket
import platform
import os
import os.path
import click
from turbo_markdown import run_server
def port_is_used(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if sock.connect_ex(('localhost', port)) == 0:
return True
return False
@click.command()
def main(args=None):
"""Console script for turbo_markdown"""
abspath = os.path.abspath('.')
port = 8888
while True:
if not port_is_used(port):
break
port += 1
url = 'http://localhost:%s' % port
if platform.system() == 'Darwin':
os.system('open %s' % url)
else:
click.echo("Open your brower visit %s" % url)
run_server(abspath, port)
if __name__ == "__main__":
main()
| wecatch/turbo-markdown | turbo_markdown/cli.py | Python | mit | 775 | [
"VisIt"
] | 062c047d4b408d25e8bb5a76f6be8cfa4a0bb7e5db37ba6fbd1d77ed5b0e2c59 |
from __future__ import print_function
import numpy as np
import scipy
import pyqtgraph as pg
from acq4.analysis.tools import functions as afn
from acq4.util import Qt
Ui_Form = Qt.importTemplate('.MapConvolverTemplate')
class MapConvolver(Qt.QWidget):
sigOutputChanged = Qt.Signal(object, object)
sigFieldsChanged = Qt.Signal(object)
def __init__(self, parent=None, filePath=None, data=None):
Qt.QWidget.__init__(self, parent)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.ui.spacingSpin.setOpts(suffix='m', value=5e-6, siPrefix=True, dec=False, step=1e-6)
self.addBtn = Qt.QPushButton('Add New')
item = Qt.QTreeWidgetItem()
self.ui.tree.addTopLevelItem(item)
self.ui.tree.setItemWidget(item, 0, self.addBtn)
self.items = []
self.filePath = filePath
self.data = data
self.output = None
self._availableFields = None ## a list of fieldnames that are available for coloring/contouring
self.ui.processBtn.hide()
self.addBtn.clicked.connect(self.addItem)
self.ui.processBtn.clicked.connect(self.processClicked)
def setData(self, data):
self.data = data
fields = []
#self.blockSignals = True
try:
self.blockSignals(True)
for i in self.items:
fields.append(i.getParamName())
i.updateParamCombo(data.dtype.names)
finally:
self.blockSignals(False)
newFields = [i.getParamName() for i in self.items]
if fields != newFields:
self.fieldsChanged()
#self.blockSignals = False
self.process()
def addItem(self):
item = ConvolverItem(self)
self.ui.tree.insertTopLevelItem(self.ui.tree.topLevelItemCount()-1, item)
item.postAdd()
self.items.append(item)
#self.blockSignals = True
if self.data is not None:
item.updateParamCombo(self.data.dtype.names)
self.fieldsChanged()
#self.blockSignals = False
def remClicked(self, item):
#item = self.ui.tree.currentItem()
if item is None:
return
self.remItem(item)
#self.emitChanged()
def remItem(self, item):
index = self.ui.tree.indexOfTopLevelItem(item)
self.ui.tree.takeTopLevelItem(index)
self.items.remove(item)
self.fieldsChanged()
def fieldsChanged(self):
#if self.blockSignals:
# return
fields = []
for i in self.items:
fields.append(i.getParamName())
self._availableFields = fields
self.sigFieldsChanged.emit(fields)
def getFields(self):
return self._availableFields
def itemChanged(self):
self.process()
def processClicked(self):
self.process()
def process(self):
if self.data is None:
return
if len(self.items) == 0:
return
params = {}
spacing = self.ui.spacingSpin.value()
for i in self.items:
if str(i.convolutionCombo.currentText()) == "Gaussian convolution":
params[str(i.paramCombo.currentText())] = {'sigma':i.sigmaSpin.value()}
elif str(i.convolutionCombo.currentText()) == "interpolation":
params[str(i.paramCombo.currentText())]= {'mode':i.modeCombo.currentText()}
else:
pass
arr = MapConvolver.convolveMaptoImage(self.data, params, spacing=spacing)
arrs = MapConvolver.interpolateMapToImage(self.data, params, spacing)
dtype = arr.dtype.descr
for p in arrs.keys():
if p not in arr.dtype.names:
dtype.append((p, float))
self.output = np.zeros(arr.shape, dtype=dtype)
self.output[:] = arr
for p in arrs:
self.output[p] = arrs[p]
self.sigOutputChanged.emit(self.output, spacing)
@staticmethod
def interpolateMapToImage(data, params, spacing=0.000005):
"""Function for interpolating a list of stimulation spots and their associated values into a fine-scale smoothed image.
data - a numpy record array which includes fields for 'xPos', 'yPos' and the parameters specified in params.
params - a dict of parameters to project and their corresponding interpolation modes. Mode options are:
'nearest', 'linear', 'cubic' (see documentation for scipy.interpolate.griddata)
ex: {'postCharge': {'mode':'nearest'}, 'dirCharge':{'mode':'cubic'}}
spacing - the size of each pixel in the returned grids (default is 5um)
"""
xmin = data['xPos'].min()
ymin = data['yPos'].min()
xdim = int((data['xPos'].max()-xmin)/spacing)+5
ydim = int((data['yPos'].max()-ymin)/spacing)+5
pts = np.array([data['xPos'], data['yPos']], dtype=float)
pts[0] = pts[0]-xmin
pts[1] = pts[1]-ymin
pts = pts.transpose()/spacing
xi = np.indices((xdim, ydim))
xi = xi.transpose(1,2,0)
arrs = {}
for p in params:
if 'mode' in params[p].keys():
arrs[p] = scipy.interpolate.griddata(pts, data[p], xi, method=params[p]['mode']) ## griddata function hangs when method='linear' in scipy versions earlier than 0.10.0
arrs[p][np.isnan(arrs[p])] = 0
return arrs
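# Illustrative call (assumes 'data' is a record array carrying 'xPos', 'yPos'
# and the projected field, with positions in meters):
#     imgs = MapConvolver.interpolateMapToImage(
#         data, {'postCharge': {'mode': 'nearest'}}, spacing=5e-6)
#     imgs['postCharge']   # 2D array resampled onto a 5 um grid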
@staticmethod
def convolveMaptoImage(data, params, spacing=5e-6):
"""Function for converting a list of stimulation spots and their associated values into a fine-scale smoothed image using a gaussian convolution.
data - a numpy record array which includes fields for 'xPos', 'yPos' and the parameters specified in params.
params - a dict of parameters to project and their corresponding convolution kernels - if 'sigma' is specified it will be used
as the stdev of a gaussian kernel, otherwise a custom kernel can be specified.
ex: {'postCharge': {'sigma':80e-6}, 'dirCharge':{'kernel': ndarray to use as the convolution kernel}}
spacing - the size of each pixel in the returned grid (default is 5um)
"""
#arr = data
arr = afn.convertPtsToSparseImage(data, list(params.keys()), spacing)
## convolve image using either given kernel or gaussian kernel with sigma=sigma
for p in params:
if 'mode' in params[p].keys():
continue
elif params[p].get('kernel', None) is None:
if params[p].get('sigma', None) is None:
raise Exception("Please specify either a kernel to use for convolution, or sigma for a gaussian kernel for %s param." %p)
arr[p] = scipy.ndimage.filters.gaussian_filter(arr[p], int(params[p]['sigma']/spacing))
else:
raise Exception("Convolving by a non-gaussian kernel is not yet supported.")
#arr[p] = scipy.ndimage.filters.convolve(arr[p], params[p]['kernel'])
return arr
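# Illustrative call mirroring the docstring above: an 80 um gaussian kernel
# rendered on the default 5 um grid (field names are assumptions):
#     img = MapConvolver.convolveMaptoImage(data, {'postCharge': {'sigma': 80e-6}})
#     img['postCharge']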
class ConvolverItem(Qt.QTreeWidgetItem):
def __init__(self, mc):
self.mc = mc
Qt.QTreeWidgetItem.__init__(self)
self.paramCombo = pg.ComboBox()
self.convolutionCombo = pg.ComboBox(items=["Gaussian convolution", "interpolation"], default="Gaussian convolution")
#self.convolutionCombo.addItems(["Gaussian convolution", "interpolation"])
self.sigmaSpin = pg.SpinBox(value=45e-6, siPrefix=True, suffix='m', dec=True, step=0.1)
self.modeCombo = pg.ComboBox(items=['nearest', 'linear', 'cubic'], default='nearest')
#self.modeCombo.addItems(['nearest', 'linear', 'cubic'])
self.modeCombo.setEnabled(False)
self.remBtn = Qt.QPushButton('Remove')
self.remBtn.clicked.connect(self.delete)
self.paramCombo.currentIndexChanged.connect(self.mc.fieldsChanged)
self.convolutionCombo.currentIndexChanged.connect(self.methodChanged)
self.paramCombo.currentIndexChanged.connect(self.itemChanged)
self.sigmaSpin.sigValueChanged.connect(self.itemChanged)
self.modeCombo.currentIndexChanged.connect(self.itemChanged)
def postAdd(self):
t = self.treeWidget()
#self.setText(0, "-")
t.setItemWidget(self, 0, self.paramCombo)
t.setItemWidget(self, 1, self.convolutionCombo)
t.setItemWidget(self, 2, self.sigmaSpin)
t.setItemWidget(self, 3, self.modeCombo)
t.setItemWidget(self, 4, self.remBtn)
def itemChanged(self):
self.mc.itemChanged()
def delete(self):
self.mc.remClicked(self)
def getParamName(self):
return str(self.paramCombo.currentText())
def updateParamCombo(self, paramList):
#prev = str(self.paramCombo.currentText())
#self.paramCombo.clear()
#for p in paramList:
#self.paramCombo.addItem(p)
#if p == prev:
#self.paramCombo.setCurrentIndex(self.paramCombo.count()-1)
self.paramCombo.updateList(paramList)
def methodChanged(self):
method = str(self.convolutionCombo.currentText())
if method == 'Gaussian convolution':
self.sigmaSpin.setEnabled(True)
self.modeCombo.setEnabled(False)
elif method == 'interpolation':
self.sigmaSpin.setEnabled(False)
self.modeCombo.setEnabled(True)
self.itemChanged() | acq4/acq4 | acq4/analysis/modules/MapImager/MapConvolver.py | Python | mit | 10,010 | [
"Gaussian"
] | 1baa890785355bdec1fe10d09aeaae169eb32bad1384a04912de3c7c1b52f415 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import division, absolute_import
import numpy as np
from numpy.testing import (
assert_,
assert_array_almost_equal,
)
from MDAnalysisTests.datafiles import MMTF, MMTF_gz
from MDAnalysis.coordinates.MMTF import MMTFReader
class TestMMTFReader(object):
def setUp(self):
self.r = MMTFReader(MMTF)
def tearDown(self):
del self.r
def test_read_frame_size(self):
assert_(self.r.ts.n_atoms == 512)
def test_read_positions(self):
assert_array_almost_equal(self.r.ts.positions[0],
np.array([-0.798, 12.632, 23.231]),
decimal=4)
assert_array_almost_equal(self.r.ts.positions[-1],
np.array([10.677, 15.517, 11.1]),
decimal=4)
def test_velocities(self):
assert_(not self.r.ts.has_velocities)
def test_forces(self):
assert_(not self.r.ts.has_forces)
def test_len(self):
# should be single frame
assert_(len(self.r) == 1)
class TestMMTFReaderGZ(object):
def setUp(self):
self.r = MMTFReader(MMTF_gz)
def tearDown(self):
del self.r
def test_read_frame_size(self):
assert_(self.r.ts.n_atoms == 1140)
def test_read_positions(self):
assert_array_almost_equal(self.r.ts.positions[0],
np.array([38.428, 16.440, 28.841]),
decimal=4)
assert_array_almost_equal(self.r.ts.positions[-1],
np.array([36.684, 27.024, 20.468]),
decimal=4)
def test_velocities(self):
assert_(not self.r.ts.has_velocities)
def test_forces(self):
assert_(not self.r.ts.has_forces)
def test_len(self):
# should be single frame
assert_(len(self.r) == 1)
| kain88-de/mdanalysis | testsuite/MDAnalysisTests/coordinates/test_mmtf.py | Python | gpl-2.0 | 2,953 | [
"MDAnalysis"
] | 5a2800bb5e7bedc427c6f28967c988e8d31b7dc33b6f91e7cc8de53fb5d7b87d |
#!/usr/bin/env python
"""
LookupTable
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.2 $
$Date: 2001-05-31 17:48:54 $
Pearu Peterson
"""
__version__ = "$Id: LookupTable.py,v 1.2 2001-05-31 17:48:54 pearu Exp $"
from . import common
from . import DataSetAttr
class LookupTable(DataSetAttr.DataSetAttr):
"""Holds VTK LookupTable.
Usage:
LookupTable(<sequence of 4-sequences> ,name = <string>)
Attributes:
table
name
Public methods:
get_size()
to_string(format = 'ascii')
"""
def __init__(self,table,name=None):
self.name = self._get_name(name)
self.table = self.get_n_seq_seq(table,[0,0,0,0])
if len(self.table[0])!=4:
raise ValueError('expected sequence of 4-sequences but got %s'%(len(self.table[0])))
def to_string(self,format='ascii'):
ret = ['LOOKUP_TABLE %s %s'%(self.name,len(self.table))]
seq = self.table
if format=='binary':
if not common.is_int255(seq):
seq = self.float01_to_int255(seq)
ret.append(self.seq_to_string(seq,format,'unsigned char'))
else:
if not common.is_float01(seq):
seq = self.int255_to_float01(seq)
ret.append(self.seq_to_string(seq,format,'float'))
return '\n'.join(ret)
def get_size(self):
return len(self.table)
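# Illustrative construction matching the class docstring (rows are RGBA,
# either 0..1 floats or 0..255 ints; the name is arbitrary):
#     lut = LookupTable([[0.0, 0.0, 1.0, 1.0],
#                        [1.0, 0.0, 0.0, 1.0]], name='bluered')
#     lut.to_string()   # 'LOOKUP_TABLE <name> 2' followed by one row per line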
def lookup_table_fromfile(f,n,sl):
tablename = sl[0]
size = eval(sl[1])
table = []
while len(table)<4*size:
table += list(map(eval,common._getline(f).split(' ')))
assert len(table) == 4*size
table2 = []
for i in range(0,len(table),4):
table2.append(table[i:i+4])
return LookupTable(table2,tablename)
if __name__ == "__main__":
print(LookupTable([[3,3],[4,3],240,3,2]).to_string())
| ddempsey/PyFEHM | pyvtk/LookupTable.py | Python | lgpl-2.1 | 2,119 | [
"VTK"
] | f87663237c96dca12ed3e29db89b4d8ed3a4252e358bfc160cef3148bebacb14 |
#!/usr/bin/env python2.7
"""Script to install mooltool on developer and test machines. We use python
virtual environment based approach for setting up the development environment.
Required packages before the script can be used are:
* Java >= 1.7.0
* python >= 2.7.3
* G++
It is supposed to work on Mac OS, Ubuntu and CentOS.
Please visit mool wiki https://github.com/rocketfuel/mool/wiki for help on
installation of prerequisites.
"""
import errno
import hashlib
import logging
import os
import shutil
import subprocess
import sys
import urllib
MIN_PYTHON_VERSION = (2, 7, 3)
INSTALL_HELP_MSG = (
'**** Refer to {0} link for installation instructions ****'.format(
'https://github.com/rocketfuel/mool/wiki'))
# Fail if not running required python version.
if MIN_PYTHON_VERSION <= sys.version_info[0:3]:
import argparse
else:
print 'Current python version is %s' % sys.version[0:5]
print 'Please install python version >= 2.7.3'
print INSTALL_HELP_MSG
sys.exit(1)
def parse_command_line():
"""Parses command line to generate arguments."""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d', '--install_dir', type=str,
default='',
help='Installation path for mool. Default user HOME.')
arg_parser.add_argument('-j', '--java_home', type=str,
default=os.environ.get('JAVA_HOME'),
help='Path to JAVA Home. Default JAVA_HOME environment variable.')
arg_parser.add_argument('-t', '--test_only', default=False,
action='store_true', help='Test current mool installation.')
return arg_parser
# Due to very tight dependency on INSTALL_DIR, we need to set it here.
# Keep the default value unless you are very eager to change/test it.
THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(sys.argv[0]))
INSTALL_DIR = (parse_command_line().parse_args().install_dir or
os.path.dirname(THIS_SCRIPT_DIR))
MOOL_INSTALL_DIR = os.path.join(INSTALL_DIR, '.mooltool')
MOOL_INIT_SCRIPT = os.path.join(MOOL_INSTALL_DIR, 'mool_init.sh')
MOOL_PACKAGES_DIR = os.path.join(MOOL_INSTALL_DIR, 'packages')
MOOL_RC_FILE = os.path.join(MOOL_INSTALL_DIR, 'moolrc')
BUILD_TOOLS_DIR = os.path.join(MOOL_INSTALL_DIR, 'ei.build_tools')
BU_SCRIPT_DIR = os.path.join(BUILD_TOOLS_DIR, 'release', 'bu.scripts')
TEMP_DIR = os.path.join(MOOL_INSTALL_DIR, '.temp')
JAR_SEARCH_PATH = os.path.join(MOOL_INSTALL_DIR, 'jars')
VIRTUALENV_PATH = os.path.join(MOOL_INSTALL_DIR, 'MOOL_ENV')
VIRTUALENV_URL = (('https://pypi.python.org/packages/source/v/virtualenv/'
'virtualenv-1.11.6.tar.gz'), 'virtualenv-1.11.6',
'd3f8e94bf825cc999924e276c8f1c63b8eeb0715')
GMOCK_PACKAGE = ('https://googlemock.googlecode.com/files/gmock-1.7.0.zip',
'f9d9dd882a25f4069ed9ee48e70aff1b53e3c5a5', 'gmock-1.7.0')
GMOCK_BUILD_COMMANDS = [
"""g++ -isystem {GTEST_DIR}/include -I{GTEST_DIR} -isystem
{GMOCK_DIR}/include -I{GMOCK_DIR} -o{TARGET_DIR}/gtest-all.o
-pthread -c {GTEST_DIR}/src/gtest-all.cc""",
"""g++ -isystem {GTEST_DIR}/include -I{GTEST_DIR} -isystem
{GMOCK_DIR}/include -I{GMOCK_DIR} -o{TARGET_DIR}/gmock-all.o
-pthread -c {GMOCK_DIR}/src/gmock-all.cc""",
"""g++ -isystem {GTEST_DIR}/include -I{GTEST_DIR}
-c {GTEST_DIR}/src/gtest_main.cc -o {TARGET_DIR}/gtest_main.o""",
"""ar -rv {TARGET_DIR}/libgmock.a {TARGET_DIR}/gtest-all.o
{TARGET_DIR}/gmock-all.o"""]
JAVA_PROTOBUF_JAR = 'protobuf-2.4.1.jar'
PROTOBUF_PACKAGE = (('https://protobuf.googlecode.com/files/'
'protobuf-2.4.1.tar.bz2'), 'protobuf-2.4.1',
'df5867e37a4b51fb69f53a8baf5b994938691d6d')
# TODO: Remove MySQL from dependencies, required by Rocket Fuel code examples.
PIP_INSTALL_PACKAGES = [('pylint', '0.28.0'), ('pep8', '1.4.5'),
('pytest', '2.3.4'), ('MySQL-python', '1.2.3')]
NEXUS_REPO_URL = 'http://nexus.rfiserve.net/content/groups/public'
DEFAULT_JAR_PACKAGES = [('org/testng/testng/6.8/testng-6.8.jar',
'ad4531b28715d39f73c49a56caf0c456cb34d48c'),
('com/beust/jcommander/1.27/jcommander-1.27.jar',
'58c9cbf0f1fa296f93c712f2cf46de50471920f9')]
SCALA_2_11 = ('http://www.scala-lang.org/files/archive/scala-2.11.4.tgz',
'scala-2.11.4', 'a6d319b26ccabe9c609fadebc32e797bf9cb1084')
SCALA_2_8 = ('http://www.scala-lang.org/files/archive/scala-2.8.2.final.tgz',
'scala-2.8.2.final', '2d6250763dcba02f371e0c26999a4f43670e8e3e')
MOOL_INIT_TEMPLATE_FILE = 'mool_init_template.sh'
MOOL_INIT_VARS = ['JAR_SEARCH_PATH', 'JAVA_PROTOBUF_JAR', 'JAVA_HOME',
'PROTO_COMPILER', 'PYTHON_PROTOBUF_DIR',
'PROTOBUF_INSTALL_DIR', 'SCALA_DEFAULT_VERSION',
'SCALA_HOME_2_8', 'SCALA_HOME_2_11',
'GMOCK_DIR', 'GTEST_DIR', 'GTEST_MAIN_LIB', 'GTEST_MOCK_LIB']
VARS_TO_EXPORT = {}
VARS_TO_EXPORT['JAR_SEARCH_PATH'] = JAR_SEARCH_PATH
VARS_TO_EXPORT['SCALA_DEFAULT_VERSION'] = '2.8'
BLOCK_SIZE = 1 << 20
LOGGER = None
LOG_FILE_PATH = os.path.join(TEMP_DIR, 'mool_install.log')
PROGRESS_BAR = '\rDownload: [{}{}] {:>3}%'
MOOLRC_TEXT = """
export MOOL_INIT_SCRIPT="VAR_MOOL_INIT_SCRIPT"
export MOOL_VIRTUALENV="VAR_VIRTUALENV_PATH"
function mool_init() {
source "${MOOL_VIRTUALENV}"
source "${MOOL_INIT_SCRIPT}"
alias bu="${BU_SCRIPT_DIR}/bu"
}
"""
INSTALL_SUCCESS_MSG = """
********** Successfully installed build tool mool in {0} directory! **********
* Mool has been configured to use a Python virtual environment that points to {1}.
* Mool environment settings are in {2} file which you may want to look at.
* Entry for mool_init function has been added to {3}/moolrc file.
You may want to add "source {3}/moolrc" in your bashrc/zshrc file.
* Execute `source {3}/moolrc && mool_init` to activate mool environment.
"""
class Error(Exception):
"""Error class for this script."""
def __exit__(self, etype, value, traceback):
LOGGER.error(self.message)
def check_sha_sum(file_path, sha_sum):
"""Check sha sum of a given file."""
hash_object = hashlib.sha1()
with open(file_path, 'r') as file_object:
while True:
file_text = file_object.read(BLOCK_SIZE)
if not file_text:
break
hash_object.update(file_text)
if hash_object.hexdigest() != sha_sum:
raise Error('Sha1 mismatch for file {}!!'.format(file_path))
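# check_sha_sum streams the file in 1 MiB blocks (BLOCK_SIZE above) so large
# downloads are never held in memory at once; illustrative call (the path and
# digest are placeholders):
#     check_sha_sum('/tmp/package.tar.gz', 'da39a3ee5e6b4b0d3255bfef95601890afd80709')
# It returns None on success and raises Error on a digest mismatch.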
def download_item(url, file_path, sha_sum):
"""Download url to file path."""
def report_hook(count, block_size, total_size):
"""Handler for urllib report hook."""
done = (block_size * count * 50 / total_size)
print PROGRESS_BAR.format('=' * done, ' ' * (50 - done), done * 2),
sys.stdout.flush()
temp_path = file_path + '.temp'
LOGGER.info('Downloading %s to %s.', url, file_path)
urllib.urlretrieve(url, temp_path, reporthook=report_hook)
check_sha_sum(temp_path, sha_sum)
shutil.move(temp_path, file_path)
def mkdir_p(path):
"""Create directory along with all required parent directories."""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
def configure_logging(console=True):
"""Setup logging. Enable console logging by default."""
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
if console:
console_handle = logging.StreamHandler(sys.stdout)
console_handle.setLevel(logging.INFO)
console_handle.setFormatter(
logging.Formatter('%(levelname)s: %(message)s'))
logger.addHandler(console_handle)
return logger
def _execute(command, stdout=False, use_shell=False):
"""Executes a given command and logs stdout to file."""
LOGGER.info('Executing "%s".', ' '.join(command))
try:
if not stdout:
with open(LOG_FILE_PATH, 'a') as log_file:
subprocess.check_call(command, stdout=log_file,
shell=use_shell)
else:
subprocess.check_call(command, shell=use_shell)
except subprocess.CalledProcessError:
LOGGER.error('Command %s failed!!', ' '.join(command))
raise
def _check_dependencies():
"""Check for preinstalled dependencies required for mool."""
LOGGER.info('Checking support for JAVA.')
try:
javac_bin = os.path.join(VARS_TO_EXPORT['JAVA_HOME'], 'bin', 'javac')
_execute([javac_bin, '-version'])
# TODO: Assert version >= 1.7.0
except subprocess.CalledProcessError:
LOGGER.error('Java support not found. Aborting installation!!')
LOGGER.error(INSTALL_HELP_MSG)
raise
def _setup_virtualenv():
"""Install and activate python virtual environment."""
try:
version = subprocess.check_output(['virtualenv', '--version'])
LOGGER.info('Using existing virtualenv version: %s', version)
_execute(['virtualenv', '-p', sys.executable, VIRTUALENV_PATH])
except OSError:
os.chdir(MOOL_PACKAGES_DIR)
url, dir_name, sha_sum = VIRTUALENV_URL
dest_path = os.path.join(os.path.abspath('.'), os.path.basename(url))
download_item(url, dest_path, sha_sum)
_execute(['tar', '-zxf', dest_path])
script = os.path.join('.', dir_name, 'virtualenv.py')
_execute(['python', script, '-p', sys.executable, VIRTUALENV_PATH])
activate_this = os.path.join(VIRTUALENV_PATH, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
def _pip_install_packages(packages):
"""Install packages using pip."""
for package, version in packages:
full_name = '{}=={}'.format(package, version)
LOGGER.info('Installing %s.', full_name)
_execute(['pip', 'install', full_name])
def _install_scala():
"""Untars the scala installation packages and exports env. variables."""
os.chdir(MOOL_PACKAGES_DIR)
for package, env_var in [(SCALA_2_8, 'SCALA_HOME_2_8'),
(SCALA_2_11, 'SCALA_HOME_2_11')]:
url, dir_name, sha_sum = package
LOGGER.info('Setting up %s', dir_name)
dest_path = os.path.join(os.path.abspath('.'), os.path.basename(url))
download_item(url, dest_path, sha_sum)
_execute(['tar', '-zxf', dest_path])
VARS_TO_EXPORT[env_var] = os.path.join(os.path.abspath('.'), dir_name)
def _download_protobuf():
"""Download protobuf source."""
os.chdir(MOOL_PACKAGES_DIR)
url, dir_name, sha_sum = PROTOBUF_PACKAGE
dest_path = os.path.join(os.path.abspath('.'), os.path.basename(url))
download_item(url, dest_path, sha_sum)
_execute(['tar', '-xf', dest_path])
return os.path.join(MOOL_PACKAGES_DIR, dir_name)
def _setup_protobuf():
"""Download and install protobuf."""
LOGGER.info('Setting up Protobuf-2.4.1')
os.chdir(_download_protobuf())
cur_dir = os.path.abspath('.')
# Add missing '<iostream>' library in src/google/protobuf/message.cc file.
bad_file = 'src/google/protobuf/message.cc'
with open(os.path.join(cur_dir, bad_file), 'r+') as file_obj:
msg_file_contents = file_obj.readlines()
msg_file_contents.insert(35, '#include <iostream>\n')
file_obj.seek(0)
file_obj.write(''.join(msg_file_contents))
# Build and install protobuf in local directory.
_execute(['./configure', '--prefix={}'.format(cur_dir)])
_execute(['make', 'install'])
protoc_binary = os.path.join(cur_dir, 'bin', 'protoc')
assert os.path.exists(protoc_binary)
VARS_TO_EXPORT['PROTO_COMPILER'] = protoc_binary
VARS_TO_EXPORT['PROTOBUF_INSTALL_DIR'] = cur_dir
os.chdir(os.path.join(cur_dir, 'python'))
_execute(['python', 'setup.py', 'build'])
lib_dir = os.listdir('build')[0]
VARS_TO_EXPORT['PYTHON_PROTOBUF_DIR'] = os.path.join(os.path.abspath('.'),
'build', lib_dir)
os.chdir(os.path.join(cur_dir, 'java'))
_execute([protoc_binary, '--java_out=src/main/java', '-I../src',
'../src/google/protobuf/descriptor.proto'])
target_dir = os.path.join(os.path.abspath('.'), 'target')
mkdir_p(target_dir)
javac_bin = os.path.join(VARS_TO_EXPORT['JAVA_HOME'], 'bin', 'javac')
command = [javac_bin, '-d', target_dir]
src_dir = os.path.join(os.path.abspath('.'),
'src/main/java/com/google/protobuf/')
command.extend([os.path.join(src_dir, f) for f in os.listdir(src_dir)])
_execute(command)
os.chdir(target_dir)
jar_bin = os.path.join(VARS_TO_EXPORT['JAVA_HOME'], 'bin', 'jar')
_execute([jar_bin, '-cf', JAVA_PROTOBUF_JAR, 'com'])
protobuf_jar_path = os.path.join(target_dir, JAVA_PROTOBUF_JAR)
assert os.path.exists(protobuf_jar_path)
VARS_TO_EXPORT['JAVA_PROTOBUF_JAR'] = protobuf_jar_path
def _setup_gmock_gtest():
"""Gtest is shipped along with gmock. Build and setup required libs."""
os.chdir(MOOL_PACKAGES_DIR)
url, sha_sum, dir_name = GMOCK_PACKAGE
dest_path = os.path.join(os.path.abspath('.'), os.path.basename(url))
download_item(url, dest_path, sha_sum)
_execute(['unzip', dest_path])
os.chdir(os.path.join(MOOL_PACKAGES_DIR, dir_name))
# Create target directory to store all useful stuff.
gmock_dir = os.path.abspath('.')
target_dir = os.path.join(gmock_dir, 'target')
gtest_dir = os.path.join(gmock_dir, 'gtest')
mkdir_p(target_dir)
for cmd in GMOCK_BUILD_COMMANDS:
cmd = cmd.format(GTEST_DIR=gtest_dir, GMOCK_DIR=gmock_dir,
TARGET_DIR=target_dir)
_execute([' '.join([t.strip() for t in cmd.split('\n')])],
use_shell=True)
VARS_TO_EXPORT['GMOCK_DIR'] = gmock_dir
VARS_TO_EXPORT['GTEST_DIR'] = gtest_dir
VARS_TO_EXPORT['GTEST_MAIN_LIB'] = os.path.join(target_dir, 'gtest_main.o')
VARS_TO_EXPORT['GTEST_MOCK_LIB'] = os.path.join(target_dir, 'libgmock.a')
def _setup_mool_init():
"""Creates the mool_init.sh file."""
LOGGER.info('Creating mool init script.')
mool_init_template = os.path.join(THIS_SCRIPT_DIR, MOOL_INIT_TEMPLATE_FILE)
file_contents = None
with open(mool_init_template, 'r') as template_file:
file_contents = template_file.read()
for var in MOOL_INIT_VARS:
file_contents = file_contents.replace('VAR_{}'.format(var),
VARS_TO_EXPORT[var])
with open(MOOL_INIT_SCRIPT, 'w') as init_file:
init_file.write(file_contents)
text = MOOLRC_TEXT.replace('VAR_MOOL_INIT_SCRIPT', MOOL_INIT_SCRIPT)
env_activate = os.path.join(VIRTUALENV_PATH, 'bin', 'activate')
text = text.replace('VAR_VIRTUALENV_PATH', env_activate)
with open(MOOL_RC_FILE, 'w') as moolrc_obj:
moolrc_obj.write(text)
os.chdir(THIS_SCRIPT_DIR)
def test_setup():
"""Run all the mool tests on the current setup."""
LOGGER.info('Running mool tests using current environment setup.')
os.chdir(THIS_SCRIPT_DIR)
activate_this = os.path.join(VIRTUALENV_PATH, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
test_script = os.path.join(os.path.dirname(THIS_SCRIPT_DIR), 'test_all.sh')
_execute(['bash', test_script], stdout=True)
def _install_all():
"""Installer utility for mool tool."""
LOGGER.info('**** Check %s for installation logs. ****', LOG_FILE_PATH)
_check_dependencies()
_setup_virtualenv()
_pip_install_packages(PIP_INSTALL_PACKAGES)
_setup_protobuf()
_setup_gmock_gtest()
_install_scala()
_setup_mool_init()
test_setup()
LOGGER.info(INSTALL_SUCCESS_MSG.format(INSTALL_DIR, VIRTUALENV_PATH,
MOOL_INIT_SCRIPT, MOOL_INSTALL_DIR))
def main():
"""Main function to drive mool tool setup."""
arg_parser = parse_command_line()
args = arg_parser.parse_args()
LOGGER.info('Using "{}" as installation directory.'.format(INSTALL_DIR))
if not args.java_home or not os.path.exists(args.java_home):
LOGGER.error('Invalid JAVA_HOME value.')
arg_parser.print_help()
sys.exit(1)
VARS_TO_EXPORT['JAVA_HOME'] = args.java_home.rstrip('/')
if args.test_only:
return test_setup()
mkdir_p(MOOL_INSTALL_DIR)
mkdir_p(TEMP_DIR)
mkdir_p(MOOL_PACKAGES_DIR)
_install_all()
if __name__ == '__main__':
LOGGER = configure_logging()
try:
main()
except:
LOGGER.exception(sys.exc_info())
print INSTALL_HELP_MSG
| jkumarrf/mool | installer/install_mooltool.py | Python | bsd-3-clause | 16,674 | [
"VisIt"
] | ca5b6c3a301bdffba399cb61a18c37c83b788ab54ee3150f0706d8a40d4d7529 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011-2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""Logging of events.
"""
# pylint: enable=E1101
# FIXME: This should probably be moved over to stoqlib.domain.logging to
# avoid confusing it with stoqlib.domain.events.
# Another possiblity would be to move events out of domain.
from storm.store import AutoReload
from stoqlib.database.properties import DateTimeCol, IntCol, UnicodeCol, EnumCol
from stoqlib.database.orm import ORMObject
from stoqlib.lib.dateutils import localnow
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.lib.formatters import get_formatted_price, get_formatted_percentage
_ = stoqlib_gettext
#
# Domain Classes
#
class Event(ORMObject):
"""An event represent something that happened in Stoq that
should be logged and access at a later point.
See also:
`schema <http://doc.stoq.com.br/schema/tables/event.html>`__
"""
__storm_table__ = 'event'
#: System related messages
TYPE_SYSTEM = u'system'
#: |loginuser| events, logging in and logging out
TYPE_USER = u'user'
#: |purchase| events
TYPE_ORDER = u'order'
#: |sale| events
TYPE_SALE = u'sale'
#: |payment| events
TYPE_PAYMENT = u'payment'
types = {
TYPE_SYSTEM: _(u'System'),
TYPE_USER: _(u'User'),
TYPE_ORDER: _(u'Order'),
TYPE_SALE: _(u'Sale'),
TYPE_PAYMENT: _(u'Payment'),
}
id = IntCol(primary=True, default=AutoReload)
#: the date the event was created
date = DateTimeCol(default_factory=localnow)
#: type of this event, one of TYPE_* variables of this class
event_type = EnumCol(allow_none=False, default=TYPE_SYSTEM)
#: description of the event
description = UnicodeCol()
@classmethod
def log(cls, store, event_type, description):
"""
Create a new event message.
:param store: a store
:param event_type: the event type of this message
:param description: the message description
"""
cls(event_type=event_type,
description=description,
store=store)
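# Illustrative call ('store' is an open store; the message is a placeholder):
#     Event.log(store, Event.TYPE_USER, u"User 'admin' logged in")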
@classmethod
def log_sale_item_discount(cls, store, sale_number, user_name, discount_value,
product, original_price, new_price):
"""
Log the discount authorized by a user
This will log to the event system when a user authorizes a discount
greater than what is allowed on a sale item
:param store: a store
:param sale_number: the sale's id that the discount was applied
:param user_name: the user that authorized the discount
:param discount_value: the percentage of discount applied
:param product: the name of product that received the discount
:param original_price: the original price of product
:param new_price: the price of product after discount
"""
description = _(u"Sale {sale_number}: User {user_name} authorized "
u"{discount_value} of discount changing\n "
u"{product} value from {original_price} to "
u"{new_price}.").format(
sale_number=sale_number,
user_name=user_name,
discount_value=get_formatted_percentage(discount_value),
product=product,
original_price=get_formatted_price(original_price, symbol=True),
new_price=get_formatted_price(new_price, symbol=True))
cls(event_type=cls.TYPE_SALE,
description=description,
store=store)
@classmethod
def log_sale_discount(cls, store, sale_number, user_name, discount_value,
original_price, new_price):
"""
Log the discount authorized by a user
This will log to the event system when a user authorizes a discount
greater than what is allowed on a sale
:param store: a store
:param sale_number: the sale's id that the discount was applied
:param user_name: the user that authorized the discount
:param discount_value: the percentage of discount applied
:param original_price: the original price of product
:param new_price: the price of product after discount
"""
description = _(u"sale {sale_number}: User {user_name} authorized "
u"{discount_value} of discount changing the value from "
u"{original_price} to {new_price}.").format(
sale_number=sale_number,
user_name=user_name,
discount_value=get_formatted_percentage(discount_value),
original_price=get_formatted_price(original_price, symbol=True),
new_price=get_formatted_price(new_price, symbol=True))
cls(event_type=cls.TYPE_SALE,
description=description,
store=store)
| tiagocardosos/stoq | stoqlib/domain/event.py | Python | gpl-2.0 | 5,766 | [
"VisIt"
] | dd5562dd08977a4c23dd6ccf2add1bed6de213d37e50c773f50fd88a68d243f0 |
from setuptools import setup
import os
def readme(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='espei',
author='Brandon Bocklund',
author_email='brandonbocklund@gmail.com',
description='Fitting thermodynamic models with pycalphad.',
packages=['espei', 'espei.error_functions', 'espei.parameter_selection', 'espei.optimizers'],
package_data={
'espei': ['input-schema.yaml']
},
license='MIT',
long_description=readme('README.rst'),
long_description_content_type='text/x-rst',
url='https://espei.org/',
install_requires=[
'cerberus',
'corner',
'dask[complete]>=2',
'distributed>=2',
'emcee<3',
'importlib_metadata', # drop for Python>=3.8
'matplotlib',
'numpy>=1.20',
'pycalphad>=0.10.0',
'pyyaml',
'setuptools_scm[toml]>=6.0',
'scikit-learn>=1.0',
'scipy',
'symengine>=0.9',
'tinydb>=4',
],
extras_require={
'dev': [
'furo',
'ipython', # for pygments syntax highlighting
'pytest',
'sphinx',
'twine',
],
'mpi': [
'mpi4py',
'dask-mpi>=2',
]
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Chemistry',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
entry_points={'console_scripts': [
'espei = espei.espei_script:main']}
)
| PhasesResearchLab/ESPEI | setup.py | Python | mit | 2,047 | [
"pycalphad"
] | 275c7ae1cf2cdccc2ee48c33f75cb9457d256d2261bf94fdee592c74b4dc2567 |
# coding: utf-8
from __future__ import unicode_literals
import unittest
import os
from pymatgen.io.feff import Header, FeffTags, FeffLdos, FeffPot, Xmu, \
FeffAtoms
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class HeaderTest(unittest.TestCase):
header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (Cmc2_1), space number: (36)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.666666 0.333332 0.496324
* 2 Co 0.333333 0.666667 0.996324
* 3 O 0.666666 0.333332 0.878676
* 4 O 0.333333 0.666667 0.378675"""
def test_init(self):
filepath = os.path.join(test_dir, 'HEADER')
header = Header.header_string_from_file(filepath)
h = header.splitlines()
hs = HeaderTest.header_string.splitlines()
for i, line in enumerate(h):
self.assertEqual(line, hs[i])
self.assertEqual(HeaderTest.header_string.splitlines(),
header.splitlines(), "Failed to read HEADER file")
def test_from_string(self):
header = Header.from_string(HeaderTest.header_string)
self.assertEqual(header.struct.composition.reduced_formula, "CoO",
"Failed to generate structure from HEADER string")
def test_get_string(self):
cif_file = os.path.join(test_dir, 'CoO19128.cif')
h = Header.from_cif_file(cif_file)
head = str(h)
self.assertEqual(head.splitlines()[3].split()[-1],
HeaderTest.header_string.splitlines()[3].split()[-1],
"Failed to generate HEADER from structure")
def test_as_dict_and_from_dict(self):
file_name = os.path.join(test_dir, 'HEADER')
header = Header.from_file(file_name)
d = header.as_dict()
header2 = Header.from_dict(d)
self.assertEqual(str(header), str(header2),
"Header failed to and from dict test")
class FeffAtomsTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'ATOMS')
atoms = FeffAtoms.atoms_string_from_file(filepath)
self.assertEqual(atoms.splitlines()[3].split()[4], 'O',
"failed to read ATOMS file")
def test_get_string(self):
header = Header.from_string(HeaderTest.header_string)
struc = header.struct
central_atom = 'O'
a = FeffAtoms(struc, central_atom)
atoms = a.get_string()
self.assertEqual(atoms.splitlines()[3].split()[4], central_atom,
"failed to create ATOMS string")
def test_as_dict_and_from_dict(self):
file_name = os.path.join(test_dir, 'HEADER')
header = Header.from_file(file_name)
struct = header.struct
atoms = FeffAtoms(struct, 'O')
d = atoms.as_dict()
atoms2 = FeffAtoms.from_dict(d)
self.assertEqual(str(atoms), str(atoms2),
"FeffAtoms failed to and from dict test")
class FeffTagsTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'PARAMETERS')
parameters = FeffTags.from_file(filepath)
parameters["RPATH"] = 10
self.assertEqual(parameters["COREHOLE"], "Fsr",
"Failed to read PARAMETERS file")
self.assertEqual(parameters["LDOS"], [-30., 15., .1],
"Failed to read PARAMETERS file")
def test_diff(self):
filepath1 = os.path.join(test_dir, 'PARAMETERS')
parameters1 = FeffTags.from_file(filepath1)
filepath2 = os.path.join(test_dir, 'PARAMETERS.2')
parameters2 = FeffTags.from_file(filepath2)
self.assertEqual(FeffTags(parameters1).diff(parameters2),
{'Different': {},
'Same': {'CONTROL': [1, 1, 1, 1, 1, 1],
'MPSE': [2],
'OPCONS': '',
'SCF': [6.0, 0, 30, .2, 1],
'EXCHANGE': [0, 0.0, 0.0, 2],
'S02': [0.0],
'COREHOLE': 'Fsr',
'FMS': [8.5, 0],
'XANES': [3.7, 0.04, 0.1],
'EDGE': 'K',
'PRINT': [1, 0, 0, 0, 0, 0],
'LDOS': [-30., 15., .1]}})
def test_as_dict_and_from_dict(self):
file_name = os.path.join(test_dir, 'PARAMETERS')
tags = FeffTags.from_file(file_name)
d=tags.as_dict()
tags2 = FeffTags.from_dict(d)
self.assertEqual(tags, tags2,
"Parameters do not match to and from dict")
class FeffPotTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'POTENTIALS')
feffpot = FeffPot.pot_string_from_file(filepath)
d, dr = FeffPot.pot_dict_from_string(feffpot)
self.assertEqual(d['Co'], 1, "Wrong symbols read in for FeffPot")
def test_as_dict_and_from_dict(self):
file_name = os.path.join(test_dir, 'HEADER')
header = Header.from_file(file_name)
struct = header.struct
pot = FeffPot(struct, 'O')
        d = pot.as_dict()
pot2 = FeffPot.from_dict(d)
self.assertEqual(str(pot), str(pot2),
"FeffPot to and from dict does not match")
class FeffLdosTest(unittest.TestCase):
filepath1 = os.path.join(test_dir, 'feff.inp')
filepath2 = os.path.join(test_dir, 'ldos')
l = FeffLdos.from_file(filepath1, filepath2)
def test_init(self):
efermi = FeffLdosTest.l.complete_dos.efermi
self.assertEqual(efermi, -11.430,
"Did not read correct Fermi energy from ldos file")
def test_complete_dos(self):
complete_dos = FeffLdosTest.l.complete_dos
self.assertEqual(complete_dos.as_dict()['spd_dos']['S']['efermi'],
- 11.430,
"Failed to construct complete_dos dict properly")
def test_as_dict_and_from_dict(self):
l2 = FeffLdosTest.l.charge_transfer_to_string()
d = FeffLdosTest.l.as_dict()
l3 = FeffLdos.from_dict(d).charge_transfer_to_string()
self.assertEqual(l2, l3,
"Feffldos to and from dict does not match")
class XmuTest(unittest.TestCase):
def test_init(self):
filepath1 = os.path.join(test_dir, 'xmu.dat')
filepath2 = os.path.join(test_dir, 'feff.inp')
x = Xmu.from_file(filepath1, filepath2)
self.assertEqual(x.absorbing_atom, 'O',
"failed to read xmu.dat file properly")
def test_as_dict_and_from_dict(self):
filepath1 = os.path.join(test_dir, 'xmu.dat')
filepath2 = os.path.join(test_dir, 'feff.inp')
x = Xmu.from_file(filepath1, filepath2)
        data = x.data.tolist()
        d = x.as_dict()
        x2 = Xmu.from_dict(d)
        data2 = x2.data.tolist()
self.assertEqual(data, data2,
"Xmu to and from dict does not match")
if __name__ == '__main__':
unittest.main()
| rousseab/pymatgen | pymatgen/io/feff/tests/test_feffio.py | Python | mit | 7,497 | [
"FEFF",
"pymatgen"
] | bc850bc3789bb0214c6a0954ee240337dd57ecf4c37b8f705aa0ee34c5e59cdd |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
from PyQt4 import QtCore, QtGui
from openlp.core.lib import Registry, MediaManagerItem, ItemCapabilities, ServiceItemContext, Settings, UiStrings, \
create_separated_list, translate
from openlp.core.lib.searchedit import SearchEdit
from openlp.core.lib.ui import set_case_insensitive_completer, create_horizontal_adjusting_combo_box, \
critical_error_message_box, find_and_set_in_combo_box, build_icon
from openlp.core.utils import get_locale_key
from openlp.plugins.bibles.forms import BibleImportForm, EditBibleForm
from openlp.plugins.bibles.lib import LayoutStyle, DisplayStyle, VerseReferenceList, get_reference_separator, \
LanguageSelection, BibleStrings
from openlp.plugins.bibles.lib.db import BiblesResourcesDB
log = logging.getLogger(__name__)
class BibleSearch(object):
"""
Enumeration class for the different search methods for the "quick search".
"""
Reference = 1
Text = 2
class BibleMediaItem(MediaManagerItem):
"""
This is the custom media manager item for Bibles.
"""
log.info('Bible Media Item loaded')
def __init__(self, parent, plugin):
self.icon_path = 'songs/song'
self.lock_icon = build_icon(':/bibles/bibles_search_lock.png')
self.unlock_icon = build_icon(':/bibles/bibles_search_unlock.png')
MediaManagerItem.__init__(self, parent, plugin)
# Place to store the search results for both bibles.
self.settings = self.plugin.settings_tab
self.quick_preview_allowed = True
self.has_search = True
self.search_results = {}
self.second_search_results = {}
self.check_search_result()
Registry().register_function('bibles_load_list', self.reload_bibles)
def __check_second_bible(self, bible, second_bible):
"""
Check if the first item is a second bible item or not.
"""
bitem = self.list_view.item(0)
if not bitem.flags() & QtCore.Qt.ItemIsSelectable:
# The item is the "No Search Results" item.
self.list_view.clear()
self.displayResults(bible, second_bible)
return
else:
item_second_bible = self._decode_qt_object(bitem, 'second_bible')
if item_second_bible and second_bible or not item_second_bible and not second_bible:
self.displayResults(bible, second_bible)
elif critical_error_message_box(
message=translate('BiblesPlugin.MediaItem',
'You cannot combine single and dual Bible verse search results. '
'Do you want to delete your search results and start a new search?'),
parent=self, question=True) == QtGui.QMessageBox.Yes:
self.list_view.clear()
self.displayResults(bible, second_bible)
def _decode_qt_object(self, bitem, key):
reference = bitem.data(QtCore.Qt.UserRole)
obj = reference[str(key)]
return str(obj).strip()
def required_icons(self):
"""
Set which icons the media manager tab should show
"""
MediaManagerItem.required_icons(self)
self.has_import_icon = True
self.has_new_icon = False
self.has_edit_icon = True
self.has_delete_icon = True
self.add_to_service_item = False
def addSearchTab(self, prefix, name):
self.searchTabBar.addTab(name)
tab = QtGui.QWidget()
tab.setObjectName(prefix + 'Tab')
tab.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
layout = QtGui.QGridLayout(tab)
layout.setObjectName(prefix + 'Layout')
setattr(self, prefix + 'Tab', tab)
setattr(self, prefix + 'Layout', layout)
def addSearchFields(self, prefix, name):
"""
        Creates and adds a generic search tab.
``prefix``
The prefix of the tab, this is either ``quick`` or ``advanced``.
``name``
The translated string to display.
"""
if prefix == 'quick':
idx = 2
else:
idx = 5
tab = getattr(self, prefix + 'Tab')
layout = getattr(self, prefix + 'Layout')
versionLabel = QtGui.QLabel(tab)
versionLabel.setObjectName(prefix + 'VersionLabel')
layout.addWidget(versionLabel, idx, 0, QtCore.Qt.AlignRight)
versionComboBox = create_horizontal_adjusting_combo_box(tab, prefix + 'VersionComboBox')
versionLabel.setBuddy(versionComboBox)
layout.addWidget(versionComboBox, idx, 1, 1, 2)
secondLabel = QtGui.QLabel(tab)
secondLabel.setObjectName(prefix + 'SecondLabel')
layout.addWidget(secondLabel, idx + 1, 0, QtCore.Qt.AlignRight)
secondComboBox = create_horizontal_adjusting_combo_box(tab, prefix + 'SecondComboBox')
        secondLabel.setBuddy(secondComboBox)
layout.addWidget(secondComboBox, idx + 1, 1, 1, 2)
styleLabel = QtGui.QLabel(tab)
styleLabel.setObjectName(prefix + 'StyleLabel')
layout.addWidget(styleLabel, idx + 2, 0, QtCore.Qt.AlignRight)
styleComboBox = create_horizontal_adjusting_combo_box(tab, prefix + 'StyleComboBox')
styleComboBox.addItems(['', '', ''])
layout.addWidget(styleComboBox, idx + 2, 1, 1, 2)
search_button_layout = QtGui.QHBoxLayout()
search_button_layout.setObjectName(prefix + 'search_button_layout')
search_button_layout.addStretch()
lockButton = QtGui.QToolButton(tab)
lockButton.setIcon(self.unlock_icon)
lockButton.setCheckable(True)
lockButton.setObjectName(prefix + 'LockButton')
search_button_layout.addWidget(lockButton)
searchButton = QtGui.QPushButton(tab)
searchButton.setObjectName(prefix + 'SearchButton')
search_button_layout.addWidget(searchButton)
layout.addLayout(search_button_layout, idx + 3, 1, 1, 2)
self.page_layout.addWidget(tab)
tab.setVisible(False)
lockButton.toggled.connect(self.onLockButtonToggled)
setattr(self, prefix + 'VersionLabel', versionLabel)
setattr(self, prefix + 'VersionComboBox', versionComboBox)
setattr(self, prefix + 'SecondLabel', secondLabel)
setattr(self, prefix + 'SecondComboBox', secondComboBox)
setattr(self, prefix + 'StyleLabel', styleLabel)
setattr(self, prefix + 'StyleComboBox', styleComboBox)
setattr(self, prefix + 'LockButton', lockButton)
setattr(self, prefix + 'SearchButtonLayout', search_button_layout)
setattr(self, prefix + 'SearchButton', searchButton)
def add_end_header_bar(self):
self.searchTabBar = QtGui.QTabBar(self)
self.searchTabBar.setExpanding(False)
self.searchTabBar.setObjectName('searchTabBar')
self.page_layout.addWidget(self.searchTabBar)
# Add the Quick Search tab.
self.addSearchTab('quick', translate('BiblesPlugin.MediaItem', 'Quick'))
self.quickSearchLabel = QtGui.QLabel(self.quickTab)
self.quickSearchLabel.setObjectName('quickSearchLabel')
self.quickLayout.addWidget(self.quickSearchLabel, 0, 0, QtCore.Qt.AlignRight)
self.quickSearchEdit = SearchEdit(self.quickTab)
self.quickSearchEdit.setSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Fixed)
self.quickSearchEdit.setObjectName('quickSearchEdit')
self.quickSearchLabel.setBuddy(self.quickSearchEdit)
self.quickLayout.addWidget(self.quickSearchEdit, 0, 1, 1, 2)
self.addSearchFields('quick', translate('BiblesPlugin.MediaItem', 'Quick'))
self.quickTab.setVisible(True)
# Add the Advanced Search tab.
self.addSearchTab('advanced', UiStrings().Advanced)
self.advancedBookLabel = QtGui.QLabel(self.advancedTab)
self.advancedBookLabel.setObjectName('advancedBookLabel')
self.advancedLayout.addWidget(self.advancedBookLabel, 0, 0, QtCore.Qt.AlignRight)
self.advancedBookComboBox = create_horizontal_adjusting_combo_box(self.advancedTab, 'advancedBookComboBox')
self.advancedBookLabel.setBuddy(self.advancedBookComboBox)
self.advancedLayout.addWidget(self.advancedBookComboBox, 0, 1, 1, 2)
self.advancedChapterLabel = QtGui.QLabel(self.advancedTab)
self.advancedChapterLabel.setObjectName('advancedChapterLabel')
self.advancedLayout.addWidget(self.advancedChapterLabel, 1, 1, 1, 2)
self.advancedVerseLabel = QtGui.QLabel(self.advancedTab)
self.advancedVerseLabel.setObjectName('advancedVerseLabel')
self.advancedLayout.addWidget(self.advancedVerseLabel, 1, 2)
self.advancedFromLabel = QtGui.QLabel(self.advancedTab)
self.advancedFromLabel.setObjectName('advancedFromLabel')
self.advancedLayout.addWidget(self.advancedFromLabel, 3, 0, QtCore.Qt.AlignRight)
self.advancedFromChapter = QtGui.QComboBox(self.advancedTab)
self.advancedFromChapter.setObjectName('advancedFromChapter')
self.advancedLayout.addWidget(self.advancedFromChapter, 3, 1)
self.advancedFromVerse = QtGui.QComboBox(self.advancedTab)
self.advancedFromVerse.setObjectName('advancedFromVerse')
self.advancedLayout.addWidget(self.advancedFromVerse, 3, 2)
self.advancedToLabel = QtGui.QLabel(self.advancedTab)
self.advancedToLabel.setObjectName('advancedToLabel')
self.advancedLayout.addWidget(self.advancedToLabel, 4, 0, QtCore.Qt.AlignRight)
self.advancedToChapter = QtGui.QComboBox(self.advancedTab)
self.advancedToChapter.setObjectName('advancedToChapter')
self.advancedLayout.addWidget(self.advancedToChapter, 4, 1)
self.advancedToVerse = QtGui.QComboBox(self.advancedTab)
self.advancedToVerse.setObjectName('advancedToVerse')
self.advancedLayout.addWidget(self.advancedToVerse, 4, 2)
self.addSearchFields('advanced', UiStrings().Advanced)
# Combo Boxes
self.quickVersionComboBox.activated.connect(self.updateAutoCompleter)
self.quickSecondComboBox.activated.connect(self.updateAutoCompleter)
self.advancedVersionComboBox.activated.connect(self.onAdvancedVersionComboBox)
self.advancedSecondComboBox.activated.connect(self.onAdvancedSecondComboBox)
self.advancedBookComboBox.activated.connect(self.onAdvancedBookComboBox)
self.advancedFromChapter.activated.connect(self.onAdvancedFromChapter)
self.advancedFromVerse.activated.connect(self.onAdvancedFromVerse)
self.advancedToChapter.activated.connect(self.onAdvancedToChapter)
QtCore.QObject.connect(self.quickSearchEdit, QtCore.SIGNAL('searchTypeChanged(int)'), self.updateAutoCompleter)
self.quickVersionComboBox.activated.connect(self.updateAutoCompleter)
self.quickStyleComboBox.activated.connect(self.onQuickStyleComboBoxChanged)
self.advancedStyleComboBox.activated.connect(self.onAdvancedStyleComboBoxChanged)
# Buttons
self.advancedSearchButton.clicked.connect(self.onAdvancedSearchButton)
self.quickSearchButton.clicked.connect(self.onQuickSearchButton)
# Other stuff
self.quickSearchEdit.returnPressed.connect(self.onQuickSearchButton)
self.searchTabBar.currentChanged.connect(self.onSearchTabBarCurrentChanged)
def on_focus(self):
if self.quickTab.isVisible():
self.quickSearchEdit.setFocus()
else:
self.advancedBookComboBox.setFocus()
def config_update(self):
log.debug('config_update')
if Settings().value(self.settings_section + '/second bibles'):
self.advancedSecondLabel.setVisible(True)
self.advancedSecondComboBox.setVisible(True)
self.quickSecondLabel.setVisible(True)
self.quickSecondComboBox.setVisible(True)
else:
self.advancedSecondLabel.setVisible(False)
self.advancedSecondComboBox.setVisible(False)
self.quickSecondLabel.setVisible(False)
self.quickSecondComboBox.setVisible(False)
self.quickStyleComboBox.setCurrentIndex(self.settings.layout_style)
self.advancedStyleComboBox.setCurrentIndex(self.settings.layout_style)
def retranslateUi(self):
log.debug('retranslateUi')
self.quickSearchLabel.setText(translate('BiblesPlugin.MediaItem', 'Find:'))
self.quickVersionLabel.setText('%s:' % UiStrings().Version)
self.quickSecondLabel.setText(translate('BiblesPlugin.MediaItem', 'Second:'))
self.quickStyleLabel.setText(UiStrings().LayoutStyle)
self.quickStyleComboBox.setItemText(LayoutStyle.VersePerSlide, UiStrings().VersePerSlide)
self.quickStyleComboBox.setItemText(LayoutStyle.VersePerLine, UiStrings().VersePerLine)
self.quickStyleComboBox.setItemText(LayoutStyle.Continuous, UiStrings().Continuous)
self.quickLockButton.setToolTip(translate('BiblesPlugin.MediaItem',
'Toggle to keep or clear the previous results.'))
self.quickSearchButton.setText(UiStrings().Search)
self.advancedBookLabel.setText(translate('BiblesPlugin.MediaItem', 'Book:'))
self.advancedChapterLabel.setText(translate('BiblesPlugin.MediaItem', 'Chapter:'))
self.advancedVerseLabel.setText(translate('BiblesPlugin.MediaItem', 'Verse:'))
self.advancedFromLabel.setText(translate('BiblesPlugin.MediaItem', 'From:'))
self.advancedToLabel.setText(translate('BiblesPlugin.MediaItem', 'To:'))
self.advancedVersionLabel.setText('%s:' % UiStrings().Version)
self.advancedSecondLabel.setText(translate('BiblesPlugin.MediaItem', 'Second:'))
self.advancedStyleLabel.setText(UiStrings().LayoutStyle)
self.advancedStyleComboBox.setItemText(LayoutStyle.VersePerSlide, UiStrings().VersePerSlide)
self.advancedStyleComboBox.setItemText(LayoutStyle.VersePerLine, UiStrings().VersePerLine)
self.advancedStyleComboBox.setItemText(LayoutStyle.Continuous, UiStrings().Continuous)
self.advancedLockButton.setToolTip(translate('BiblesPlugin.MediaItem',
'Toggle to keep or clear the previous results.'))
self.advancedSearchButton.setText(UiStrings().Search)
def initialise(self):
log.debug('bible manager initialise')
self.plugin.manager.media = self
self.loadBibles()
self.quickSearchEdit.set_search_types([
(BibleSearch.Reference, ':/bibles/bibles_search_reference.png',
translate('BiblesPlugin.MediaItem', 'Scripture Reference'),
translate('BiblesPlugin.MediaItem', 'Search Scripture Reference...')),
(BibleSearch.Text, ':/bibles/bibles_search_text.png',
translate('BiblesPlugin.MediaItem', 'Text Search'),
translate('BiblesPlugin.MediaItem', 'Search Text...'))
])
self.quickSearchEdit.set_current_search_type(Settings().value('%s/last search type' % self.settings_section))
self.config_update()
log.debug('bible manager initialise complete')
def loadBibles(self):
log.debug('Loading Bibles')
self.quickVersionComboBox.clear()
self.quickSecondComboBox.clear()
self.advancedVersionComboBox.clear()
self.advancedSecondComboBox.clear()
self.quickSecondComboBox.addItem('')
self.advancedSecondComboBox.addItem('')
# Get all bibles and sort the list.
bibles = list(self.plugin.manager.get_bibles().keys())
bibles = [_f for _f in bibles if _f]
bibles.sort(key=get_locale_key)
# Load the bibles into the combo boxes.
self.quickVersionComboBox.addItems(bibles)
self.quickSecondComboBox.addItems(bibles)
self.advancedVersionComboBox.addItems(bibles)
self.advancedSecondComboBox.addItems(bibles)
# set the default value
bible = Settings().value(self.settings_section + '/advanced bible')
if bible in bibles:
find_and_set_in_combo_box(self.advancedVersionComboBox, bible)
self.initialiseAdvancedBible(str(bible))
elif bibles:
self.initialiseAdvancedBible(bibles[0])
bible = Settings().value(self.settings_section + '/quick bible')
find_and_set_in_combo_box(self.quickVersionComboBox, bible)
def reload_bibles(self, process=False):
log.debug('Reloading Bibles')
self.plugin.manager.reload_bibles()
self.loadBibles()
# If called from first time wizard re-run, process any new bibles.
if process:
self.plugin.app_startup()
self.updateAutoCompleter()
def initialiseAdvancedBible(self, bible, last_book_id=None):
"""
        This initialises the given bible, which means that its book names and
        their chapter numbers are added to the combo boxes on the
        'Advanced Search' Tab. This has no effect on the 'Quick Search' Tab.
        ``bible``
            The bible to initialise (unicode).
        ``last_book_id``
            The "book reference id" of the book which is chosen at the moment
            (int).
"""
log.debug('initialiseAdvancedBible %s, %s', bible, last_book_id)
book_data = self.plugin.manager.get_books(bible)
secondbible = self.advancedSecondComboBox.currentText()
if secondbible != '':
secondbook_data = self.plugin.manager.get_books(secondbible)
book_data_temp = []
for book in book_data:
for secondbook in secondbook_data:
if book['book_reference_id'] == \
secondbook['book_reference_id']:
book_data_temp.append(book)
book_data = book_data_temp
self.advancedBookComboBox.clear()
first = True
initialise_chapter_verse = False
language_selection = self.plugin.manager.get_language_selection(bible)
book_names = BibleStrings().BookNames
for book in book_data:
row = self.advancedBookComboBox.count()
if language_selection == LanguageSelection.Bible:
self.advancedBookComboBox.addItem(book['name'])
elif language_selection == LanguageSelection.Application:
data = BiblesResourcesDB.get_book_by_id(book['book_reference_id'])
self.advancedBookComboBox.addItem(book_names[data['abbreviation']])
elif language_selection == LanguageSelection.English:
data = BiblesResourcesDB.get_book_by_id(book['book_reference_id'])
self.advancedBookComboBox.addItem(data['name'])
self.advancedBookComboBox.setItemData(row, book['book_reference_id'])
if first:
first = False
first_book = book
initialise_chapter_verse = True
if last_book_id and last_book_id == int(book['book_reference_id']):
index = self.advancedBookComboBox.findData(book['book_reference_id'])
if index == -1:
# Not Found.
index = 0
self.advancedBookComboBox.setCurrentIndex(index)
initialise_chapter_verse = False
if initialise_chapter_verse:
self.initialiseChapterVerse(bible, first_book['name'],
first_book['book_reference_id'])
def initialiseChapterVerse(self, bible, book, book_ref_id):
log.debug('initialiseChapterVerse %s, %s, %s', bible, book, book_ref_id)
book = self.plugin.manager.get_book_by_id(bible, book_ref_id)
self.chapter_count = self.plugin.manager.get_chapter_count(bible, book)
verse_count = self.plugin.manager.get_verse_count_by_book_ref_id(bible, book_ref_id, 1)
if verse_count == 0:
self.advancedSearchButton.setEnabled(False)
critical_error_message_box(message=translate('BiblesPlugin.MediaItem', 'Bible not fully loaded.'))
else:
self.advancedSearchButton.setEnabled(True)
self.adjustComboBox(1, self.chapter_count, self.advancedFromChapter)
self.adjustComboBox(1, self.chapter_count, self.advancedToChapter)
self.adjustComboBox(1, verse_count, self.advancedFromVerse)
self.adjustComboBox(1, verse_count, self.advancedToVerse)
def updateAutoCompleter(self):
"""
This updates the bible book completion list for the search field. The
completion depends on the bible. It is only updated when we are doing a
reference search, otherwise the auto completion list is removed.
"""
log.debug('updateAutoCompleter')
# Save the current search type to the configuration.
Settings().setValue('%s/last search type' % self.settings_section, self.quickSearchEdit.current_search_type())
# Save the current bible to the configuration.
Settings().setValue(self.settings_section + '/quick bible', self.quickVersionComboBox.currentText())
books = []
# We have to do a 'Reference Search'.
if self.quickSearchEdit.current_search_type() == BibleSearch.Reference:
bibles = self.plugin.manager.get_bibles()
bible = self.quickVersionComboBox.currentText()
if bible:
book_data = bibles[bible].get_books()
secondbible = self.quickSecondComboBox.currentText()
if secondbible != '':
secondbook_data = bibles[secondbible].get_books()
book_data_temp = []
for book in book_data:
for secondbook in secondbook_data:
if book.book_reference_id == secondbook.book_reference_id:
book_data_temp.append(book)
book_data = book_data_temp
language_selection = self.plugin.manager.get_language_selection(bible)
if language_selection == LanguageSelection.Bible:
books = [book.name + ' ' for book in book_data]
elif language_selection == LanguageSelection.Application:
book_names = BibleStrings().BookNames
for book in book_data:
data = BiblesResourcesDB.get_book_by_id(book.book_reference_id)
books.append(str(book_names[data['abbreviation']]) + ' ')
elif language_selection == LanguageSelection.English:
for book in book_data:
data = BiblesResourcesDB.get_book_by_id(book.book_reference_id)
books.append(data['name'] + ' ')
books.sort(key=get_locale_key)
set_case_insensitive_completer(books, self.quickSearchEdit)
def on_import_click(self):
if not hasattr(self, 'import_wizard'):
self.import_wizard = BibleImportForm(self, self.plugin.manager, self.plugin)
# If the import was not cancelled then reload.
if self.import_wizard.exec_():
self.reload_bibles()
def on_edit_click(self):
if self.quickTab.isVisible():
bible = self.quickVersionComboBox.currentText()
elif self.advancedTab.isVisible():
bible = self.advancedVersionComboBox.currentText()
if bible:
self.editBibleForm = EditBibleForm(self, self.main_window, self.plugin.manager)
self.editBibleForm.loadBible(bible)
if self.editBibleForm.exec_():
self.reload_bibles()
def on_delete_click(self):
if self.quickTab.isVisible():
bible = self.quickVersionComboBox.currentText()
elif self.advancedTab.isVisible():
bible = self.advancedVersionComboBox.currentText()
if bible:
if QtGui.QMessageBox.question(self, UiStrings().ConfirmDelete,
translate('BiblesPlugin.MediaItem', 'Are you sure you want to completely delete "%s" Bible from '
'OpenLP?\n\nYou will need to re-import this Bible to use it again.') % bible,
QtGui.QMessageBox.StandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No),
QtGui.QMessageBox.Yes) == QtGui.QMessageBox.No:
return
self.plugin.manager.delete_bible(bible)
self.reload_bibles()
def onSearchTabBarCurrentChanged(self, index):
if index == 0:
self.advancedTab.setVisible(False)
self.quickTab.setVisible(True)
self.quickSearchEdit.setFocus()
else:
self.quickTab.setVisible(False)
self.advancedTab.setVisible(True)
self.advancedBookComboBox.setFocus()
def onLockButtonToggled(self, checked):
if checked:
self.sender().setIcon(self.lock_icon)
else:
self.sender().setIcon(self.unlock_icon)
def onQuickStyleComboBoxChanged(self):
self.settings.layout_style = self.quickStyleComboBox.currentIndex()
self.advancedStyleComboBox.setCurrentIndex(self.settings.layout_style)
self.settings.layout_style_combo_box.setCurrentIndex(self.settings.layout_style)
Settings().setValue(self.settings_section + '/verse layout style', self.settings.layout_style)
def onAdvancedStyleComboBoxChanged(self):
self.settings.layout_style = self.advancedStyleComboBox.currentIndex()
self.quickStyleComboBox.setCurrentIndex(self.settings.layout_style)
self.settings.layout_style_combo_box.setCurrentIndex(self.settings.layout_style)
Settings().setValue(self.settings_section + '/verse layout style', self.settings.layout_style)
def onAdvancedVersionComboBox(self):
Settings().setValue(self.settings_section + '/advanced bible', self.advancedVersionComboBox.currentText())
self.initialiseAdvancedBible(self.advancedVersionComboBox.currentText(),
self.advancedBookComboBox.itemData(int(self.advancedBookComboBox.currentIndex())))
def onAdvancedSecondComboBox(self):
self.initialiseAdvancedBible(self.advancedVersionComboBox.currentText(),
self.advancedBookComboBox.itemData(int(self.advancedBookComboBox.currentIndex())))
def onAdvancedBookComboBox(self):
item = int(self.advancedBookComboBox.currentIndex())
self.initialiseChapterVerse(
self.advancedVersionComboBox.currentText(),
self.advancedBookComboBox.currentText(),
self.advancedBookComboBox.itemData(item))
def onAdvancedFromVerse(self):
chapter_from = int(self.advancedFromChapter.currentText())
chapter_to = int(self.advancedToChapter.currentText())
if chapter_from == chapter_to:
bible = self.advancedVersionComboBox.currentText()
book_ref_id = self.advancedBookComboBox.itemData(int(self.advancedBookComboBox.currentIndex()))
verse_from = int(self.advancedFromVerse.currentText())
verse_count = self.plugin.manager.get_verse_count_by_book_ref_id(bible, book_ref_id, chapter_to)
self.adjustComboBox(verse_from, verse_count, self.advancedToVerse, True)
def onAdvancedToChapter(self):
bible = self.advancedVersionComboBox.currentText()
book_ref_id = self.advancedBookComboBox.itemData(int(self.advancedBookComboBox.currentIndex()))
chapter_from = int(self.advancedFromChapter.currentText())
chapter_to = int(self.advancedToChapter.currentText())
verse_from = int(self.advancedFromVerse.currentText())
verse_to = int(self.advancedToVerse.currentText())
verse_count = self.plugin.manager.get_verse_count_by_book_ref_id(bible, book_ref_id, chapter_to)
if chapter_from == chapter_to and verse_from > verse_to:
self.adjustComboBox(verse_from, verse_count, self.advancedToVerse)
else:
self.adjustComboBox(1, verse_count, self.advancedToVerse)
def onAdvancedFromChapter(self):
bible = self.advancedVersionComboBox.currentText()
book_ref_id = self.advancedBookComboBox.itemData(
int(self.advancedBookComboBox.currentIndex()))
chapter_from = int(self.advancedFromChapter.currentText())
chapter_to = int(self.advancedToChapter.currentText())
verse_count = self.plugin.manager.get_verse_count_by_book_ref_id(bible, book_ref_id, chapter_from)
self.adjustComboBox(1, verse_count, self.advancedFromVerse)
if chapter_from > chapter_to:
self.adjustComboBox(1, verse_count, self.advancedToVerse)
self.adjustComboBox(chapter_from, self.chapter_count, self.advancedToChapter)
elif chapter_from == chapter_to:
self.adjustComboBox(chapter_from, self.chapter_count, self.advancedToChapter)
self.adjustComboBox(1, verse_count, self.advancedToVerse, True)
else:
self.adjustComboBox(chapter_from, self.chapter_count, self.advancedToChapter, True)
def adjustComboBox(self, range_from, range_to, combo, restore=False):
"""
        Adjusts the given combo box to the given values.
``range_from``
The first number of the range (int).
``range_to``
The last number of the range (int).
``combo``
The combo box itself (QComboBox).
``restore``
If True, then the combo's currentText will be restored after
adjusting (if possible).
"""
log.debug('adjustComboBox %s, %s, %s', combo, range_from, range_to)
if restore:
old_text = combo.currentText()
combo.clear()
combo.addItems(list(map(str, list(range(range_from, range_to + 1)))))
if restore and combo.findText(old_text) != -1:
combo.setCurrentIndex(combo.findText(old_text))
def onAdvancedSearchButton(self):
"""
Does an advanced search and saves the search results.
"""
log.debug('Advanced Search Button clicked')
self.advancedSearchButton.setEnabled(False)
self.application.process_events()
bible = self.advancedVersionComboBox.currentText()
second_bible = self.advancedSecondComboBox.currentText()
book = self.advancedBookComboBox.currentText()
book_ref_id = self.advancedBookComboBox.itemData(int(self.advancedBookComboBox.currentIndex()))
chapter_from = self.advancedFromChapter.currentText()
chapter_to = self.advancedToChapter.currentText()
verse_from = self.advancedFromVerse.currentText()
verse_to = self.advancedToVerse.currentText()
verse_separator = get_reference_separator('sep_v_display')
range_separator = get_reference_separator('sep_r_display')
verse_range = chapter_from + verse_separator + verse_from + range_separator + chapter_to + \
verse_separator + verse_to
versetext = '%s %s' % (book, verse_range)
self.application.set_busy_cursor()
self.search_results = self.plugin.manager.get_verses(bible, versetext, book_ref_id)
if second_bible:
self.second_search_results = self.plugin.manager.get_verses(second_bible, versetext, book_ref_id)
if not self.advancedLockButton.isChecked():
self.list_view.clear()
if self.list_view.count() != 0:
self.__check_second_bible(bible, second_bible)
elif self.search_results:
self.displayResults(bible, second_bible)
self.advancedSearchButton.setEnabled(True)
self.check_search_result()
self.application.set_normal_cursor()
def onQuickSearchButton(self):
"""
Does a quick search and saves the search results. Quick search can
either be "Reference Search" or "Text Search".
"""
log.debug('Quick Search Button clicked')
self.quickSearchButton.setEnabled(False)
self.application.process_events()
bible = self.quickVersionComboBox.currentText()
second_bible = self.quickSecondComboBox.currentText()
text = self.quickSearchEdit.text()
if self.quickSearchEdit.current_search_type() == BibleSearch.Reference:
# We are doing a 'Reference Search'.
self.search_results = self.plugin.manager.get_verses(bible, text)
if second_bible and self.search_results:
self.second_search_results = self.plugin.manager.get_verses(second_bible, text,
self.search_results[0].book.book_reference_id)
else:
# We are doing a 'Text Search'.
self.application.set_busy_cursor()
bibles = self.plugin.manager.get_bibles()
self.search_results = self.plugin.manager.verse_search(bible, second_bible, text)
if second_bible and self.search_results:
text = []
new_search_results = []
count = 0
passage_not_found = False
for verse in self.search_results:
db_book = bibles[second_bible].get_book_by_book_ref_id(verse.book.book_reference_id)
if not db_book:
log.debug('Passage "%s %d:%d" not found in Second Bible' %
(verse.book.name, verse.chapter, verse.verse))
passage_not_found = True
count += 1
continue
new_search_results.append(verse)
text.append((verse.book.book_reference_id, verse.chapter,
verse.verse, verse.verse))
if passage_not_found:
QtGui.QMessageBox.information(self, translate('BiblesPlugin.MediaItem', 'Information'),
translate('BiblesPlugin.MediaItem', 'The second Bible does not contain all the verses '
'that are in the main Bible. Only verses found in both Bibles will be shown. %d verses '
'have not been included in the results.') % count,
QtGui.QMessageBox.StandardButtons(QtGui.QMessageBox.Ok))
self.search_results = new_search_results
self.second_search_results = bibles[second_bible].get_verses(text)
if not self.quickLockButton.isChecked():
self.list_view.clear()
if self.list_view.count() != 0 and self.search_results:
self.__check_second_bible(bible, second_bible)
elif self.search_results:
self.displayResults(bible, second_bible)
self.quickSearchButton.setEnabled(True)
self.check_search_result()
self.application.set_normal_cursor()
def displayResults(self, bible, second_bible=''):
"""
Displays the search results in the media manager. All data needed for
further action is saved for/in each row.
"""
items = self.buildDisplayResults(bible, second_bible, self.search_results)
for bible_verse in items:
self.list_view.addItem(bible_verse)
self.list_view.selectAll()
self.search_results = {}
self.second_search_results = {}
def buildDisplayResults(self, bible, second_bible, search_results):
"""
        Builds the list widget items for the given search results. All data needed
        for further action is saved for/in each item.
"""
verse_separator = get_reference_separator('sep_v_display')
version = self.plugin.manager.get_meta_data(bible, 'name').value
copyright = self.plugin.manager.get_meta_data(bible, 'copyright').value
permissions = self.plugin.manager.get_meta_data(bible, 'permissions').value
second_version = ''
second_copyright = ''
second_permissions = ''
if second_bible:
second_version = self.plugin.manager.get_meta_data(second_bible, 'name').value
second_copyright = self.plugin.manager.get_meta_data(second_bible, 'copyright').value
second_permissions = self.plugin.manager.get_meta_data(second_bible, 'permissions').value
items = []
language_selection = self.plugin.manager.get_language_selection(bible)
for count, verse in enumerate(search_results):
book = None
if language_selection == LanguageSelection.Bible:
book = verse.book.name
elif language_selection == LanguageSelection.Application:
book_names = BibleStrings().BookNames
data = BiblesResourcesDB.get_book_by_id(verse.book.book_reference_id)
book = str(book_names[data['abbreviation']])
elif language_selection == LanguageSelection.English:
data = BiblesResourcesDB.get_book_by_id(verse.book.book_reference_id)
book = data['name']
data = {
'book': book,
'chapter': verse.chapter,
'verse': verse.verse,
'bible': bible,
'version': version,
'copyright': copyright,
'permissions': permissions,
'text': verse.text,
'second_bible': second_bible,
'second_version': second_version,
'second_copyright': second_copyright,
'second_permissions': second_permissions,
'second_text': ''
}
if second_bible:
try:
data['second_text'] = self.second_search_results[count].text
except IndexError:
log.exception('The second_search_results does not have as many verses as the search_results.')
break
bible_text = '%s %d%s%d (%s, %s)' % (book, verse.chapter, verse_separator, verse.verse, version,
second_version)
else:
bible_text = '%s %d%s%d (%s)' % (book, verse.chapter, verse_separator, verse.verse, version)
bible_verse = QtGui.QListWidgetItem(bible_text)
bible_verse.setData(QtCore.Qt.UserRole, data)
items.append(bible_verse)
return items
def generate_slide_data(self, service_item, item=None, xmlVersion=False,
remote=False, context=ServiceItemContext.Service):
"""
Generates and formats the slides for the service item as well as the
service item's title.
"""
log.debug('generating slide data')
if item:
items = item
else:
items = self.list_view.selectedItems()
if not items:
return False
bible_text = ''
old_item = None
old_chapter = -1
raw_slides = []
raw_title = []
verses = VerseReferenceList()
for bitem in items:
book = self._decode_qt_object(bitem, 'book')
chapter = int(self._decode_qt_object(bitem, 'chapter'))
verse = int(self._decode_qt_object(bitem, 'verse'))
bible = self._decode_qt_object(bitem, 'bible')
version = self._decode_qt_object(bitem, 'version')
copyright = self._decode_qt_object(bitem, 'copyright')
permissions = self._decode_qt_object(bitem, 'permissions')
text = self._decode_qt_object(bitem, 'text')
second_bible = self._decode_qt_object(bitem, 'second_bible')
second_version = self._decode_qt_object(bitem, 'second_version')
second_copyright = self._decode_qt_object(bitem, 'second_copyright')
second_permissions = self._decode_qt_object(bitem, 'second_permissions')
second_text = self._decode_qt_object(bitem, 'second_text')
verses.add(book, chapter, verse, version, copyright, permissions)
verse_text = self.formatVerse(old_chapter, chapter, verse)
if second_bible:
bible_text = '%s %s\n\n%s %s' % (verse_text, text, verse_text, second_text)
raw_slides.append(bible_text.rstrip())
bible_text = ''
# If we are 'Verse Per Slide' then create a new slide.
elif self.settings.layout_style == LayoutStyle.VersePerSlide:
bible_text = '%s %s' % (verse_text, text)
raw_slides.append(bible_text.rstrip())
bible_text = ''
# If we are 'Verse Per Line' then force a new line.
elif self.settings.layout_style == LayoutStyle.VersePerLine:
bible_text = '%s%s %s\n' % (bible_text, verse_text, text)
# We have to be 'Continuous'.
else:
bible_text = '%s %s %s\n' % (bible_text, verse_text, text)
bible_text = bible_text.strip(' ')
if not old_item:
start_item = bitem
elif self.checkTitle(bitem, old_item):
raw_title.append(self.formatTitle(start_item, old_item))
start_item = bitem
old_item = bitem
old_chapter = chapter
# Add footer
service_item.raw_footer.append(verses.format_verses())
if second_bible:
verses.add_version(second_version, second_copyright, second_permissions)
service_item.raw_footer.append(verses.format_versions())
raw_title.append(self.formatTitle(start_item, bitem))
# If there are no more items we check whether we have to add bible_text.
if bible_text:
raw_slides.append(bible_text.lstrip())
bible_text = ''
# Service Item: Capabilities
if self.settings.layout_style == LayoutStyle.Continuous and not second_bible:
# Split the line but do not replace line breaks in renderer.
service_item.add_capability(ItemCapabilities.NoLineBreaks)
service_item.add_capability(ItemCapabilities.CanPreview)
service_item.add_capability(ItemCapabilities.CanLoop)
service_item.add_capability(ItemCapabilities.CanWordSplit)
# Service Item: Title
service_item.title = create_separated_list(raw_title)
# Service Item: Theme
if not self.settings.bible_theme:
service_item.theme = None
else:
service_item.theme = self.settings.bible_theme
for slide in raw_slides:
service_item.add_from_text(slide)
return True
def formatTitle(self, start_bitem, old_bitem):
"""
        This method is called when we have to change the title because we are at
        the end of a verse range, e.g. if we want to add Genesis 1:1-6 as well as
        Daniel 2:14.
        ``start_bitem``
            The first item of a range.
        ``old_bitem``
            The last item of a range.
"""
verse_separator = get_reference_separator('sep_v_display')
range_separator = get_reference_separator('sep_r_display')
old_chapter = self._decode_qt_object(old_bitem, 'chapter')
old_verse = self._decode_qt_object(old_bitem, 'verse')
start_book = self._decode_qt_object(start_bitem, 'book')
start_chapter = self._decode_qt_object(start_bitem, 'chapter')
start_verse = self._decode_qt_object(start_bitem, 'verse')
start_bible = self._decode_qt_object(start_bitem, 'bible')
start_second_bible = self._decode_qt_object(start_bitem, 'second_bible')
if start_second_bible:
bibles = '%s, %s' % (start_bible, start_second_bible)
else:
bibles = start_bible
if start_chapter == old_chapter:
if start_verse == old_verse:
verse_range = start_chapter + verse_separator + start_verse
else:
verse_range = start_chapter + verse_separator + start_verse + range_separator + old_verse
else:
verse_range = start_chapter + verse_separator + start_verse + \
range_separator + old_chapter + verse_separator + old_verse
return '%s %s (%s)' % (start_book, verse_range, bibles)
def checkTitle(self, bitem, old_bitem):
"""
        This method checks if we are at the end of a verse range. If that is
        the case, we return True, otherwise False. E.g. if we added
        Genesis 1:1-6, but the next verse is Daniel 2:14, we return True.
        ``bitem``
            The item we are dealing with at the moment.
        ``old_bitem``
            The item we were previously dealing with.
"""
# Get all the necessary meta data.
book = self._decode_qt_object(bitem, 'book')
chapter = int(self._decode_qt_object(bitem, 'chapter'))
verse = int(self._decode_qt_object(bitem, 'verse'))
bible = self._decode_qt_object(bitem, 'bible')
second_bible = self._decode_qt_object(bitem, 'second_bible')
old_book = self._decode_qt_object(old_bitem, 'book')
old_chapter = int(self._decode_qt_object(old_bitem, 'chapter'))
old_verse = int(self._decode_qt_object(old_bitem, 'verse'))
old_bible = self._decode_qt_object(old_bitem, 'bible')
old_second_bible = self._decode_qt_object(old_bitem, 'second_bible')
if old_bible != bible or old_second_bible != second_bible or old_book != book:
# The bible, second bible or book has changed.
return True
elif old_verse + 1 != verse and old_chapter == chapter:
# We are still in the same chapter, but a verse has been skipped.
return True
elif old_chapter + 1 == chapter and (verse != 1 or
old_verse != self.plugin.manager.get_verse_count(old_bible, old_book, old_chapter)):
# We are in the following chapter, but the last verse was not the
# last verse of the chapter or the current verse is not the
# first one of the chapter.
return True
return False
def formatVerse(self, old_chapter, chapter, verse):
"""
        Formats and returns the text each verse starts with, for the given
        chapter and verse. The text is surrounded by round, square or curly
        brackets, or by no brackets at all. For example::
u'{su}1:1{/su}'
``old_chapter``
The previous verse's chapter number (int).
``chapter``
The chapter number (int).
``verse``
The verse number (int).
"""
verse_separator = get_reference_separator('sep_v_display')
if not self.settings.show_new_chapters or old_chapter != chapter:
verse_text = str(chapter) + verse_separator + str(verse)
else:
verse_text = str(verse)
if self.settings.display_style == DisplayStyle.Round:
return '{su}(%s){/su}' % verse_text
if self.settings.display_style == DisplayStyle.Curly:
return '{su}{%s}{/su}' % verse_text
if self.settings.display_style == DisplayStyle.Square:
return '{su}[%s]{/su}' % verse_text
return '{su}%s{/su}' % verse_text
def search(self, string, showError):
"""
Search for some Bible verses (by reference).
"""
bible = self.quickVersionComboBox.currentText()
search_results = self.plugin.manager.get_verses(bible, string, False, showError)
if search_results:
versetext = ' '.join([verse.text for verse in search_results])
return [[string, versetext]]
return []
def create_item_from_id(self, item_id):
"""
Create a media item from an item id.
"""
item = QtGui.QListWidgetItem()
bible = self.quickVersionComboBox.currentText()
search_results = self.plugin.manager.get_verses(bible, item_id, False)
items = self.buildDisplayResults(bible, '', search_results)
return items
| marmyshev/bug_1117098 | openlp/plugins/bibles/lib/mediaitem.py | Python | gpl-2.0 | 50,056 | [
"Brian"
] | 8450059c90fd916beddef836a64d769b2fef8f4426346ee064d54d2305563e89 |
import numpy as np
import copy
from collections import OrderedDict
import cv2
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.proto import caffe_pb2 as pb2
def compute_jacobian(net, output, input_):
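    """
    Return the Jacobian of the given output blob with respect to the given input
    blob, built one row at a time: for every unit of the output a backward pass is
    run with a one-hot diff and the resulting gradient on the input blob is
    collected. Both blobs are assumed to hold a single sample (batch size 1).
    """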
assert output in net.outputs
assert input_ in net.inputs
input_data = net.blobs[input_].data
assert input_data.ndim == 2
assert input_data.shape[0] == 1
output_data = net.blobs[output].data
assert output_data.ndim == 2
assert output_data.shape[0] == 1
    doutput_dinput = np.array([net.backward(**{output: e[None, :]})[input_].flatten() for e in np.eye(output_data.shape[1])])
return doutput_dinput
def traverse_layers_to_keep(layer, layers_dict, layers_to_keep):
if layer in layers_to_keep: # has already been traversed
return
layers_to_keep.append(layer)
for layer_name in layer.bottom:
if layer_name in layers_dict:
traverse_layers_to_keep(layers_dict[layer_name], layers_dict, layers_to_keep)
def remove_non_descendants(layers, ancestor_layers, exception_layers=[]):
"""
Remove layers that are not descendants of ancestor_layers except for exception_layers.
"""
layers_dict = {layer.name: layer for layer in layers}
layers_to_keep = [layer for layer in exception_layers]
for ancestor_layer in ancestor_layers:
traverse_layers_to_keep(ancestor_layer, layers_dict, layers_to_keep)
layer_names_to_keep = [layer.name for layer in layers_to_keep]
for layer_name, layer in layers_dict.items():
if layer in layers_to_keep:
continue
for top_layer in layer.top:
if top_layer in layer_names_to_keep:
layer_names_to_keep.append(layer_name)
layers_to_keep.append(layer)
break
for layer_name, layer in layers_dict.items():
if layer_name not in layer_names_to_keep:
layers.remove(layer)
def deploy_net(net, inputs, input_shapes, outputs, batch_size=1, force_backward=True):
# remove all layers that are not descendants of output layers
output_layers = [layer for layer in net.layer if layer.name in outputs]
remove_non_descendants(net.layer, output_layers)
net.input.extend(inputs)
net.input_shape.extend([pb2.BlobShape(dim=(batch_size,)+shape) for shape in input_shapes])
net.force_backward = force_backward
return net
def train_val_net(net):
# remove all layers that are not descendants of loss layers
loss_layers = [layer for layer in net.layer if layer.name.endswith('loss')]
exception_layers = [layer for layer in net.layer if 'data' in layer.name]
remove_non_descendants(net.layer, loss_layers, exception_layers)
return net
def approx_bilinear_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name='ApproxBilinearNet', phase=None):
assert len(input_shapes) == 2
image_shape, vel_shape = input_shapes
assert len(image_shape) == 3
assert len(vel_shape) == 1
_, height, width = image_shape
y_dim = height * width
fc_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)],
num_output=y_dim,
weight_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0))
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
u = n.vel
n.y = L.Flatten(n.image_curr, name='flatten1')
n.y_diff = L.Flatten(n.image_diff, name='flatten2')
n.fc1_y = L.InnerProduct(n.y, name='fc1', **fc_kwargs)
n.fc2_u = L.InnerProduct(u, name='fc2', **fc_kwargs)
n.fc3_u = L.InnerProduct(u, name='fc3', **fc_kwargs)
n.prod_y_u = L.Eltwise(n.fc1_y, n.fc2_u, name='prod', operation=P.Eltwise.PROD)
n.y_diff_pred = L.Eltwise(n.prod_y_u, n.fc3_u, name='sum', operation=P.Eltwise.SUM)
n.loss = L.EuclideanLoss(n.y_diff_pred, n.y_diff, name='loss')
net = n.to_proto()
net.name = net_name
return net, None
def Bilinear(n, y, u, y_dim, u_dim, name='bilinear', **fc_kwargs):
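    # A rough sketch of what this block builds: y is tiled along the control axis and
    # u along the state axis to form the outer product y u^T; one fully connected
    # layer maps the flattened outer product to the state dimension and another maps
    # u directly, so the output is effectively W * vec(y u^T) + B * u, i.e. a
    # predicted change in y that is linear in u for a fixed y.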
re_y = n.tops[name+'_re_y'] = L.Reshape(y, shape=dict(dim=[0, -1, 1]))
tile_re_y = n.tops[name+'_tile_re_y'] = L.Tile(re_y, axis=2, tiles=u_dim)
re_u = n.tops[name+'_re_u'] = L.Reshape(u, shape=dict(dim=[0, 1, -1]))
tile_re_u = n.tops[name+'_tile_re_u'] = L.Tile(re_u, axis=1, tiles=y_dim)
outer_yu = n.tops[name+'_outer_yu'] = L.Eltwise(tile_re_y, tile_re_u, operation=P.Eltwise.PROD)
fc_outer_yu = n.tops[name+'_fc_outer_yu'] = L.InnerProduct(outer_yu, num_output=y_dim, **fc_kwargs)
fc_u = n.tops[name+'_fc_u'] = L.InnerProduct(u, num_output=y_dim, **fc_kwargs)
return L.Eltwise(fc_outer_yu, fc_u, operation=P.Eltwise.SUM)
def bilinear_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name='BilinearNet', phase=None, **kwargs):
assert len(input_shapes) == 2
image_shape, vel_shape = input_shapes
assert len(image_shape) == 3
assert len(vel_shape) == 1
y_dim = np.prod(image_shape)
u_dim, = vel_shape
fc_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)],
weight_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0))
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
u = n.vel
n.y = L.Flatten(n.image_curr)
n.y_diff_pred = Bilinear(n, n.y, u, y_dim, u_dim, **fc_kwargs)
n.y_diff = L.Flatten(n.image_diff)
n.loss = L.EuclideanLoss(n.y_diff_pred, n.y_diff, name='loss')
n.image_diff_pred = L.Reshape(n.y_diff_pred, shape=dict(dim=[batch_size] + list(image_shape)))
n.image_next_pred = L.Eltwise(n.image_curr, n.image_diff_pred, operation=P.Eltwise.SUM)
net = n.to_proto()
net.name = net_name
return net, None
def bilinear_constrained_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name='BilinearConstrainedNet', phase=None, **kwargs):
assert len(input_shapes) == 2
image_shape, vel_shape = input_shapes
assert len(image_shape) == 3
assert len(vel_shape) == 1
_, height, width = image_shape
y_dim = height * width
u_dim = vel_shape[0]
fc_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)],
weight_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0))
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
u = n.vel
n.y = L.Flatten(n.image_curr)
n.y_diff_pred = Bilinear(n, n.y, u, y_dim, u_dim, **fc_kwargs)
n.image_diff_pred = L.Reshape(n.y_diff_pred, shape=dict(dim=[batch_size] + list(image_shape)))
n.image_next_pred_unconstrained = L.Eltwise(n.image_curr, n.image_diff_pred, operation=P.Eltwise.SUM)
n.image_next_pred = L.TanH(n.image_next_pred_unconstrained)
n.image_next = L.Eltwise(n.image_curr, n.image_diff, operation=P.Eltwise.SUM)
n.loss = L.EuclideanLoss(n.image_next, n.image_next_pred, name='loss')
net = n.to_proto()
net.name = net_name
return net, None
def action_cond_encoder_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name='ActionCondEncoderNet', phase=None):
assert len(input_shapes) == 2
image_shape, vel_shape = input_shapes
assert len(image_shape) == 3
assert len(vel_shape) == 1
y_dim = 1024
u_dim = vel_shape[0]
conv_kwargs = dict(num_output=64, kernel_size=6, stride=2)
deconv_kwargs = conv_kwargs
fc_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)],
weight_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0))
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
n.conv1 = L.Convolution(n.image_curr, name='conv1', **conv_kwargs)
n.relu1 = L.ReLU(n.conv1, name='relu1', in_place=True)
n.conv2 = L.Convolution(n.relu1, name='conv2', pad=2, **conv_kwargs)
n.relu2 = L.ReLU(n.conv2, name='relu2', in_place=True)
n.conv3 = L.Convolution(n.relu2, name='conv3', pad=2, **conv_kwargs)
n.relu3 = L.ReLU(n.conv3, name='relu3', in_place=True)
n.y = L.InnerProduct(n.relu3, num_output=y_dim, weight_filler=dict(type='xavier'))
u = n.vel
n.y_diff_pred = Bilinear(n, n.y, u, y_dim, u_dim, **fc_kwargs)
n.y_next_pred = L.Eltwise(n.y, n.y_diff_pred, operation=P.Eltwise.SUM)
n.ip2 = L.InnerProduct(n.y_next_pred, name='ip2', num_output=6400, weight_filler=dict(type='xavier'))
n.re_y_next_pred = L.Reshape(n.ip2, shape=dict(dim=[batch_size, 64, 10, 10]))
    n.deconv3 = L.Deconvolution(n.re_y_next_pred, convolution_param=dict(deconv_kwargs, pad=2))
    n.derelu3 = L.ReLU(n.deconv3, in_place=True)
    n.deconv2 = L.Deconvolution(n.derelu3, convolution_param=dict(deconv_kwargs, pad=2))
    n.derelu2 = L.ReLU(n.deconv2, in_place=True)
    n.deconv1 = L.Deconvolution(n.derelu2, convolution_param=dict(deconv_kwargs, num_output=1))
n.image_next_pred = L.ReLU(n.deconv1, in_place=True)
n.image_next = L.Eltwise(n.image_curr, n.image_diff, operation=P.Eltwise.SUM)
n.loss = L.EuclideanLoss(n.image_next_pred, n.image_next, name='loss')
net = n.to_proto()
net.name = net_name
return net, None
def small_action_cond_encoder_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name=None, phase=None, constrained=True, **kwargs):
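    # Encoder-decoder with bilinear latent dynamics: two strided 6x6 convolutions
    # encode the image, a fully connected layer projects to a low-dimensional code y2,
    # the code is advanced by the action-conditioned Bilinear block, and the predicted
    # code is decoded back to image space through a fully connected layer and two
    # deconvolutions. With constrained=True the reconstruction is passed through a
    # TanH so the predicted next image stays in a bounded range.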
assert len(input_shapes) == 2
image0_shape, vel_shape = input_shapes
assert len(image0_shape) == 3
assert len(vel_shape) == 1
image0_num_channel = image0_shape[0]
num_channel = kwargs.get('num_channel') or 16
image1_num_channel = num_channel
image2_num_channel = num_channel
image1_shape = (image1_num_channel, image0_shape[1]//2, image0_shape[2]//2)
image2_shape = (image2_num_channel, image1_shape[1]//2, image1_shape[2]//2)
y0_dim = image0_shape[1] * image0_shape[2] # 1024
y2_dim = kwargs.get('y2_dim') or 64
u_dim = vel_shape[0]
conv1_kwargs = dict(param=[dict(name='conv1', lr_mult=1, decay_mult=1), dict(name='conv1_bias', lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image1_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
conv2_kwargs = dict(param=[dict(name='conv2', lr_mult=1, decay_mult=1), dict(name='conv2_bias', lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image2_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
deconv0_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image0_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
deconv1_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image1_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
fc_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)],
weight_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0))
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
image0 = n.image_curr
u = n.vel
n.image1 = L.Convolution(image0, **conv1_kwargs)
n.image1 = L.ReLU(n.image1, in_place=True)
n.image2 = L.Convolution(n.image1, **conv2_kwargs)
n.image2 = L.ReLU(n.image2, in_place=True)
y2 = n.y = L.InnerProduct(n.image2, num_output=y2_dim, weight_filler=dict(type='xavier'))
y2_diff_pred = n.y_diff_pred = Bilinear(n, y2, u, y2_dim, u_dim, name='bilinear2', **fc_kwargs)
n.y2_next_pred = L.Eltwise(y2, y2_diff_pred, operation=P.Eltwise.SUM)
n.image2_next_pred_flat = L.InnerProduct(n.y2_next_pred, num_output=np.prod(image2_shape), weight_filler=dict(type='xavier'))
n.image2_next_pred = L.Reshape(n.image2_next_pred_flat, shape=dict(dim=[0]+list(image2_shape)))
n.image1_next_pred = L.Deconvolution(n.image2_next_pred, **deconv1_kwargs)
n.image1_next_pred = L.ReLU(n.image1_next_pred, in_place=True)
if constrained:
n.image_next_pred_unconstrained = L.Deconvolution(n.image1_next_pred, **deconv0_kwargs)
image0_next_pred = n.image_next_pred = L.TanH(n.image_next_pred_unconstrained)
else:
n.image_next_pred = L.Deconvolution(n.image1_next_pred, **deconv0_kwargs)
image0_next = n.image_next = L.Eltwise(n.image_curr, n.image_diff, operation=P.Eltwise.SUM)
n.image0_next_loss = L.EuclideanLoss(image0_next, image0_next_pred)
net = n.to_proto()
if net_name is None:
net_name = 'SmallActionCondEncoderNet'
if constrained:
net_name += '_constrained'
net_name +='_num_channel' + str(num_channel)
net_name += '_y2_dim' + str(y2_dim)
net.name = net_name
return net, None
def downsampled_small_action_cond_encoder_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name='DownsampledSmallActionCondEncoderNet', phase=None):
assert len(input_shapes) == 2
image_shape, vel_shape = input_shapes
assert len(image_shape) == 3
assert len(vel_shape) == 1
y_dim = 32
u_dim = vel_shape[0]
conv_num_output = 16
conv2_wh = 4
blur_conv_kwargs = dict(param=[dict(lr_mult=0, decay_mult=0)],
convolution_param=dict(num_output=1,
kernel_size=5,
stride=2,
pad=2,
bias_term=False))
conv_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=conv_num_output,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
deconv_kwargs = conv_kwargs
deconv_kwargs1 = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=1,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
fc_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)],
weight_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0))
weight_fillers = OrderedDict()
ds_kernel = cv2.getGaussianKernel(ksize=5, sigma=-1)
ds_weight_filler = ds_kernel.dot(ds_kernel.T)
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
n.image_curr_ds = L.Convolution(n.image_curr, name='blur_conv1', **blur_conv_kwargs)
n.image_diff_ds = L.Convolution(n.image_diff, name='blur_conv2', **blur_conv_kwargs)
weight_fillers['image_curr_ds'] = [ds_weight_filler]
weight_fillers['image_diff_ds'] = [ds_weight_filler]
n.conv1 = L.Convolution(n.image_curr_ds, **conv_kwargs)
n.relu1 = L.ReLU(n.conv1, name='relu1', in_place=True)
n.conv2 = L.Convolution(n.relu1, **conv_kwargs)
n.relu2 = L.ReLU(n.conv2, name='relu2', in_place=True)
n.y = L.InnerProduct(n.relu2, num_output=y_dim, weight_filler=dict(type='xavier'))
u = n.vel
n.y_diff_pred = Bilinear(n, n.y, u, y_dim, u_dim, **fc_kwargs)
n.y_next_pred = L.Eltwise(n.y, n.y_diff_pred, operation=P.Eltwise.SUM)
n.ip2 = L.InnerProduct(n.y_next_pred, name='ip2', num_output=conv_num_output*conv2_wh**2, weight_filler=dict(type='xavier'))
n.re_y_next_pred = L.Reshape(n.ip2, shape=dict(dim=[batch_size, conv_num_output, conv2_wh, conv2_wh]))
n.deconv2 = L.Deconvolution(n.re_y_next_pred, **deconv_kwargs)
n.derelu2 = L.ReLU(n.deconv2, in_place=True)
n.deconv1 = L.Deconvolution(n.derelu2, **deconv_kwargs1)
n.image_next_pred = L.ReLU(n.deconv1, in_place=True)
n.image_next = L.Eltwise(n.image_curr_ds, n.image_diff_ds, operation=P.Eltwise.SUM)
n.loss = L.EuclideanLoss(n.image_next_pred, n.image_next, name='loss')
net = n.to_proto()
net.name = net_name
return net, weight_fillers
def ladder_action_cond_encoder_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name=None, phase=None, constrained=True, **kwargs):
assert len(input_shapes) == 2
image0_shape, vel_shape = input_shapes
assert len(image0_shape) == 3
assert image0_shape[1] == 32
assert image0_shape[2] == 32
assert len(vel_shape) == 1
image0_num_channel = image0_shape[0]
num_channel = kwargs.get('num_channel') or 16
image1_num_channel = num_channel
image2_num_channel = num_channel
image1_shape = (image1_num_channel, 16, 16)
image2_shape = (image2_num_channel, 8, 8)
y0_dim = image0_shape[1] * image0_shape[2] # 1024
y1_dim = kwargs.get('y1_dim') or 128
y2_dim = kwargs.get('y2_dim') or 32
u_dim = vel_shape[0]
conv1_kwargs = dict(param=[dict(name='conv1', lr_mult=1, decay_mult=1), dict(name='conv1_bias', lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image1_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
conv2_kwargs = dict(param=[dict(name='conv2', lr_mult=1, decay_mult=1), dict(name='conv2_bias', lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image2_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
deconv0_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image0_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
deconv1_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image1_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
fc_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)],
weight_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0))
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
image0 = n.image_curr
u = n.vel
n.image1 = L.Convolution(image0, **conv1_kwargs)
n.image1 = L.ReLU(n.image1, in_place=True)
n.image2 = L.Convolution(n.image1, **conv2_kwargs)
n.image2 = L.ReLU(n.image2, in_place=True)
n.y2 = L.InnerProduct(n.image2, num_output=y2_dim, weight_filler=dict(type='xavier'))
n.y1 = L.InnerProduct(n.image1, num_output=y1_dim, weight_filler=dict(type='xavier'))
n.y0 = L.Flatten(image0)
n.y01 = L.Concat(n.y0, n.y1, axis=1)
n.y = L.Concat(n.y01, n.y2, axis=1)
n.y2_diff_pred = Bilinear(n, n.y2, u, y2_dim, u_dim, name='bilinear2', **fc_kwargs)
n.y1_diff_pred = Bilinear(n, n.y1, u, y1_dim, u_dim, name='bilinear1', **fc_kwargs)
n.y0_diff_pred = Bilinear(n, n.y0, u, y0_dim, u_dim, name='bilinear0', **fc_kwargs)
n.y01_diff_pred = L.Concat(n.y0_diff_pred, n.y1_diff_pred, axis=1)
n.y_diff_pred = L.Concat(n.y01_diff_pred, n.y2_diff_pred, axis=1)
n.y2_next_pred = L.Eltwise(n.y2, n.y2_diff_pred, operation=P.Eltwise.SUM)
n.image2_next_pred_flat = L.InnerProduct(n.y2_next_pred, num_output=np.prod(image2_shape), weight_filler=dict(type='xavier'))
n.image2_next_pred = L.Reshape(n.image2_next_pred_flat, shape=dict(dim=[batch_size]+list(image2_shape)))
n.y1_next_pred = L.Eltwise(n.y1, n.y1_diff_pred, operation=P.Eltwise.SUM)
n.image1_next_pred1_flat = L.InnerProduct(n.y1_next_pred, num_output=np.prod(image1_shape), weight_filler=dict(type='xavier'))
n.image1_next_pred1 = L.Reshape(n.image1_next_pred1_flat, shape=dict(dim=[batch_size]+list(image1_shape)))
n.image1_next_pred2 = L.Deconvolution(n.image2_next_pred, **deconv1_kwargs)
n.image1_next_pred = L.Eltwise(n.image1_next_pred1, n.image1_next_pred2, operation=P.Eltwise.SUM)
n.image1_next_pred = L.ReLU(n.image1_next_pred, in_place=True)
n.y0_next_pred = L.Eltwise(n.y0, n.y0_diff_pred, operation=P.Eltwise.SUM)
n.image0_next_pred0 = L.Reshape(n.y0_next_pred, shape=dict(dim=[batch_size]+list(image0_shape)))
n.image0_next_pred1 = L.Deconvolution(n.image1_next_pred, **deconv0_kwargs)
if constrained:
n.image_next_pred_unconstrained = L.Eltwise(n.image0_next_pred0, n.image0_next_pred1, operation=P.Eltwise.SUM)
image0_next_pred = n.image_next_pred = L.TanH(n.image_next_pred_unconstrained)
else:
image0_next_pred = n.image_next_pred = L.Eltwise(n.image0_next_pred0, n.image0_next_pred1, operation=P.Eltwise.SUM)
image0_next = n.image_next = L.Eltwise(n.image_curr, n.image_diff, operation=P.Eltwise.SUM)
n.image1_next = L.Convolution(image0_next, **conv1_kwargs)
n.image1_next = L.ReLU(n.image1_next, in_place=True)
n.image2_next = L.Convolution(n.image1_next, **conv2_kwargs)
n.image2_next = L.ReLU(n.image2_next, in_place=True)
n.image0_next_loss = L.EuclideanLoss(image0_next, image0_next_pred)
n.image1_next_loss = L.EuclideanLoss(n.image1_next, n.image1_next_pred)
n.image2_next_loss = L.EuclideanLoss(n.image2_next, n.image2_next_pred)
net = n.to_proto()
if net_name is None:
net_name = 'LadderActionCondEncoderNet'
if constrained:
net_name += '_constrained'
    net_name += '_num_channel' + str(num_channel)
net_name += '_y1_dim' + str(y1_dim)
net_name += '_y2_dim' + str(y2_dim)
net.name = net_name
return net, None
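# Illustrative sketch (not executed at import time) of how one of the net
# constructors above might be called; the HDF5 list file, shapes and batch size
# below are hypothetical.
def _example_build_ladder_net():
    input_shapes = [(1, 32, 32), (2,)]  # (channels, height, width) and (u_dim,)
    net, fillers = ladder_action_cond_encoder_net(input_shapes,
                                                  hdf5_txt_fname='train_hdf5_list.txt',
                                                  batch_size=32,
                                                  constrained=True)
    # net is a caffe NetParameter (from NetSpec.to_proto()); fillers is None here
    return net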
def ladder_conv_action_cond_encoder_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name=None, phase=None, constrained=True, **kwargs):
assert len(input_shapes) == 2
image0_shape, vel_shape = input_shapes
assert len(image0_shape) == 3
assert image0_shape[1] == 32
assert image0_shape[2] == 32
assert len(vel_shape) == 1
image0_num_channel = image0_shape[0]
num_channel = kwargs.get('num_channel') or 16
image1_num_channel = num_channel
image2_num_channel = num_channel
image1_shape = (image1_num_channel, 16, 16)
image2_shape = (image2_num_channel, 8, 8)
y0_dim = image0_shape[1] * image0_shape[2] # 1024
y1_dim = kwargs.get('y1_dim') or 128
y2_dim = kwargs.get('y2_dim') or 32
u_dim = vel_shape[0]
conv1_kwargs = dict(param=[dict(name='conv1', lr_mult=1, decay_mult=1), dict(name='conv1_bias', lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image1_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
conv2_kwargs = dict(param=[dict(name='conv2', lr_mult=1, decay_mult=1), dict(name='conv2_bias', lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image2_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
deconv0_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image0_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
deconv1_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image1_num_channel,
kernel_size=6,
stride=2,
pad=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
conv0_merge_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image0_num_channel,
kernel_size=1,
stride=1,
pad=0,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
conv1_merge_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=image1_num_channel,
kernel_size=1,
stride=1,
pad=0,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
fc_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)],
weight_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0))
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
image0 = n.image_curr
u = n.vel
n.image1 = L.Convolution(image0, **conv1_kwargs)
n.image1 = L.ReLU(n.image1, in_place=True)
n.image2 = L.Convolution(n.image1, **conv2_kwargs)
n.image2 = L.ReLU(n.image2, in_place=True)
n.y2 = L.InnerProduct(n.image2, num_output=y2_dim, weight_filler=dict(type='xavier'))
n.y1 = L.InnerProduct(n.image1, num_output=y1_dim, weight_filler=dict(type='xavier'))
n.y0 = L.Flatten(image0)
n.y01 = L.Concat(n.y0, n.y1, axis=1)
n.y = L.Concat(n.y01, n.y2, axis=1)
n.y2_diff_pred = Bilinear(n, n.y2, u, y2_dim, u_dim, name='bilinear2', **fc_kwargs)
n.y1_diff_pred = Bilinear(n, n.y1, u, y1_dim, u_dim, name='bilinear1', **fc_kwargs)
n.y0_diff_pred = Bilinear(n, n.y0, u, y0_dim, u_dim, name='bilinear0', **fc_kwargs)
n.y01_diff_pred = L.Concat(n.y0_diff_pred, n.y1_diff_pred, axis=1)
n.y_diff_pred = L.Concat(n.y01_diff_pred, n.y2_diff_pred, axis=1)
n.y2_next_pred = L.Eltwise(n.y2, n.y2_diff_pred, operation=P.Eltwise.SUM)
n.image2_next_pred_flat = L.InnerProduct(n.y2_next_pred, num_output=np.prod(image2_shape), weight_filler=dict(type='xavier'))
n.image2_next_pred = L.Reshape(n.image2_next_pred_flat, shape=dict(dim=[batch_size]+list(image2_shape)))
n.y1_next_pred = L.Eltwise(n.y1, n.y1_diff_pred, operation=P.Eltwise.SUM)
n.image1_next_pred1_flat = L.InnerProduct(n.y1_next_pred, num_output=np.prod(image1_shape), weight_filler=dict(type='xavier'))
n.image1_next_pred1 = L.Reshape(n.image1_next_pred1_flat, shape=dict(dim=[batch_size]+list(image1_shape)))
n.image1_next_pred2 = L.Deconvolution(n.image2_next_pred, **deconv1_kwargs)
n.image1_next_pred12 = L.Concat(n.image1_next_pred1, n.image1_next_pred2, concat_param=dict(axis=1))
n.image1_next_pred = L.Convolution(n.image1_next_pred12, **conv1_merge_kwargs)
n.image1_next_pred = L.ReLU(n.image1_next_pred, in_place=True)
n.y0_next_pred = L.Eltwise(n.y0, n.y0_diff_pred, operation=P.Eltwise.SUM)
n.image0_next_pred0 = L.Reshape(n.y0_next_pred, shape=dict(dim=[batch_size]+list(image0_shape)))
n.image0_next_pred1 = L.Deconvolution(n.image1_next_pred, **deconv0_kwargs)
n.image0_next_pred01 = L.Concat(n.image0_next_pred0, n.image0_next_pred1, concat_param=dict(axis=1))
if constrained:
n.image_next_pred_unconstrained = L.Convolution(n.image0_next_pred01, **conv0_merge_kwargs)
image0_next_pred = n.image_next_pred = L.TanH(n.image_next_pred_unconstrained)
else:
image0_next_pred = n.image_next_pred = L.Convolution(n.image0_next_pred01, **conv0_merge_kwargs)
image0_next = n.image_next = L.Eltwise(n.image_curr, n.image_diff, operation=P.Eltwise.SUM)
n.image1_next = L.Convolution(image0_next, **conv1_kwargs)
n.image1_next = L.ReLU(n.image1_next, in_place=True)
n.image2_next = L.Convolution(n.image1_next, **conv2_kwargs)
n.image2_next = L.ReLU(n.image2_next, in_place=True)
n.image0_next_loss = L.EuclideanLoss(image0_next, image0_next_pred)
n.image1_next_loss = L.EuclideanLoss(n.image1_next, n.image1_next_pred)
n.image2_next_loss = L.EuclideanLoss(n.image2_next, n.image2_next_pred)
net = n.to_proto()
if net_name is None:
net_name = 'LadderConvActionCondEncoderNet'
if constrained:
net_name += '_constrained'
    net_name += '_num_channel' + str(num_channel)
net_name += '_y1_dim' + str(y1_dim)
net_name += '_y2_dim' + str(y2_dim)
net.name = net_name
return net, None
def ConvolutionPooling(n, image, conv_kwargs, pool_kwargs, name=''):
conv_1_kwargs = copy.deepcopy(conv_kwargs)
for param in conv_1_kwargs['param']:
if 'name' in param:
param['name'] += '_1'
conv_2_kwargs = copy.deepcopy(conv_kwargs)
for param in conv_2_kwargs['param']:
if 'name' in param:
param['name'] += '_2'
conv_1 = n.tops['conv'+name+'_1'] = L.Convolution(image, **conv_1_kwargs)
n.tops['relu'+name+'_1'] = L.ReLU(conv_1, in_place=True)
conv_2 = n.tops['conv'+name+'_2'] = L.Convolution(conv_1, **conv_2_kwargs)
n.tops['relu'+name+'_2'] = L.ReLU(conv_2, in_place=True)
return L.Pooling(conv_2, name='pool'+name, **pool_kwargs)
def DeconvolutionUpsample(n, image, deconv_kwargs, upsample_kwargs, name=''):
for param in deconv_kwargs['param']:
assert 'name' not in param
deconv_1 = n.tops['deconv'+name+'_1'] = L.Deconvolution(image, **deconv_kwargs)
n.tops['derelu'+name+'_1'] = L.ReLU(deconv_1, in_place=True)
deconv_2 = n.tops['deconv'+name+'_2'] = L.Deconvolution(deconv_1, **deconv_kwargs)
n.tops['derelu'+name+'_2'] = L.ReLU(deconv_2, in_place=True)
return L.Deconvolution(deconv_2, **upsample_kwargs)
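# Minimal sketch of how the two helpers above could be used inside a NetSpec;
# the layer sizes and the name suffix are illustrative only.
def _example_conv_pool_block(n, image):
    conv_kwargs = dict(param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)],
                       convolution_param=dict(num_output=16, kernel_size=3, stride=1, pad=1,
                                              weight_filler=dict(type='gaussian', std=0.01),
                                              bias_filler=dict(type='constant', value=0)))
    pool_kwargs = dict(pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
    # two 3x3 convolutions (each followed by ReLU) and a 2x2 max-pooling
    return ConvolutionPooling(n, image, conv_kwargs, pool_kwargs, name='1')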
def conv_kwargs_(num_output, kernel_size, stride, pad,
lr_mult=1, lr_mult_bias=1,
name=None, name_bias=None,
weight_filler_type='gaussian'):
param = [dict(lr_mult=lr_mult, decay_mult=lr_mult), dict(lr_mult=lr_mult_bias, decay_mult=lr_mult_bias)]
if name is not None:
param[0]['name'] = name
if name_bias is not None:
param[1]['name'] = name_bias
if weight_filler_type == 'gaussian':
weight_filler = dict(type='gaussian', std=0.01)
elif weight_filler_type == 'constant':
weight_filler = dict(type='constant', value=0)
elif weight_filler_type == 'bilinear':
weight_filler = dict(type='bilinear')
else:
        raise ValueError('unknown weight_filler_type: %s' % weight_filler_type)
convolution_param = dict(num_output=num_output,
kernel_size=kernel_size,
stride=stride,
pad=pad,
weight_filler=weight_filler,
bias_filler=dict(type='constant', value=0))
if weight_filler_type == 'bilinear':
convolution_param['group'] = num_output
convolution_param['bias_term'] = False
param.pop()
conv_kwargs = dict(param=param, convolution_param=convolution_param)
return conv_kwargs
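# For example (illustrative values), a 2x bilinear-upsampling deconvolution for a
# 16-channel blob could be configured with
#     conv_kwargs_(num_output=16, kernel_size=2, stride=2, pad=0, weight_filler_type='bilinear')
# which, per the branch above, sets group=16, drops the bias term and keeps a
# single (weight) param entry.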
def ImageBilinear(n, image, u, image_shape, u_dim, bilinear_kwargs,
y_channel_dim=1, axis=1, name=''):
assert len(bilinear_kwargs['param']) == 3
assert axis == 1 or axis == 2
fc_outer_yu_kwargs = dict(param=[bilinear_kwargs['param'][0], dict(lr_mult=0, decay_mult=0)],
weight_filler=bilinear_kwargs['bilinear_filler'],
bias_filler=dict(type='constant', value=0))
fc_u_kwargs = dict(param=[bilinear_kwargs['param'][1], bilinear_kwargs['param'][2]],
weight_filler=bilinear_kwargs['linear_filler'],
bias_filler=bilinear_kwargs['bias_filler'])
y_channel_dim = y_channel_dim or image_shape[0]
# image -> y
if y_channel_dim != image_shape[0]:
y_conv1_kwargs = conv_kwargs_(y_channel_dim, 1, 1, 0)
y = n.tops['y'+name] = L.Convolution(image, **y_conv1_kwargs)
y_shape = (y_channel_dim,) + image_shape[1:]
else:
y = image
y_shape = image_shape
n.tops['y'+name+'_flat'] = L.Flatten(y)
y_dim = np.prod(y_shape)
# y, u -> outer_yu
re_y_shape = (0,)*axis + (1, -1) # e.g. (N, C, 1, I)
re_y = n.tops['bilinear'+name+'_re_y'] = L.Reshape(y, shape=dict(dim=list(re_y_shape)))
tile_re_y = n.tops['bilinear'+name+'_tile_re_y'] = L.Tile(re_y, axis=axis, tiles=u_dim)
re_u_shape = (0,) + (1,)*(axis-1) + (-1, 1) # e.g. (N, 1, J, 1)
re_u = n.tops['bilinear'+name+'_re_u'] = L.Reshape(u, shape=dict(dim=list(re_u_shape)))
if axis != 1:
tile_re_u1 = n.tops['bilinear'+name+'_tile_re_u1'] = L.Tile(re_u, axis=axis+1, tiles=np.prod(y_shape[axis-1:]))
tile_re_u = n.tops['bilinear'+name+'_tile_re_u'] = L.Tile(tile_re_u1, axis=1, tiles=np.prod(y_shape[0]))
else:
tile_re_u = n.tops['bilinear'+name+'_tile_re_u'] = L.Tile(re_u, axis=axis+1, tiles=np.prod(y_shape[axis-1:]))
outer_yu = n.tops['bilinear'+name+'_outer_yu'] = L.Eltwise(tile_re_y, tile_re_u, operation=P.Eltwise.PROD) # e.g. (N, C, J, I)
# outer_yu, u -> y_next_pred
# bilinear term
bilinear_yu = n.tops['bilinear'+name+'_bilinear_yu'] = L.InnerProduct(outer_yu, num_output=np.prod(y_shape[axis-1:]), axis=axis, **fc_outer_yu_kwargs) # e.g. (N, C, I)
# linear and bias terms
fc_u = n.tops['bilinear'+name+'_linear_u'] = L.InnerProduct(u, num_output=np.prod(y_shape[axis-1:]), **fc_u_kwargs) # e.g. (N, I)
if axis != 1:
re_fc_u_shape = (0,) + (1,)*(axis-1) + (np.prod(y_shape[axis-1:]),) # e.g. (N, 1, I)
re_fc_u = n.tops['bilinear'+name+'_re_fc_u'] = L.Reshape(fc_u, shape=dict(dim=list(re_fc_u_shape)))
linear_u = n.tops['bilinear'+name+'_tile_re_fc_u'] = L.Tile(re_fc_u, axis=1, tiles=y_channel_dim) # e.g. (N, C, I)
else:
linear_u = fc_u
bilinear_yu_linear_u = n.tops['bilinear'+name+'_bilinear_yu_linear_u'] = L.Eltwise(bilinear_yu, linear_u, operation=P.Eltwise.SUM) # e.g. (N, C, I)
n.tops['y'+name+'_diff_pred_flat'] = L.Flatten(bilinear_yu_linear_u)
y_diff_pred = n.tops['y'+name+'_diff_pred'] = L.Reshape(bilinear_yu_linear_u, shape=dict(dim=list((0,) + y_shape)))
y_next_pred = n.tops['y'+name+'_next_pred'] = L.Eltwise(y, y_diff_pred, operation=P.Eltwise.SUM)
if y_channel_dim != image_shape[0]:
y_diff_conv2_kwargs = conv_kwargs_(image_shape[0], 1, 1, 0)
image_next_pred = L.Convolution(y_next_pred, **y_diff_conv2_kwargs)
else:
image_next_pred = L.Concat(y_next_pred, concat_param=dict(axis=1)) # proxy for identity layer
return image_next_pred
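# For reference, the following pure-numpy function sketches the per-sample update
# that ImageBilinear assembles out of Reshape/Tile/Eltwise/InnerProduct layers
# (axis=1 case); the array names here are illustrative and are not layer names.
def _bilinear_dynamics_sketch(y, u, W, V, b):
    """y_next = y + W.dot(outer(u, y).ravel()) + V.dot(u) + b
    with assumed shapes y: (I,), u: (J,), W: (I, J*I), V: (I, J), b: (I,)."""
    y_diff = W.dot(np.outer(u, y).ravel()) + V.dot(u) + b
    return y + y_diff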
def ImageBilinearChannelwise(n, x, u, x_shape, u_dim, bilinear_kwargs, axis=1, name='', share_weights=True):
assert len(bilinear_kwargs['param']) == 4
assert axis == 1 or axis == 2
fc_outer_yu_kwargs = dict(param=[bilinear_kwargs['param'][0]],
bias_term=False,
weight_filler=bilinear_kwargs['bilinear_filler'])
fc_y_kwargs = dict(param=bilinear_kwargs['param'][1],
bias_term=False,
weight_filler=bilinear_kwargs['linear_y_filler'])
fc_u_kwargs = dict(param=bilinear_kwargs['param'][2:],
weight_filler=bilinear_kwargs['linear_u_filler'],
bias_filler=bilinear_kwargs['bias_filler'])
# y, u -> outer_yu
re_y_shape = (0,)*axis + (1, -1) # e.g. (N, 1, CI) or (N, C, 1, I)
re_y = n.tops['bilinear'+name+'_re_y'] = L.Reshape(x, shape=dict(dim=list(re_y_shape)))
tile_re_y = n.tops['bilinear'+name+'_tile_re_y'] = L.Tile(re_y, axis=axis, tiles=u_dim) # (N, J, CI) or (N, C, J, I)
re_u_shape = (0,) + (1,)*(axis-1) + (-1, 1) # e.g. (N, J, 1) or (N, 1, J, 1)
re_u = n.tops['bilinear'+name+'_re_u'] = L.Reshape(u, shape=dict(dim=list(re_u_shape)))
if axis == 1:
tile_re_u = n.tops['bilinear'+name+'_tile_re_u'] = L.Tile(re_u, axis=axis+1, tiles=np.prod(x_shape[axis-1:])) # (N, J, CI)
else:
tile_re_u1 = n.tops['bilinear'+name+'_tile_re_u1'] = L.Tile(re_u, axis=axis+1, tiles=np.prod(x_shape[axis-1:])) # (N, 1, J, I)
tile_re_u = n.tops['bilinear'+name+'_tile_re_u'] = L.Tile(tile_re_u1, axis=1, tiles=np.prod(x_shape[0])) # (N, C, J, I)
outer_yu = n.tops['bilinear'+name+'_outer_yu'] = L.Eltwise(tile_re_y, tile_re_u, operation=P.Eltwise.PROD) # e.g. (N, J, CI) or (N, C, J, I)
# outer_yu, u -> y_next_pred
if share_weights:
# bilinear term
bilinear_yu = n.tops['bilinear'+name+'_bilinear_yu'] = L.InnerProduct(outer_yu, num_output=np.prod(x_shape[axis-1:]), axis=axis, **fc_outer_yu_kwargs) # e.g. (N, CI) or (N, C, I)
# linear
linear_y = n.tops['bilinear'+name+'_linear_y'] = L.InnerProduct(x, num_output=np.prod(x_shape[axis-1:]), axis=axis, **fc_y_kwargs) # e.g. (N, CI) or (N, I)
# linear and bias terms
fc_u = n.tops['bilinear'+name+'_linear_u'] = L.InnerProduct(u, num_output=np.prod(x_shape[axis-1:]), **fc_u_kwargs) # e.g. (N, CI) or (N, I)
if axis == 1:
linear_u = fc_u
else:
re_fc_u_shape = (0,) + (1,)*(axis-1) + (np.prod(x_shape[axis-1:]),) # e.g. (N, 1, I)
re_fc_u = n.tops['bilinear'+name+'_re_fc_u'] = L.Reshape(fc_u, shape=dict(dim=list(re_fc_u_shape)))
linear_u = n.tops['bilinear'+name+'_tile_re_fc_u'] = L.Tile(re_fc_u, axis=1, tiles=x_shape[0]) # e.g. (N, C, I)
bilinear_yu_linear_u = n.tops['bilinear'+name+'_bilinear_yu_linear_u'] = L.Eltwise(bilinear_yu, linear_y, linear_u, operation=P.Eltwise.SUM) # e.g. (N, C, I)
else:
assert axis == 2
# bilinear term
outer_yu_channels = L.Slice(outer_yu, ntop=x_shape[0], slice_param=dict(axis=1, slice_point=list(range(1, x_shape[0]))))
bilinear_yu_channels = []
for channel, outer_yu_channel in enumerate(outer_yu_channels):
n.tops['bilinear'+name+'_outer_yu_%d'%channel] = outer_yu_channel
n.tops['bilinear'+name+'_bilinear_yu_%d'%channel] = \
bilinear_yu_channel = L.InnerProduct(outer_yu_channel, num_output=np.prod(x_shape[axis-1:]), axis=axis, **fc_outer_yu_kwargs) # e.g. (N, 1, I)
bilinear_yu_channels.append(bilinear_yu_channel)
bilinear_yu = n.tops['bilinear'+name+'_bilinear_yu'] = L.Concat(*bilinear_yu_channels, axis=1) # e.g. (N, C, I)
# linear
y_channels = L.Slice(x, ntop=x_shape[0], slice_param=dict(axis=1, slice_point=list(range(1, x_shape[0]))))
linear_y_channels = []
for channel, y_channel in enumerate(y_channels):
n.tops['bilinear'+name+'_y_%d'%channel] = y_channel
n.tops['bilinear'+name+'_linear_y_%d'%channel] = \
linear_y_channel = L.InnerProduct(y_channel, num_output=np.prod(x_shape[axis-1:]), axis=axis, **fc_y_kwargs) # e.g. (N, 1, I)
linear_y_channels.append(linear_y_channel)
linear_y = n.tops['bilinear'+name+'_linear_y'] = L.Concat(*linear_y_channels, axis=1) # e.g. (N, C, I)
# linear and bias terms
re_fc_u_shape = (0,) + (1,)*(axis-1) + (np.prod(x_shape[axis-1:]),) # e.g. (N, 1, I)
linear_u_channels = []
for channel in range(x_shape[0]):
fc_u_channel = n.tops['bilinear'+name+'_linear_u_%d'%channel] = L.InnerProduct(u, num_output=np.prod(x_shape[axis-1:]), **fc_u_kwargs) # e.g. (N, I)
linear_u_channel = n.tops['bilinear'+name+'_re_fc_u_%d'%channel] = L.Reshape(fc_u_channel, shape=dict(dim=list(re_fc_u_shape))) # e.g. (N, 1, I)
linear_u_channels.append(linear_u_channel)
linear_u = n.tops['bilinear'+name+'_linear_u'] = L.Concat(*linear_u_channels, axis=1) # e.g. (N, C, I)
bilinear_yu_linear_u = n.tops['bilinear'+name+'_bilinear_yu_linear_u'] = L.Eltwise(bilinear_yu, linear_y, linear_u, operation=P.Eltwise.SUM) # e.g. (N, C, I)
x_diff_pred = L.Reshape(bilinear_yu_linear_u, shape=dict(dim=list((0,) + x_shape)))
return x_diff_pred
def fcn_action_cond_encoder_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name=None, phase=None, levels=None, x1_c_dim=16, num_downsample=0, share_bilinear_weights=True, ladder_loss=True, batch_normalization=False, concat=False, **kwargs):
x_shape, u_shape = input_shapes
assert len(x_shape) == 3
assert len(u_shape) == 1
u_dim = u_shape[0]
levels = levels or [3]
levels = sorted(set(levels))
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname, shuffle=True)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
x, u = n.image_curr, n.vel
weight_fillers = OrderedDict()
if num_downsample:
ds_kernel = cv2.getGaussianKernel(ksize=2, sigma=-1)
ds_weight_filler = ds_kernel.dot(ds_kernel.T)
# preprocess
x0 = x
x0_shape = x_shape
for i_ds in range(num_downsample):
n.tops['x0_ds%d'%(i_ds+1)] = \
x0 = L.Convolution(x0,
param=[dict(lr_mult=0, decay_mult=0)],
convolution_param=dict(num_output=x0_shape[0], kernel_size=2, stride=2, pad=0,
group=x0_shape[0], bias_term=False))
weight_fillers['x0_ds%d'%(i_ds+1)] = [ds_weight_filler]
x0_shape = (x0_shape[0], x0_shape[1]//2, x0_shape[2]//2)
if num_downsample > 0:
n.x0 = n.tops.pop('x0_ds%d'%num_downsample)
weight_fillers['x0'] = weight_fillers.pop('x0_ds%d'%num_downsample)
# encoding
xlevels = OrderedDict()
xlevels_shape = OrderedDict()
for level in range(levels[-1]+1):
if level == 0:
xlevel = x0
xlevel_shape = x0_shape
else:
if level == 1:
xlevelm1_c_dim = x0_shape[0]
xlevel_c_dim = x1_c_dim
else:
xlevelm1_c_dim = xlevel_c_dim
xlevel_c_dim = 2 * xlevelm1_c_dim
n.tops['x%d_1'%level] = \
xlevel_1 = L.Convolution(xlevels[level-1],
param=[dict(name='x%d_1.w'%level, lr_mult=1, decay_mult=1),
dict(name='x%d_1.b'%level, lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=xlevel_c_dim, kernel_size=3, stride=1, pad=1,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
if batch_normalization:
n.tops['bnx%d_1'%level] = \
xlevel_1 = L.BatchNorm(xlevel_1, param=[dict(lr_mult=0)]*3, batch_norm_param=dict(use_global_stats=(phase == caffe.TRAIN)))
n.tops['rx%d_1'%level] = L.ReLU(xlevel_1, in_place=True)
n.tops['x%d_2'%level] = \
xlevel_2 = L.Convolution(xlevel_1,
param=[dict(name='x%d_2.w'%level, lr_mult=1, decay_mult=1),
dict(name='x%d_2.b'%level, lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=xlevel_c_dim, kernel_size=3, stride=1, pad=1,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
if batch_normalization:
n.tops['bnx%d_2'%level] = \
xlevel_2 = L.BatchNorm(xlevel_2, param=[dict(lr_mult=0)]*3, batch_norm_param=dict(use_global_stats=(phase == caffe.TRAIN)))
n.tops['rx%d_2'%level] = L.ReLU(xlevel_2, in_place=True)
n.tops['x%d'%level] = \
xlevel = L.Pooling(xlevel_2, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
xlevel_shape = (xlevel_c_dim, xlevels_shape[level-1][1]//2, xlevels_shape[level-1][2]//2)
xlevels[level] = xlevel
xlevels_shape[level] = xlevel_shape
# bilinear
xlevels_next_pred_s0 = OrderedDict() # 0th summand
ylevels = OrderedDict()
ylevels_diff_pred = OrderedDict()
ylevels_next_pred = OrderedDict()
for level in levels:
xlevel, xlevel_shape = xlevels[level], xlevels_shape[level]
n.tops['x%d_diff_pred_s0'%level] = \
xlevel_diff_pred_s0 = ImageBilinearChannelwise(n, xlevel, u, xlevel_shape, u_dim,
dict(param=[dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1)],
bilinear_filler=dict(type='gaussian', std=0.001),
linear_y_filler=dict(type='gaussian', std=0.001),
linear_u_filler=dict(type='gaussian', std=0.001),
bias_filler=dict(type='constant', value=0)),
axis=2,
name=str(level),
share_weights=share_bilinear_weights)
ylevels[level] = n.tops['y%d'%level] = L.Flatten(xlevel)
ylevels_diff_pred[level] = n.tops['y%d_diff_pred'%level] = L.Flatten(xlevel_diff_pred_s0)
xlevels_next_pred_s0[level] = n.tops['x%d_next_pred_s0'%level] = L.Eltwise(xlevel, xlevel_diff_pred_s0, operation=P.Eltwise.SUM)
ylevels_next_pred[level] = n.tops['y%d_next_pred'%level] = L.Flatten(xlevels_next_pred_s0[level])
# features
n.y = L.Concat(*ylevels.values(), concat_param=dict(axis=1))
n.y_diff_pred = L.Concat(*ylevels_diff_pred.values(), concat_param=dict(axis=1)) # FIXME: maybe should use merged y
n.y_next_pred = L.Concat(*ylevels_next_pred.values(), concat_param=dict(axis=1))
# decoding
xlevels_next_pred = OrderedDict()
for level in range(levels[-1]+1)[::-1]:
if level == levels[-1]:
xlevel_next_pred = xlevels_next_pred_s0[level]
else:
if level == 0:
xlevel_c_dim = x1_c_dim
xlevelm1_c_dim = x0_shape[0]
elif level < levels[-1]-1:
xlevel_c_dim = xlevelm1_c_dim
xlevelm1_c_dim = xlevel_c_dim // 2
n.tops['x%d_next_pred_2'%(level+1)] = \
xlevel_next_pred_2 = L.Deconvolution(xlevels_next_pred[level+1],
param=[dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=xlevel_c_dim, kernel_size=2, stride=2, pad=0,
group=xlevel_c_dim, bias_term=False,
weight_filler=dict(type='bilinear')))
# TODO: nonlinearity needed?
n.tops['x%d_next_pred_1'%(level+1)] = \
xlevel_next_pred_1 = L.Deconvolution(xlevel_next_pred_2,
param=[dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=xlevel_c_dim, kernel_size=3, stride=1, pad=1,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
if batch_normalization:
n.tops['bnx%d_next_pred_1'%(level+1)] = \
xlevel_next_pred_1 = L.BatchNorm(xlevel_next_pred_1, param=[dict(lr_mult=0)]*3, batch_norm_param=dict(use_global_stats=(phase == caffe.TRAIN)))
n.tops['rx%d_next_pred_1'%(level+1)] = L.ReLU(xlevel_next_pred_1, in_place=True)
if concat:
if level in xlevels_next_pred_s0:
n.tops['cx%d_next_pred_1'%(level+1)] = \
xlevel_next_pred_1 = L.Concat(xlevels_next_pred_s0[level], xlevel_next_pred_1)
n.tops['x%d_next_pred'%level] = \
        xlevel_next_pred = L.Deconvolution(xlevel_next_pred_1,
param=[dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=xlevelm1_c_dim, kernel_size=3, stride=1, pad=1,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
if level != 0: # or level in xlevels_next_pred_s0:
if batch_normalization:
n.tops['bnx%d_next_pred'%level] = \
xlevel_next_pred = L.BatchNorm(xlevel_next_pred, param=[dict(lr_mult=0)]*3, batch_norm_param=dict(use_global_stats=(phase == caffe.TRAIN)))
n.tops['rx%d_next_pred'%level] = L.ReLU(xlevel_next_pred, in_place=True)
else:
n.tops['x%d_next_pred_s1'%level] = \
xlevel_next_pred_s1 = L.Deconvolution(xlevel_next_pred_1,
param=[dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=xlevelm1_c_dim, kernel_size=3, stride=1, pad=1,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
if level != 0: # or level in xlevels_next_pred_s0:
if batch_normalization:
n.tops['bnx%d_next_pred_s1'%level] = \
xlevel_next_pred_s1 = L.BatchNorm(xlevel_next_pred_s1, param=[dict(lr_mult=0)]*3, batch_norm_param=dict(use_global_stats=(phase == caffe.TRAIN)))
n.tops['rx%d_next_pred_s1'%level] = L.ReLU(xlevel_next_pred_s1, in_place=True)
if level in xlevels_next_pred_s0:
# sum using fixed coeffs
# n.tops['x%d_next_pred'%level] = \
# xlevel_next_pred = L.Eltwise(xlevels_next_pred_s0[level], xlevel_next_pred_s1, operation=P.Eltwise.SUM, coeff=[.5, .5])
# workaround to sum using learnable coeffs
xlevel_shape = xlevels_shape[level]
re_xlevel_next_pred_s0 = n.tops['re_x%d_next_pred_s0'%level] = L.Reshape(xlevels_next_pred_s0[level], shape=dict(dim=list((0, 1, -1))))
re_xlevel_next_pred_s1 = n.tops['re_x%d_next_pred_s1'%level] = L.Reshape(xlevel_next_pred_s1, shape=dict(dim=list((0, 1, -1))))
re_xlevel_next_pred_s01 = n.tops['re_x%d_next_pred_s01'%level] = L.Concat(re_xlevel_next_pred_s0, re_xlevel_next_pred_s1)
n.tops['re_x%d_next_pred'%level] = \
re_xlevel_next_pred = L.Convolution(re_xlevel_next_pred_s01,
param=[dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=1, kernel_size=1, stride=1, pad=0,
bias_term=False,
weight_filler=dict(type='constant', value=0.5),
engine=P.Convolution.CAFFE))
xlevel_next_pred = n.tops['re_x%d_next_pred'%level] = L.Reshape(re_xlevel_next_pred, shape=dict(dim=list((0,) + xlevel_shape)))
else:
n.tops['x%d_next_pred'%level] = n.tops.pop('x%d_next_pred_s1'%level)
xlevel_next_pred = xlevel_next_pred_s1
xlevels_next_pred[level] = xlevel_next_pred
x_next_pred = n.image_next_pred = L.TanH(xlevels_next_pred[0])
x0_next_pred = x_next_pred
if num_downsample > 0:
n.x0_next_pred = n.tops.pop('image_next_pred') # for consistent name (i.e. all image or all x0)
x_next = n.image_next = L.Eltwise(n.image_curr, n.image_diff, operation=P.Eltwise.SUM)
# preprocess
x0_next = x_next
for i_ds in range(num_downsample):
n.tops['x0_next_ds%d'%(i_ds+1)] = \
x0_next = L.Convolution(x0_next,
param=[dict(lr_mult=0, decay_mult=0)],
convolution_param=dict(num_output=x0_shape[0], kernel_size=2, stride=2, pad=0,
group=x0_shape[0], bias_term=False))
weight_fillers['x0_next_ds%d'%(i_ds+1)] = [ds_weight_filler]
if num_downsample > 0:
n.x0_next = n.tops.pop('x0_next_ds%d'%num_downsample)
weight_fillers['x0_next'] = weight_fillers.pop('x0_next_ds%d'%num_downsample)
n.x0_next_loss = L.EuclideanLoss(x0_next, x0_next_pred)
if ladder_loss:
# encoding of next image
xlevels_next = OrderedDict()
for level in range(levels[-1]+1):
if level == 0:
xlevel_next = x0_next
else:
if level == 1:
xlevelm1_c_dim = x0_shape[0]
xlevel_c_dim = x1_c_dim
else:
xlevelm1_c_dim = xlevel_c_dim
xlevel_c_dim = 2 * xlevelm1_c_dim
n.tops['x%d_next_1'%level] = \
xlevel_next_1 = L.Convolution(xlevels_next[level-1],
param=[dict(name='x%d_1.w'%level, lr_mult=1, decay_mult=1),
dict(name='x%d_1.b'%level, lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=xlevel_c_dim, kernel_size=3, stride=1, pad=1,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
if batch_normalization:
n.tops['bnx%d_next_1'%level] = \
xlevel_next_1 = L.BatchNorm(xlevel_next_1, param=[dict(lr_mult=0)]*3, batch_norm_param=dict(use_global_stats=(phase == caffe.TRAIN)))
n.tops['rx%d_next_1'%level] = L.ReLU(xlevel_next_1, in_place=True)
n.tops['x%d_next_2'%level] = \
xlevel_next_2 = L.Convolution(xlevel_next_1,
param=[dict(name='x%d_2.w'%level, lr_mult=1, decay_mult=1),
dict(name='x%d_2.b'%level, lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=xlevel_c_dim, kernel_size=3, stride=1, pad=1,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
if batch_normalization:
n.tops['bnx%d_next_2'%level] = \
xlevel_next_2 = L.BatchNorm(xlevel_next_2, param=[dict(lr_mult=0)]*3, batch_norm_param=dict(use_global_stats=(phase == caffe.TRAIN)))
n.tops['rx%d_next_2'%level] = L.ReLU(xlevel_next_2, in_place=True)
n.tops['x%d_next'%level] = \
xlevel_next = L.Pooling(xlevel_next_2, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
xlevels_next[level] = xlevel_next
for level in levels:
if level == 0:
continue
n.tops['x%d_next_loss'%level] = L.EuclideanLoss(xlevels_next[level], xlevels_next_pred[level], loss_weight=1.)
net = n.to_proto()
if net_name is None:
net_name = 'FcnActionCondEncoderNet'
    net_name += '_levels' + ''.join([str(level) for level in levels])
net_name += '_x1cdim' + str(x1_c_dim)
net_name += '_numds' + str(num_downsample)
net_name += '_share' + str(int(share_bilinear_weights))
net_name += '_ladder' + str(int(ladder_loss))
net_name += '_bn' + str(int(batch_normalization))
if concat:
net_name += '_concat' + str(int(concat))
net.name = net_name
return net, weight_fillers
def paper_action_cond_encoder_net(input_shapes, hdf5_txt_fname='', batch_size=1, net_name=None, phase=None, **kwargs):
x_shape, u_shape = input_shapes
assert len(x_shape) == 3
assert len(u_shape) == 1
u_dim = u_shape[0]
y_dim = 2048
n = caffe.NetSpec()
data_kwargs = dict(name='data', ntop=3, batch_size=batch_size, source=hdf5_txt_fname, shuffle=True)
if phase is not None:
data_kwargs.update(dict(include=dict(phase=phase)))
n.image_curr, n.image_diff, n.vel = L.HDF5Data(**data_kwargs)
# encoding
n.conv1 = L.Convolution(n.image_curr, name='conv1', num_output=64, kernel_size=8, pad_h=0, pad_w=1,stride=2)
n.relu1 = L.ReLU(n.conv1, name='relu1', in_place=True)
n.conv2 = L.Convolution(n.conv1, name='conv2', num_output=128, kernel_size=6, pad=1, stride=2)
n.relu2 = L.ReLU(n.conv2, name='relu2', in_place=True)
n.conv3 = L.Convolution(n.conv2, name='conv3', num_output=128, kernel_size=6, pad=1, stride=2)
n.relu3 = L.ReLU(n.conv3, name='relu3', in_place=True)
n.conv4 = L.Convolution(n.conv3, name='conv4', num_output=128, kernel_size=4, pad=0, stride=2)
n.relu4 = L.ReLU(n.conv4, name='relu4', in_place=True)
n.y0 = L.InnerProduct(n.conv4, num_output=y_dim, weight_filler=dict(type='xavier'))
n.y = L.InnerProduct(n.y0, num_output=y_dim, weight_filler=dict(type='xavier'))
# bilinear
    n.y_diff_pred = ImageBilinearChannelwise(n, n.y, n.vel, (y_dim,), u_dim,
                                             # ImageBilinearChannelwise expects four param entries and separate
                                             # linear_y/linear_u fillers (see its assert); the frozen (lr_mult=0)
                                             # linear terms keep the original bilinear-plus-bias intent.
                                             dict(param=[dict(lr_mult=1, decay_mult=1),
                                                         dict(lr_mult=0, decay_mult=0),
                                                         dict(lr_mult=0, decay_mult=0),
                                                         dict(lr_mult=1, decay_mult=1)],
                                                  bilinear_filler=dict(type='gaussian', std=0.001),
                                                  linear_y_filler=dict(type='constant', value=0),
                                                  linear_u_filler=dict(type='constant', value=0),
                                                  bias_filler=dict(type='constant', value=0)),
                                             axis=1)
n.y_next_pred = L.Eltwise(n.y, n.y_diff_pred, operation=P.Eltwise.SUM)
# decoding
n.y_next_pred0 = L.InnerProduct(n.y_next_pred, num_output=y_dim, weight_filler=dict(type='xavier'))
n.y_next_pred1 = L.InnerProduct(n.y_next_pred0, num_output=y_dim, weight_filler=dict(type='xavier'))
n.relu5 = L.ReLU(n.y_next_pred1, name='relu5', in_place=True)
n.deconv4 = L.Deconvolution(n.y_next_pred1,
param=[dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=128, kernel_size=4, pad=0, stride=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
n.derelu4 = L.ReLU(n.deconv4, name='derelu4', in_place=True)
n.deconv3 = L.Deconvolution(n.deconv4,
param=[dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=128, kernel_size=6, pad=1, stride=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
n.derelu3 = L.ReLU(n.deconv3, name='derelu3', in_place=True)
n.deconv2 = L.Deconvolution(n.deconv3,
param=[dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=128, kernel_size=6, pad=1, stride=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
n.derelu2 = L.ReLU(n.deconv2, name='derelu2', in_place=True)
n.image_next_pred = L.Deconvolution(n.deconv2,
param=[dict(lr_mult=1, decay_mult=1),
dict(lr_mult=1, decay_mult=1)],
convolution_param=dict(num_output=3, kernel_size=8, pad_h=0, pad_w=1, stride=2,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)))
n.image_next = L.Eltwise(n.image_curr, n.image_diff, operation=P.Eltwise.SUM)
n.x_next_loss = L.EuclideanLoss(n.image_next, n.image_next_pred)
net = n.to_proto()
if net_name is None:
net_name = 'ActionCondEncoderNet2'
net.name = net_name
return net, None
| alexlee-gk/visual_dynamics | visual_dynamics/predictors/net_caffe.py | Python | mit | 67,823 | [
"Gaussian"
] | cd878b61a8dbb1c291e47c4c1cc9cc17e004c97c0dece531eab532791cd493e0 |
#!/usr/bin/python2
#NOTE: Run this plot script from directory deft/papers/fuzzy-fmt
#with command ./new-melting_plot_script.py [directory where data is stored] [temp]
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 3:
print "Usage: directory temperature"
exit(1)
data_directory=sys.argv[1]
temp=sys.argv[2]
data_file=data_directory+"/plot_kT"+temp+".dat"
print "Removing plot file if it exists..." #ASK is there a way to tell whether it exists so can avoid error message?
os.system("rm "+data_file)
#wait = raw_input("If not, press the ENTER key to continue program...")
print
print "Creating new plot file [fuzzy-fmt]/"+data_file
os.system("cat "+data_directory+"/kT"+temp+"*best.dat >>"+data_file)
thisdata = np.loadtxt(data_file)
print thisdata
densities=thisdata[:,1]
print densities
crystal_energies_per_atom = thisdata[:,5]
homogeneous_energies_per_atom = thisdata[:,4]
energy_differences_per_atom = thisdata[:,6]
crystal_energies_per_volume = thisdata[:,9]
#if want vol = 4*(1-fv)/reduced_density
plot1=data_directory+"/plot1_FEvsDen_kT"+temp+".png"
plot2=data_directory+"/plot2_Pressure_kT"+temp+".png"
# Plot Free Energy/atom vs Reduced Density
plt.plot(densities, crystal_energies_per_atom, 'b', label="Crystal Free Energy/atom")
plt.plot(densities, homogeneous_energies_per_atom, 'g', label="Homogeneous Free Energy/atom")
plt.title('Free Energy/atom vs Reduced Density at Fixed kT='+temp)
plt.xlabel('Reduced Density')
plt.ylabel('Free Energy/atom')
plt.legend()
plt.savefig(plot1)
plt.figure()
f = crystal_energies_per_atom
#print "f=", f
df=np.diff(f) #Caution: depends on order of data files!
#print "df=", df
n = densities
#print "n=", n
dn=np.diff(n) #Caution: depends on order of data files!
#print "dn=", dn
mid_n=n[0:len(n)-1]+dn/2
#print "mid_n=", mid_n
pressure = -(mid_n*mid_n)*(df/dn) #for fixed N and T
#print "pressure =", pressure
# Plot Pressure vs Reduced Density
plt.plot(mid_n, pressure, color='red')
plt.title('Reduced Pressure vs Reduced Density at Fixed kT='+temp)
plt.xlabel('Reduced Density')
plt.ylabel('Reduced Pressure')
plt.savefig(plot2)
plt.show()
#------------------------------------------------------------------------------
#NOTE: lattice_constant will be divided by gwstep
#Do we need these in the plot file? - ASK!
#crystal_energiesdensities = np.zeros_like(densities) #initializing...
#crystal_energies_per_volume = np.zeros_like(densities)
#energy_differences = np.zeros_like(densities)
| droundy/deft | papers/fuzzy-fmt/new-melting_plot_script.py | Python | gpl-2.0 | 2,515 | [
"CRYSTAL"
] | aeee54f62154289d9ea6ffe6ffcfd293a2c9ffb8356ea050506104cca6313da8 |
from cis.data_io.products import NetCDF_Gridded
import logging
class HadGEM_CONVSH(NetCDF_Gridded):
"""
HadGEM plugin for reading NetCDF files converted by CONVSH. It implements a callback to pass to iris when
reading multiple files to allow correct merging
"""
def get_file_signature(self):
return [r'[a-z]{6}[\._][pamd]{2}[0-9]{4,6}.*\.nc']
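        # The signature above matches names like 'abcdef.pm200401_xyz.nc' (a
        # hypothetical example): six lowercase letters, '.' or '_', two stream
        # letters from [pamd], 4-6 digits, anything, and a '.nc' suffix.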
@staticmethod
def load_multiple_files_callback(cube, field, filename):
from iris.util import squeeze
# We need to remove the history field when reading multiple files so that the cubes can be properly merged
cube.attributes.pop('history')
# cube.coord(name_or_coord='Hybrid height').attributes['formula_terms'] = 'a: lev b: b orog: orog'
# We also need to remove the length one time dimension so that the cube can be merged correctly (iris preserves
# the value as a scalar which then gets converted back into a full coordinate again on merge).
return squeeze(cube)
def _create_cube(self, filenames, variable):
"""Creates a cube for the specified variable.
:param filenames: List of filenames to read coordinates from
:param variable: Optional variable to read while we're reading the coordinates, can be a string or a
VariableConstraint object
        :return: A GriddedData (iris cube) object for the requested variable
"""
import six
from cis.exceptions import InvalidVariableError
from cis.data_io.products.gridded_NetCDF import DisplayConstraint
from cis.data_io.gridded_data import load_cube
from iris.exceptions import CoordinateNotFoundError
# Check if the files given actually exist.
for filename in filenames:
with open(filename) as f:
pass
variable_constraint = variable
if isinstance(variable, six.string_types):
# noinspection PyPep8
variable_constraint = DisplayConstraint(cube_func=(lambda c: c.var_name == variable or
c.standard_name == variable or
c.long_name == variable), display=variable)
if len(filenames) == 1:
callback_function = self.load_single_file_callback
else:
callback_function = self.load_multiple_files_callback
try:
cube = load_cube(filenames, variable_constraint, callback=callback_function)
except ValueError as e:
if variable is None:
message = "File contains more than one cube variable name must be specified"
elif e.args[0] == "No cubes found":
message = "Variable not found: {} \nTo see a list of variables run: cis info {}" \
.format(str(variable), filenames[0])
else:
message = e.args[0]
raise InvalidVariableError(message)
try:
hybrid_ht = cube.coord(name_or_coord='Hybrid height')
hybrid_ht.attributes['formula'] = 'z(k,j,i) = a(k) + b(k)*orog(j,i)'
hybrid_ht.convert_units('m')
except CoordinateNotFoundError as e:
pass
try:
cube.coord(long_name='t').standard_name = 'time'
except CoordinateNotFoundError as e:
pass
self._add_available_aux_coords(cube, filenames)
return cube
def get_variable_names(self, filenames, data_type=None):
# Don't do any checks on valid variables at the moment as iris can't parse the hybrid height dimension units...
import iris
from cis.utils import single_warnings_only
# Filter the warnings so that they only appear once - otherwise you get lots of repeated warnings
with single_warnings_only():
cubes = iris.load(filenames)
return set(cube.name() for cube in cubes)
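# Illustrative use of the HadGEM_CONVSH plugin above through the CIS Python API
# (the file and variable names here are hypothetical):
#     from cis import read_data
#     cube = read_data('xjanpa.pm200401.nc', 'air_temperature', product='HadGEM_CONVSH')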
class HadGEM_PP(NetCDF_Gridded):
"""
HadGEM plugin for reading native pp files
"""
def get_file_signature(self):
return [r'.*\.pp']
@staticmethod
def load_multiple_files_callback(cube, field, filename):
        # This method sets the var_name (used for outputting the cube to NetCDF) to the cube name. This can be
        # quite long for some HadGEM variables, but most commands allow the user to override this field on output.
var_name = cube.name()
try:
cube.var_name = var_name
except ValueError as e:
logging.info("Unable to set var_name due to error: {}".format(e))
@staticmethod
def load_single_file_callback(cube, field, filename):
        # This method sets the var_name (used for outputting the cube to NetCDF) to the cube name. This can be
        # quite long for some HadGEM variables, but most commands allow the user to override this field on output.
var_name = cube.name()
try:
cube.var_name = var_name
except ValueError as e:
try:
cube.var_name = var_name.replace(' ', '_')
except ValueError as e:
logging.info("Unable to set var_name due to error: {}".format(e))
| cedadev/cis | cis/data_io/products/HadGEM.py | Python | lgpl-3.0 | 5,253 | [
"NetCDF"
] | 96cecd8403953073563adba2dcb4b5eb2edcc77bb2a26acc38c9e18163056069 |
import numpy as np
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_raises)
import skimage
from skimage import data
from skimage._shared._warnings import expected_warnings
from skimage.filters.thresholding import (threshold_adaptive,
threshold_otsu,
threshold_li,
threshold_yen,
threshold_isodata,
threshold_niblack,
threshold_sauvola,
threshold_mean,
threshold_triangle,
threshold_minimum)
class TestSimpleImage():
def setup(self):
self.image = np.array([[0, 0, 1, 3, 5],
[0, 1, 4, 3, 4],
[1, 2, 5, 4, 1],
[2, 4, 5, 2, 1],
[4, 5, 1, 0, 0]], dtype=int)
def test_otsu(self):
assert threshold_otsu(self.image) == 2
def test_otsu_negative_int(self):
image = self.image - 2
assert threshold_otsu(image) == 0
def test_otsu_float_image(self):
image = np.float64(self.image)
assert 2 <= threshold_otsu(image) < 3
def test_li(self):
assert int(threshold_li(self.image)) == 2
def test_li_negative_int(self):
image = self.image - 2
assert int(threshold_li(image)) == 0
def test_li_float_image(self):
image = np.float64(self.image)
assert 2 <= threshold_li(image) < 3
def test_li_constant_image(self):
assert_raises(ValueError, threshold_li, np.ones((10,10)))
def test_yen(self):
assert threshold_yen(self.image) == 2
def test_yen_negative_int(self):
image = self.image - 2
assert threshold_yen(image) == 0
def test_yen_float_image(self):
image = np.float64(self.image)
assert 2 <= threshold_yen(image) < 3
def test_yen_arange(self):
image = np.arange(256)
assert threshold_yen(image) == 127
def test_yen_binary(self):
image = np.zeros([2, 256], dtype=np.uint8)
image[0] = 255
assert threshold_yen(image) < 1
def test_yen_blank_zero(self):
image = np.zeros((5, 5), dtype=np.uint8)
assert threshold_yen(image) == 0
def test_yen_blank_max(self):
image = np.empty((5, 5), dtype=np.uint8)
image.fill(255)
assert threshold_yen(image) == 255
def test_isodata(self):
assert threshold_isodata(self.image) == 2
assert threshold_isodata(self.image, return_all=True) == [2]
def test_isodata_blank_zero(self):
image = np.zeros((5, 5), dtype=np.uint8)
assert threshold_isodata(image) == 0
assert threshold_isodata(image, return_all=True) == [0]
def test_isodata_linspace(self):
image = np.linspace(-127, 0, 256)
assert -63.8 < threshold_isodata(image) < -63.6
assert_almost_equal(threshold_isodata(image, return_all=True),
[-63.74804688, -63.25195312])
def test_isodata_16bit(self):
np.random.seed(0)
imfloat = np.random.rand(256, 256)
assert 0.49 < threshold_isodata(imfloat, nbins=1024) < 0.51
assert all(0.49 < threshold_isodata(imfloat, nbins=1024,
return_all=True))
def test_threshold_adaptive_generic(self):
def func(arr):
return arr.sum() / arr.shape[0]
ref = np.array(
[[False, False, False, False, True],
[False, False, True, False, True],
[False, False, True, True, False],
[False, True, True, False, False],
[ True, True, False, False, False]]
)
out = threshold_adaptive(self.image, 3, method='generic', param=func)
assert_equal(ref, out)
def test_threshold_adaptive_gaussian(self):
ref = np.array(
[[False, False, False, False, True],
[False, False, True, False, True],
[False, False, True, True, False],
[False, True, True, False, False],
[ True, True, False, False, False]]
)
out = threshold_adaptive(self.image, 3, method='gaussian')
assert_equal(ref, out)
out = threshold_adaptive(self.image, 3, method='gaussian',
param=1./3.)
assert_equal(ref, out)
def test_threshold_adaptive_mean(self):
ref = np.array(
[[False, False, False, False, True],
[False, False, True, False, True],
[False, False, True, True, False],
[False, True, True, False, False],
[ True, True, False, False, False]]
)
out = threshold_adaptive(self.image, 3, method='mean')
assert_equal(ref, out)
def test_threshold_adaptive_median(self):
ref = np.array(
[[False, False, False, False, True],
[False, False, True, False, False],
[False, False, True, False, False],
[False, False, True, True, False],
[False, True, False, False, False]]
)
out = threshold_adaptive(self.image, 3, method='median')
assert_equal(ref, out)
def test_threshold_niblack(self):
ref = np.array(
[[False, False, False, True, True],
[False, True, True, True, True],
[False, True, True, True, False],
[False, True, True, True, True],
[True, True, False, False, False]]
)
thres = threshold_niblack(self.image, window_size=3, k=0.5)
out = self.image > thres
assert_equal(ref, out)
def test_threshold_sauvola(self):
ref = np.array(
[[False, False, False, True, True],
[False, False, True, True, True],
[False, False, True, True, False],
[False, True, True, True, False],
[True, True, False, False, False]]
)
thres = threshold_sauvola(self.image, window_size=3, k=0.2, r=128)
out = self.image > thres
assert_equal(ref, out)
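# Illustrative (non-test) use of the global thresholding functions: they return a
# scalar that is compared against the image to obtain a binary mask, while
# threshold_niblack and threshold_sauvola return a per-pixel threshold array that
# is compared in the same way (threshold_adaptive in this version already returns
# the binary result).
def _example_binarize(image):
    return image > threshold_otsu(image)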
def test_otsu_camera_image():
camera = skimage.img_as_ubyte(data.camera())
assert 86 < threshold_otsu(camera) < 88
def test_otsu_coins_image():
coins = skimage.img_as_ubyte(data.coins())
assert 106 < threshold_otsu(coins) < 108
def test_otsu_coins_image_as_float():
coins = skimage.img_as_float(data.coins())
assert 0.41 < threshold_otsu(coins) < 0.42
def test_otsu_astro_image():
img = skimage.img_as_ubyte(data.astronaut())
with expected_warnings(['grayscale']):
assert 109 < threshold_otsu(img) < 111
def test_otsu_one_color_image():
img = np.ones((10, 10), dtype=np.uint8)
assert_raises(ValueError, threshold_otsu, img)
def test_li_camera_image():
camera = skimage.img_as_ubyte(data.camera())
assert 63 < threshold_li(camera) < 65
def test_li_coins_image():
coins = skimage.img_as_ubyte(data.coins())
assert 95 < threshold_li(coins) < 97
def test_li_coins_image_as_float():
coins = skimage.img_as_float(data.coins())
assert 0.37 < threshold_li(coins) < 0.38
def test_li_astro_image():
img = skimage.img_as_ubyte(data.astronaut())
assert 66 < threshold_li(img) < 68
def test_yen_camera_image():
camera = skimage.img_as_ubyte(data.camera())
assert 197 < threshold_yen(camera) < 199
def test_yen_coins_image():
coins = skimage.img_as_ubyte(data.coins())
assert 109 < threshold_yen(coins) < 111
def test_yen_coins_image_as_float():
coins = skimage.img_as_float(data.coins())
assert 0.43 < threshold_yen(coins) < 0.44
def test_adaptive_even_block_size_error():
img = data.camera()
assert_raises(ValueError, threshold_adaptive, img, block_size=4)
def test_isodata_camera_image():
camera = skimage.img_as_ubyte(data.camera())
threshold = threshold_isodata(camera)
assert np.floor((camera[camera <= threshold].mean() +
camera[camera > threshold].mean()) / 2.0) == threshold
assert threshold == 87
assert threshold_isodata(camera, return_all=True) == [87]
def test_isodata_coins_image():
coins = skimage.img_as_ubyte(data.coins())
threshold = threshold_isodata(coins)
assert np.floor((coins[coins <= threshold].mean() +
coins[coins > threshold].mean()) / 2.0) == threshold
assert threshold == 107
assert threshold_isodata(coins, return_all=True) == [107]
def test_isodata_moon_image():
moon = skimage.img_as_ubyte(data.moon())
threshold = threshold_isodata(moon)
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert threshold == 86
thresholds = threshold_isodata(moon, return_all=True)
for threshold in thresholds:
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert_equal(thresholds, [86, 87, 88, 122, 123, 124, 139, 140])
def test_isodata_moon_image_negative_int():
moon = skimage.img_as_ubyte(data.moon()).astype(np.int32)
moon -= 100
threshold = threshold_isodata(moon)
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert threshold == -14
thresholds = threshold_isodata(moon, return_all=True)
for threshold in thresholds:
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert_equal(thresholds, [-14, -13, -12, 22, 23, 24, 39, 40])
def test_isodata_moon_image_negative_float():
moon = skimage.img_as_ubyte(data.moon()).astype(np.float64)
moon -= 100
assert -14 < threshold_isodata(moon) < -13
thresholds = threshold_isodata(moon, return_all=True)
assert_almost_equal(thresholds,
[-13.83789062, -12.84179688, -11.84570312, 22.02148438,
23.01757812, 24.01367188, 38.95507812, 39.95117188])
def test_threshold_minimum():
camera = skimage.img_as_ubyte(data.camera())
threshold = threshold_minimum(camera)
assert threshold == 76
threshold = threshold_minimum(camera, bias='max')
assert threshold == 77
astronaut = skimage.img_as_ubyte(data.astronaut())
threshold = threshold_minimum(astronaut)
assert threshold == 117
def test_threshold_minimum_synthetic():
img = np.arange(25*25, dtype=np.uint8).reshape((25, 25))
img[0:9, :] = 50
img[14:25, :] = 250
threshold = threshold_minimum(img, bias='min')
assert threshold == 93
threshold = threshold_minimum(img, bias='mid')
assert threshold == 159
threshold = threshold_minimum(img, bias='max')
assert threshold == 225
def test_threshold_minimum_failure():
img = np.zeros((16*16), dtype=np.uint8)
assert_raises(RuntimeError, threshold_minimum, img)
def test_mean():
img = np.zeros((2, 6))
img[:, 2:4] = 1
img[:, 4:] = 2
assert(threshold_mean(img) == 1.)
def test_triangle_uint_images():
assert(threshold_triangle(np.invert(data.text())) == 151)
assert(threshold_triangle(data.text()) == 104)
assert(threshold_triangle(data.coins()) == 80)
assert(threshold_triangle(np.invert(data.coins())) == 175)
def test_triangle_float_images():
text = data.text()
int_bins = text.max() - text.min() + 1
# Set nbins to match the uint case and threshold as float.
assert(round(threshold_triangle(
text.astype(np.float), nbins=int_bins)) == 104)
# Check that rescaling image to floats in unit interval is equivalent.
assert(round(threshold_triangle(text / 255., nbins=int_bins) * 255) == 104)
# Repeat for inverted image.
assert(round(threshold_triangle(
np.invert(text).astype(np.float), nbins=int_bins)) == 151)
assert (round(threshold_triangle(
np.invert(text) / 255., nbins=int_bins) * 255) == 151)
def test_triangle_flip():
# Depending on the skewness, the algorithm flips the histogram.
    # We check that the flip doesn't affect the result too much.
img = data.camera()
inv_img = np.invert(img)
t = threshold_triangle(inv_img)
t_inv_img = inv_img > t
t_inv_inv_img = np.invert(t_inv_img)
t = threshold_triangle(img)
t_img = img > t
# Check that most of the pixels are identical
# See numpy #7685 for a future np.testing API
unequal_pos = np.where(t_img.ravel() != t_inv_inv_img.ravel())
assert(len(unequal_pos[0]) / t_img.size < 1e-2)
if __name__ == '__main__':
np.testing.run_module_suite()
| paalge/scikit-image | skimage/filters/tests/test_thresholding.py | Python | bsd-3-clause | 12,997 | [
"Gaussian"
] | 767cf6f6681b2dc50b83293f0e1f92cdc52510016c59c7b502291eaf57108ba9 |
# -*- coding: utf-8 -*-
import os
import subprocess
import tempfile
import warnings
from threading import Thread
import json
from collections import defaultdict
import re
from functools import partial
import pandas as pd
import numpy as np
from chemcoord._generic_classes.generic_IO import GenericIO
from chemcoord.cartesian_coordinates._cartesian_class_core import CartesianCore
from chemcoord.configuration import settings
from chemcoord import constants
class CartesianIO(CartesianCore, GenericIO):
"""This class provides IO-methods.
Contains ``write_filetype`` and ``read_filetype`` methods
like ``write_xyz()`` and ``read_xyz()``.
The generic functions ``read`` and ``write``
    figure out the filetype themselves and use the
    appropriate IO-method.
The ``view`` method uses external viewers to display a temporarily
written xyz-file.
"""
def __repr__(self):
return self._frame.__repr__()
def _repr_html_(self):
new = self._sympy_formatter()
def insert_before_substring(insert_txt, substr, txt):
"Under the assumption that substr only appears once."
return (insert_txt + substr).join(txt.split(substr))
html_txt = new._frame._repr_html_()
insert_txt = '<caption>{}</caption>\n'.format(self.__class__.__name__)
return insert_before_substring(insert_txt, '<thead>', html_txt)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False):
"""Render a DataFrame to a console-friendly tabular output.
Wrapper around the :meth:`pandas.DataFrame.to_string` method.
"""
return self._frame.to_string(
buf=buf, columns=columns, col_space=col_space, header=header,
index=index, na_rep=na_rep, formatters=formatters,
float_format=float_format, sparsify=sparsify,
index_names=index_names, justify=justify, line_width=line_width,
max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=True,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
"""Render a DataFrame to a tabular environment table.
You can splice this into a LaTeX document.
Requires ``\\usepackage{booktabs}``.
Wrapper around the :meth:`pandas.DataFrame.to_latex` method.
"""
return self._frame.to_latex(
buf=buf, columns=columns, col_space=col_space, header=header,
index=index, na_rep=na_rep, formatters=formatters,
float_format=float_format, sparsify=sparsify,
index_names=index_names, bold_rows=bold_rows,
column_format=column_format, longtable=longtable, escape=escape,
encoding=encoding, decimal=decimal, multicolumn=multicolumn,
multicolumn_format=multicolumn_format, multirow=multirow)
def to_xyz(self, buf=None, sort_index=True,
index=False, header=False, float_format='{:.6f}'.format,
overwrite=True):
"""Write xyz-file
Args:
buf (str, path object or file-like object):
                File path or object; if None is provided, the result is returned as a string.
sort_index (bool): If sort_index is true, the
:class:`~chemcoord.Cartesian`
is sorted by the index before writing.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
overwrite (bool): May overwrite existing files.
Returns:
formatted : string (or unicode, depending on data and options)
"""
if sort_index:
molecule_string = (
self
.loc[:, ['atom', 'x', 'y', 'z']]
.sort_index()
.to_string(header=header, index=index, float_format=float_format)
)
else:
molecule_string = (
self
.loc[:, ['atom', 'x', 'y', 'z']]
.to_string(header=header, index=index, float_format=float_format)
)
# NOTE the following might be removed in the future
# introduced because of formatting bug in pandas
# See https://github.com/pandas-dev/pandas/issues/13032
space = ' ' * (self.loc[:, 'atom'].str.len().max()
- len(self.iloc[0, 0]))
output = '{n}\n{message}\n{alignment}{frame_string}'.format(
n=len(self), alignment=space, frame_string=molecule_string,
message='Created by chemcoord http://chemcoord.readthedocs.io/')
if buf is not None:
if overwrite:
with open(buf, mode='w') as f:
f.write(output)
else:
with open(buf, mode='x') as f:
f.write(output)
else:
return output
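    # Note on to_xyz's overwrite flag (see the code above): with a path and
    # overwrite=False the file is opened in mode 'x', so writing fails rather
    # than silently replacing an existing file, e.g. (path is hypothetical):
    #
    #   molecule.to_xyz('out.xyz', overwrite=False)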
def write_xyz(self, *args, **kwargs):
"""Deprecated, use :meth:`~chemcoord.Cartesian.to_xyz`
"""
message = 'Will be removed in the future. Please use to_xyz().'
with warnings.catch_warnings():
warnings.simplefilter("always")
warnings.warn(message, DeprecationWarning)
return self.to_xyz(*args, **kwargs)
@classmethod
def read_xyz(cls, buf, start_index=0, get_bonds=True,
nrows=None, engine=None):
"""Read a file of coordinate information.
Reads xyz-files.
Args:
buf (str, path object or file-like object):
                This is passed on to :func:`pandas.read_csv` and has the same constraints.
Any valid string path is acceptable. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file.
For file URLs, a host is expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any os.PathLike.
By file-like object, we refer to objects with a read() method, such as a file handler (e.g. via builtin open function) or StringIO.
start_index (int):
get_bonds (bool):
nrows (int): Number of rows of file to read.
Note that the first two rows are implicitly excluded.
engine (str): Wrapper for argument of :func:`pandas.read_csv`.
Returns:
Cartesian:
"""
frame = pd.read_csv(buf, skiprows=2, comment='#',
nrows=nrows, sep=r'\s+',
names=['atom', 'x', 'y', 'z'], engine=engine)
remove_digits = partial(re.sub, r'[0-9]+', '')
frame['atom'] = frame['atom'].apply(
lambda x: remove_digits(x).capitalize())
molecule = cls(frame)
molecule.index = range(start_index, start_index + len(molecule))
if get_bonds:
molecule.get_bonds(use_lookup=False, set_lookup=True)
return molecule
def to_cjson(self, buf=None, **kwargs):
"""Write a cjson file or return dictionary.
The cjson format is specified
`here <https://github.com/OpenChemistry/chemicaljson>`_.
Args:
buf (str): If it is a filepath, the data is written to
filepath. If it is None, a dictionary with the cjson
information is returned.
kwargs: The keyword arguments are passed into the
``dump`` function of the
`json library <https://docs.python.org/3/library/json.html>`_.
Returns:
dict:
"""
cjson_dict = {'chemical json': 0}
cjson_dict['atoms'] = {}
atomic_number = constants.elements['atomic_number'].to_dict()
cjson_dict['atoms'] = {'elements': {}}
cjson_dict['atoms']['elements']['number'] = [
int(atomic_number[x]) for x in self['atom']]
cjson_dict['atoms']['coords'] = {}
coords = self.loc[:, ['x', 'y', 'z']].values.reshape(len(self) * 3)
cjson_dict['atoms']['coords']['3d'] = [float(x) for x in coords]
bonds = []
bond_dict = self.get_bonds()
for i in bond_dict:
for b in bond_dict[i]:
bonds += [int(i), int(b)]
bond_dict[b].remove(i)
cjson_dict['bonds'] = {'connections': {}}
cjson_dict['bonds']['connections']['index'] = bonds
if buf is not None:
with open(buf, mode='w') as f:
f.write(json.dumps(cjson_dict, **kwargs))
else:
return cjson_dict
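    # Layout of the dictionary produced by to_cjson, mirroring the
    # assignments above (numeric values are placeholders):
    #
    #   {'chemical json': 0,
    #    'atoms': {'elements': {'number': [8, 1, 1]},
    #              'coords': {'3d': [x1, y1, z1, x2, y2, z2, ...]}},
    #    'bonds': {'connections': {'index': [0, 1, 0, 2]}}}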
@classmethod
def read_cjson(cls, buf):
"""Read a cjson file or a dictionary.
The cjson format is specified
`here <https://github.com/OpenChemistry/chemicaljson>`_.
Args:
buf (str, dict): If it is a filepath, the data is read from
filepath. If it is a dictionary, the dictionary is interpreted
as cjson.
Returns:
Cartesian:
"""
if isinstance(buf, dict):
data = buf.copy()
else:
with open(buf, 'r') as f:
data = json.load(f)
assert data['chemical json'] == 0
n_atoms = len(data['atoms']['coords']['3d'])
metadata = {}
_metadata = {}
coords = np.array(
data['atoms']['coords']['3d']).reshape((n_atoms // 3, 3))
atomic_number = constants.elements['atomic_number']
elements = [dict(zip(atomic_number, atomic_number.index))[x]
for x in data['atoms']['elements']['number']]
try:
connections = data['bonds']['connections']['index']
except KeyError:
pass
else:
bond_dict = defaultdict(set)
for i, b in zip(connections[::2], connections[1::2]):
bond_dict[i].add(b)
bond_dict[b].add(i)
_metadata['bond_dict'] = dict(bond_dict)
try:
metadata.update(data['properties'])
except KeyError:
pass
out = cls(atoms=elements, coords=coords, _metadata=_metadata,
metadata=metadata)
return out
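    # Minimal usage sketch for read_cjson (file name is hypothetical); it
    # also accepts the dictionary returned by to_cjson directly:
    #
    #   mol = Cartesian.read_cjson('benzene.cjson')
    #   same_mol = Cartesian.read_cjson(mol.to_cjson())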
def view(self, viewer=None, use_curr_dir=False):
"""View your molecule.
.. note:: This function writes a temporary file and opens it with
an external viewer.
If you modify your molecule afterwards you have to recall view
in order to see the changes.
Args:
viewer (str): The external viewer to use. If it is None,
the default as specified in cc.settings['defaults']['viewer']
is used.
use_curr_dir (bool): If True, the temporary file is written to
            the current directory. Otherwise it gets written to the
            OS dependent temporary directory.
Returns:
None:
"""
if viewer is None:
viewer = settings['defaults']['viewer']
if use_curr_dir:
TEMP_DIR = os.path.curdir
else:
TEMP_DIR = tempfile.gettempdir()
def give_filename(i):
filename = 'ChemCoord_' + str(i) + '.xyz'
return os.path.join(TEMP_DIR, filename)
i = 1
while os.path.exists(give_filename(i)):
i = i + 1
self.to_xyz(give_filename(i))
def open_file(i):
"""Open file and close after being finished."""
try:
subprocess.check_call([viewer, give_filename(i)])
except (subprocess.CalledProcessError, FileNotFoundError):
raise
finally:
if use_curr_dir:
pass
else:
os.remove(give_filename(i))
Thread(target=open_file, args=(i,)).start()
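    # Sketch of how view() is typically used (the viewer name is only an
    # example and must be an executable found on the PATH):
    #
    #   settings['defaults']['viewer'] = 'avogadro'
    #   molecule.view()                   # writes ChemCoord_<i>.xyz to the temp dir
    #   molecule.view(use_curr_dir=True)  # keeps the file in the working directory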
def get_pymatgen_molecule(self):
"""Create a Molecule instance of the pymatgen library
.. warning:: The `pymatgen library <http://pymatgen.org>`_ is imported
locally in this function and will raise
an ``ImportError`` exception, if it is not installed.
Args:
None
Returns:
:class:`pymatgen.core.structure.Molecule`:
"""
from pymatgen.core import Molecule
return Molecule(self['atom'].values,
self.loc[:, ['x', 'y', 'z']].values)
@classmethod
def from_pymatgen_molecule(cls, molecule):
"""Create an instance of the own class from a pymatgen molecule
Args:
molecule (:class:`pymatgen.core.structure.Molecule`):
Returns:
Cartesian:
"""
new = cls(atoms=[el.value for el in molecule.species],
coords=molecule.cart_coords)
return new._to_numeric()
def get_ase_atoms(self):
"""Create an Atoms instance of the ase library
.. warning:: The `ase library <https://wiki.fysik.dtu.dk/ase/>`_
is imported locally in this function and will raise
an ``ImportError`` exception, if it is not installed.
Args:
None
Returns:
:class:`ase.atoms.Atoms`:
"""
from ase import Atoms
return Atoms(''.join(self['atom']), self.loc[:, ['x', 'y', 'z']])
@classmethod
def from_ase_atoms(cls, atoms):
"""Create an instance of the own class from an ase molecule
Args:
molecule (:class:`ase.atoms.Atoms`):
Returns:
Cartesian:
"""
return cls(atoms=atoms.get_chemical_symbols(), coords=atoms.positions)
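    # The two converter pairs above allow round trips with external
    # libraries (sketch; requires pymatgen or ase to be installed):
    #
    #   back  = Cartesian.from_pymatgen_molecule(molecule.get_pymatgen_molecule())
    #   back2 = Cartesian.from_ase_atoms(molecule.get_ase_atoms())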
| mcocdawc/chemcoord | src/chemcoord/cartesian_coordinates/_cartesian_class_io.py | Python | lgpl-3.0 | 14,387 | [
"ASE",
"pymatgen"
] | 0cfad16a3dc126ef080bfc2ee5388ea285681d8a3b36485adb45da93ba77ac4a |
''' file name : comparehist.py
Description : This sample shows how to determine how well two histograms match each other.
This is Python version of this tutorial : http://opencv.itseez.com/doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.html
Level : Beginner
Benefits : Learn to use cv2.compareHist and create 2D histograms
Usage : python comparehist.py
Written by : Abid K. (abidrahman2@gmail.com) , Visit opencvpython.blogspot.com for more tutorials '''
import cv2
import numpy as np
base = cv2.imread('base.png')
test1 = cv2.imread('test1.jpg')
test2 = cv2.imread('test2.jpg')
rows,cols = base.shape[:2]
basehsv = cv2.cvtColor(base,cv2.COLOR_BGR2HSV)
test1hsv = cv2.cvtColor(test1,cv2.COLOR_BGR2HSV)
test2hsv = cv2.cvtColor(test2,cv2.COLOR_BGR2HSV)
halfhsv = basehsv[rows/2:rows-1,cols/2:cols-1].copy() # Take the lower-right quadrant of the base image for testing
hbins = 180
sbins = 255
hrange = [0,180]
srange = [0,256]
ranges = hrange+srange # ranges = [0,180,0,256]
histbase = cv2.calcHist(basehsv,[0,1],None,[180,256],ranges)
cv2.normalize(histbase,histbase,0,255,cv2.NORM_MINMAX)
histhalf = cv2.calcHist(halfhsv,[0,1],None,[180,256],ranges)
cv2.normalize(histhalf,histhalf,0,255,cv2.NORM_MINMAX)
histtest1 = cv2.calcHist(test1hsv,[0,1],None,[180,256],ranges)
cv2.normalize(histtest1,histtest1,0,255,cv2.NORM_MINMAX)
histtest2 = cv2.calcHist(test2hsv,[0,1],None,[180,256],ranges)
cv2.normalize(histtest2,histtest2,0,255,cv2.NORM_MINMAX)
for i in xrange(4):
base_base = cv2.compareHist(histbase,histbase,i)
base_half = cv2.compareHist(histbase,histhalf,i)
base_test1 = cv2.compareHist(histbase,histtest1,i)
base_test2 = cv2.compareHist(histbase,histtest2,i)
print "Method: {0} -- base-base: {1} , base-half: {2} , base-test1: {3}, base_test2: {4}".format(i,base_base,base_half,base_test1,base_test2)
| asrob-uc3m/rpc_rpi | src/python/opencv_python_tutorials/Official_Tutorial_Python_Codes/3_imgproc/comparehist.py | Python | gpl-3.0 | 1,899 | [
"VisIt"
] | a83c1001897571eaf23052bfe6ce11adb8406abba6f7c79baf6b57bfda6e957d |
#!/usr/bin/env python
'''
Module to launch a crawl.
This module supplies the following functions that can be used
independently:
1. compute_stats: To calculate the download statistics of a URL.
Usage:
To use the functions provided in this module independently,
first place yourself just above the pytomo folder. Then:
import pytomo.start_pytomo as start_pytomo
import pytomo.config_pytomo as config_pytomo
config_pytomo.LOG_FILE = '-'
import time
timestamp = time.strftime('%Y-%m-%d.%H_%M_%S')
log_file = start_pytomo.configure_log_file(timestamp)
import platform
config_pytomo.SYSTEM = platform.system()
url = 'http://youtu.be/3VdOTTfSKyM'
start_pytomo.compute_stats(url)
# test Dailymotion
url = 'http://www.dailymotion.com/video/xscdm4_le-losc-au-pays-basque_sport?no_track=1'
import pytomo.start_pytomo as start_pytomo
import pytomo.config_pytomo as config_pytomo
config_pytomo.LOG_FILE = '-'
import time
timestamp = time.strftime('%Y-%m-%d.%H_%M_%S')
log_file = start_pytomo.configure_log_file(timestamp)
import platform
config_pytomo.SYSTEM = platform.system()
# video delivered by akamai CDN
url = 'http://www.dailymotion.com/video/xp9fq9_test-video-akamai_tech'
start_pytomo.compute_stats(url)
# redirect url: do not work
url = 'http://vid.ak.dmcdn.net/video/986/034/42430689_mp4_h264_aac.mp4?primaryToken=1343398942_d77027d09aac0c5d5de74d5428fb9e5b'
start_pytomo.compute_stats(url, redirect=True)
# video delivered by edgecast CDN
url = 'http://www.dailymotion.com/video/xmcyww_test-video-cell-edgecast_tech'
start_pytomo.compute_stats(url)
url = 'http://vid.ec.dmcdn.net/cdn/H264-512x384/video/xmcyww.mp4?77838fedd64fa52abe6a11b3bdbb4e62f4387ebf7cbce2147ea4becc5eee5c418aaa6598bb98a61fc95a02997247e59bfb0dcd58cdf05c1601ded04f75ae357b225da725baad5e97ea6cce6d6a12e17d1c01'
start_pytomo.compute_stats(url, redirect=True)
# video delivered by dailymotion servers
url = 'http://www.dailymotion.com/video/xmcyw2_test-video-cell-core_tech'
start_pytomo.compute_stats(url)
url = 'http://proxy-60.dailymotion.com/video/246/655/37556642_mp4_h264_aac.mp4?auth=1343399602-4098-bdkyfgul-eb00ad223e1964e40b327d75367b273b'
start_pytomo.compute_stats(url, redirect=True)
'''
from __future__ import with_statement, absolute_import, print_function
import sys
from urlparse import urlsplit
from pprint import pprint
import logging
import datetime
from time import strftime, timezone
import os
from string import maketrans, lowercase
from optparse import OptionParser
import hashlib
import socket
import urllib2
import platform
import signal
import time
from os.path import abspath, dirname, sep
from sys import path
import tarfile
import re
from operator import concat, itemgetter, eq
from sqlite3 import Error
#from ast import literal_eval
import json
# assumes the standard distribution paths
PACKAGE_DIR = dirname(abspath(path[0]))
#PUBLIC_IP_FINDER = 'http://automation.whatismyip.com/n09230945.asp'
#PUBLIC_IP_FINDER = 'http://ipogre.com/linux.php'
PUBLIC_IP_FINDER = r'http://stat.ripe.net/data/whats-my-ip/data.json'
AS_REQUEST_URL = r'http://stat.ripe.net/data/routing-status/data.json?resource='
# to store the request on /24 prefixes
CACHED_PREFIXES = dict()
# give a default fake value for convenience
CACHED_PREFIXES['0.0.0'] = 0
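# Example of the caching scheme used in compute_stats below: keys are /24
# prefixes of resolved IPs, values are the origin AS numbers returned by the
# RIPEstat lookup (the entry shown is only illustrative):
#   CACHED_PREFIXES['173.194.20'] = 15169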
IP_MATCH_PATTERN = ('^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}'
'([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$')
SEPARATOR_LINE = '#' * 80
YOUTUBE_SERVICE = 'youtube'
DAILYMOTION_SERVICE = 'dailymotion'
GALERIE_VIDEO_SERVICE = 'galerievideo.orange-business.com'
CONTINUOUS_CRAWL_SIZE = 10
PARAM_URL = 'Url'
#REDIRECT_CACHE_URL_PATTERN = '.youtube.com/videoplayback?sparams=id'
# initial cache url regex
#CACHE_URL_REGEXP = re.compile(r'(http://o-o---preferred---sn-)(\w{8})'
# '(---v\d{1,2}---lscache\d.c.youtube.com)')
# updated
# direct cache *o-o---preferred* or redirect *rXX*
# *---*
# *sn-* - will be omitted in the decrypted url
# something like *25g7rn7r* or *vg5obx-hgne*
# direct cache have *---vXX---lscacheX*
# *.c.youtube.com*
CACHE_URL_REGEXP = re.compile(r'(http://)(o-o---preferred|r\d{1,2})(---)(sn-)'
r'(\w{6}-\w{4}|(?:-|\w{8}))(---v\d{1,2}---lscache\d){0,1}'
r'(.c.youtube.com)')
TRANSLATION = maketrans(''.join(map(str, range(10))) + lowercase,
'uzpkfa50vqlgb61wrmhc72xsnid83ytoje94')
try:
from win32com.shell import shell, shellcon
HOME_DIR = shell.SHGetFolderPath(0, shellcon.CSIDL_PROFILE, None, 0)
except ImportError:
HOME_DIR = os.path.expanduser('~')
from . import config_pytomo
from . import lib_dns
from . import lib_ping
from . import lib_youtube_download
from . import lib_dailymotion_download
from . import lib_general_download
from . import lib_galerie_download
from . import lib_database
from . import lib_youtube_api
from . import lib_dailymotion_api
from . import lib_links_extractor
from . import lib_data_centralisation
from . import translation_cache_url
if config_pytomo.PLOT:
from . import lib_plot
# default service is YouTube
SERVICE = 'YouTube'
def select_libraries(url):
    ''' Return the libraries to use for downloading and retrieving specific
links'''
if DAILYMOTION_SERVICE in url or 'dmcdn' in url or 'dmcloud' in url:
lib_download = lib_dailymotion_download
lib_api = lib_dailymotion_api
elif YOUTUBE_SERVICE in url or 'youtu.be' in url:
lib_download = lib_youtube_download
lib_api = lib_youtube_api
elif GALERIE_VIDEO_SERVICE in url:
lib_download = lib_galerie_download
lib_api = lib_dailymotion_api
else:
config_pytomo.LOG.critical('only YouTube and Dailymotion download '
'are implemented')
return None
return (lib_download, lib_api)
#def translate_cache_url(url):
# ''' Return decrypted cache url name, using monoalphabetic cipher:
# digits, letters -> uzpkfa50vqlgb61wrmhc72xsnid83ytoje94
# Assumes all cache servers that match pattern are encrypted, otherwise
# it returns original address. Unencrypted cache urls still exist, they do
# not contain *--sn* (http://r3---orange-mrs2.c.youtube.com/).
# >>> url = 'http://o-o---preferred---sn-25g7rn7k---v18---lscache1.c.youtube.com/'
# >>> translate_cache_url(url)
# 'http://o-o---preferred---par08s07---v18---lscache1.c.youtube.com'
# >>> url = 'http://o-o---preferred---sn-vg5obx-hgnl---v16---lscache6.c.youtube.com'
# >>> translate_cache_url(url)
# 'http://o-o---preferred---orange-mrs2---v16---lscache6.c.youtube.com'
# >>> url = 'http://r10---sn-25g7rn7l.c.youtube.com/'
# >>> translate_cache_url(url)
# 'http://r10---par08s02.c.youtube.com'
# >>> url = 'http://r3---orange-mrs2.c.youtube.com/'
# >>> translate_cache_url(url)
# 'http://r3---orange-mrs2.c.youtube.com/'
# >>> url = 'http://r6---sn-5up-u0ol.c.youtube.com'
# >>> translate_cache_url(url)
# 'http://r6---ati-tun2.c.youtube.com'
# '''
# match = CACHE_URL_REGEXP.match(url)
# config_pytomo.LOG.debug('translating url: %s', url)
# if not match:
# config_pytomo.LOG.debug('no match')
# new_url = url
# else:
# groups = match.groups()
# assert len(groups) == 7
# groups = filter(None, groups)
# new_url = (''.join((groups[0:3])) + groups[4].translate(TRANSLATION) +
# ''.join((groups[5:])))
# config_pytomo.LOG.debug('url translated as: %s', new_url)
# return new_url
def compute_stats(url, cache_uri, do_download_stats, redirect_url=None,
do_full_crawl=None):
    '''Return a list of the statistics related to the url'''
if not cache_uri:
return None
current_stats = dict()
# the cache url server where the video is stored
# <scheme>://<netloc>/<path>?<query>#<fragment>
parsed_uri = urlsplit(cache_uri)
cache_url = '://'.join((parsed_uri.scheme, parsed_uri.netloc))
status_code = None
if config_pytomo.CRAWL_SERVICE.lower() == YOUTUBE_SERVICE:
cache_url = translation_cache_url.translate_cache_url(cache_url)
#cache_urn = '?'.join((parsed_uri.path, parsed_uri.query))
ip_addresses = lib_dns.get_ip_addresses(parsed_uri.netloc)
# in case there is a problem in the DNS, for the variables to be bound
if redirect_url:
parsed_uri = urlsplit(redirect_url)
redirect_url = '://'.join((parsed_uri.scheme, parsed_uri.netloc))
if config_pytomo.CRAWL_SERVICE.lower() == YOUTUBE_SERVICE:
redirect_url = translation_cache_url.translate_cache_url(
redirect_url)
redirect_list = []
for (ip_address, resolver, req_time) in ip_addresses:
config_pytomo.LOG.debug('Compute stats for IP: %s', ip_address)
timestamp = datetime.datetime.now()
if ip_address in current_stats and config_pytomo.SKIP_COMPUTED:
config_pytomo.LOG.debug('Skip IP already crawled: %s',
ip_address)
continue
ping_times = lib_ping.ping_ip(ip_address)
if do_download_stats and ('default' in resolver
or config_pytomo.DOWNLOAD_FROM_EXTRA_IPS):
(download_stats, new_redirect_url,
status_code) = compute_download_stats(resolver, ip_address,
cache_uri, current_stats,
do_full_crawl=do_full_crawl)
redirect_list.append(new_redirect_url)
else:
download_stats, new_redirect_url = None, None
if config_pytomo.PROXIES:
proxy = urllib2.ProxyHandler(config_pytomo.PROXIES)
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
# we do consider only /24 prefixes
prefix = '.'.join(ip_address.split('.')[0:3])
if prefix not in CACHED_PREFIXES:
try:
# HARD CODED fields of json data
as_nb = int(json.load(urllib2.urlopen(
AS_REQUEST_URL + ip_address,
timeout=config_pytomo.URL_TIMEOUT))['data']['last_seen']['origin'])
CACHED_PREFIXES[prefix] = as_nb
config_pytomo.LOG.debug('IP %s resolved as AS: %d',
ip_address, as_nb)
except Exception, mes:
config_pytomo.LOG.exception(mes)
prefix = '0.0.0'
if not status_code and redirect_url:
# should not happen, but guess a 302 in this case
config_pytomo.LOG.debug('no status code found with this '
'redirect_url: %s', redirect_url)
status_code = config_pytomo.HTTP_REDIRECT_FOUND
current_stats[ip_address] = [timestamp, ping_times, download_stats,
redirect_url, resolver, req_time,
CACHED_PREFIXES[prefix], status_code]
# check if cache_url is the same independently of DNS: YES only depend on
# video id
#assert reduce(eq, redirect_list)
config_pytomo.LOG.info('new redirect urls: %s', redirect_list)
return (url, cache_url, current_stats), redirect_list
def compute_download_stats(resolver, ip_address, cache_uri, current_stats,
do_full_crawl=False):
#redirect=False,
    '''Return a list of the download statistics related to the cache_uri'''
# it's important to pass the uri with the ip_address to avoid
# uncontrolled DNS resolution
if do_full_crawl:
d_time = config_pytomo.MAX_DOWNLOAD_TIME
else:
d_time = config_pytomo.DOWNLOAD_TIME
# may be done multiple times in case of different IP addresses
# resolved and uncaught errors on each IP
redirect_url = None
# if 'default' in resolver:
# config_pytomo.LOG.debug('trying url without IP')
try:
status_code, download_stats, redirect_url = (
lib_general_download.get_download_stats(cache_uri,
ip_address, download_time=d_time))
#redirect=redirect))
except (urllib2.HTTPError, ), nested_err:
config_pytomo.LOG.exception(nested_err)
# do nothing
return None, None, None
except (TypeError, ), mes:
config_pytomo.LOG.debug('no data')
config_pytomo.LOG.exception(mes)
return None, None, None
except Exception, mes:
config_pytomo.LOG.exception('Uncaught exception: %s', mes)
#import pdb; pdb.set_trace()
return None, None, None
# else:
# # stats can thus be collected only on default resolver
# if ip_address in current_stats:
# # HARD CODED current_stats index: BAD
# download_stats = current_stats[ip_address][2]
# else:
# download_stats = None
return download_stats, redirect_url, status_code
def format_stats(stats, cache_server_delay, service=SERVICE):
"""Return the stats as a list of tuple to insert into database
>>> stats = ('http://www.youtube.com/watch?v=RcmKbTR--iA',
... 'http://v15.lscache3.c.youtube.com',
... {'173.194.20.56': [datetime.datetime(
... 2011, 5, 6, 15, 30, 50, 103775),
... None,
... [8.9944229125976562, 'mp4',
... 225,
... 115012833.0,
... 511168.14666666667,
... 9575411,
... 0,
... 0.99954795837402344,
... 7.9875903129577637,
... 11.722306421319782,
... 1192528.8804511931, 15169],
... None, 'default_10.193.225.12']})
>>> format_stats(stats) #doctest: +NORMALIZE_WHITESPACE
[(datetime.datetime(2011, 5, 6, 15, 30, 50, 103775),
'Youtube', 'http://www.youtube.com/watch?v=RcmKbTR--iA',
'http://v15.lscache3.c.youtube.com', '173.194.20.56',
'default_10.193.225.12', 15169, None, None, None, 8.9944229125976562,
'mp4', 225, 115012833.0, 511168.14666666667, 9575411, 0,
0.99954795837402344, 7.9875903129577637, 11.722306421319782,
1192528.8804511931, None)]
>>> stats = ('http://www.youtube.com/watch?v=OdF-oiaICZI',
... 'http://v7.lscache8.c.youtube.com',
... {'74.125.105.226': [datetime.datetime(
... 2011, 5, 6, 15, 30, 50, 103775),
... [26.0, 196.0, 82.0],
... [30.311000108718872, 'mp4',
... 287.487, 16840065.0,
... 58576.78781997099,
... 1967199, 0,
... 1.316999912261963,
... 28.986000061035156,
... 5.542251416248594,
... 1109.4598961624772, 15169],
... 'http://www.youtube.com/fake_redirect',
... 'google_public_dns_8.8.8.8_open_dns_208.67.220.220'],
... '173.194.8.226': [datetime.datetime(2011, 5, 6, 15,
... 30, 51, 103775),
... [103.0, 108.0, 105.0],
... [30.287999868392944, 'mp4',
... 287.487, 16840065.0,
... 58576.78781997099,
... 2307716,
... 0,
... 1.3849999904632568,
... 28.89300012588501,
... 11.47842453761781,
... 32770.37517215069, 15169],
... None, 'default_212.234.161.118']})
>>> format_stats(stats) #doctest: +NORMALIZE_WHITESPACE
[(datetime.datetime(2011, 5, 6, 15, 30, 50, 103775),
'Youtube', 'http://www.youtube.com/watch?v=OdF-oiaICZI',
'http://v7.lscache8.c.youtube.com', '74.125.105.226',
'google_public_dns_8.8.8.8_open_dns_208.67.220.220', 15169, 26.0, 196.0, 82.0,
30.311000108718872, 'mp4', 287.48700000000002, 16840065.0,
58576.787819970988, 1967199, 0, 1.3169999122619629,
28.986000061035156, 5.5422514162485941, 1109.4598961624772,
'http://www.youtube.com/fake_redirect'),
(datetime.datetime(2011, 5, 6, 15, 30, 51, 103775),
'Youtube', 'http://www.youtube.com/watch?v=OdF-oiaICZI',
'http://v7.lscache8.c.youtube.com', '173.194.8.226',
'default_212.234.161.118', 103.0, 108.0, 105.0, 30.287999868392944,
'mp4', 287.48700000000002, 16840065.0, 58576.787819970988, 2307716,
0, 1.3849999904632568, 28.89300012588501, 11.47842453761781,
32770.375172150692, None)]
"""
record_list = []
(url, cache_url, current_stats) = stats
for (ip_address, values) in current_stats.items():
(timestamp, ping_times, download_stats, redirect_url,
resolver, req_time, as_nb, status_code) = values
if not ping_times:
ping_times = [None] * config_pytomo.NB_PING_VALUES
if not download_stats:
download_stats = [None] * config_pytomo.NB_DOWNLOAD_VALUES
# use inet_aton(ip_address) for optimisation on this field
row = ([timestamp, service, url, cache_url, cache_server_delay,
ip_address, resolver, req_time, as_nb]
+ list(ping_times) + download_stats + [redirect_url, status_code])
record_list.append(tuple(row))
return record_list
def set_up_snmp():
'''Run Agent X and prepare the dataset'''
try:
from . import hebexsnmptools
except ImportError:
config_pytomo.LOG.error('No hebexsnmptools module')
config_pytomo.LOG.info('Try installing it and ctypes')
config_pytomo.SNMP = False
else:
config_pytomo.hebexsnmptools = hebexsnmptools
config_pytomo.dataset = hebexsnmptools.SnmpData(
root=config_pytomo.ROOT_OID)
ax = hebexsnmptools.AgentX(name='PytomoAgent',
data=config_pytomo.dataset)
ax.Run()
# pytomoGlbStats
config_pytomo.dataset.registerVar(
config_pytomo.snmp_pytomoObjectName + '.0',
hebexsnmptools.ASN_OCTET_STR,
config_pytomo.snmp_pytomoObjectName_str)
config_pytomo.dataset.registerVar(
config_pytomo.snmp_pytomoDescr + '.0',
hebexsnmptools.ASN_OCTET_STR,
config_pytomo.snmp_pytomoDescr_str)
config_pytomo.dataset.registerVar(
config_pytomo.snmp_pytomoContact + '.0',
hebexsnmptools.ASN_OCTET_STR,
config_pytomo.snmp_pytomoContact_str)
config_pytomo.dataset.registerVar(
config_pytomo.snmp_pytomoDownloadDuration + '.0',
hebexsnmptools.ASN_GAUGE,
int(config_pytomo.DOWNLOAD_TIME))
config_pytomo.dataset.registerVar(
config_pytomo.snmp_pytomoSleepTime + '.0',
hebexsnmptools.ASN_GAUGE,
int(config_pytomo.DELAY_BETWEEN_REQUESTS))
# initiate tables for Url stats
config_pytomo.snmp_tables = []
config_pytomo.snmp_types = []
config_pytomo.urlIndexTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoUrlIndex, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlIndexTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_OCTET_STR)
config_pytomo.urlTimeStampTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoTimeStamp, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlTimeStampTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_OCTET_STR)
config_pytomo.urlServiceTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoService, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlServiceTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_OCTET_STR)
config_pytomo.urlCacheUrlTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoCacheUrl, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlCacheUrlTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_OCTET_STR)
config_pytomo.urlCacheServerDelayTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoCacheServerDelay, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlCacheServerDelayTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlAddressIpTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoAddressIp, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlAddressIpTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_OCTET_STR)
config_pytomo.urlResolverTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoResolver, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlResolverTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_OCTET_STR)
config_pytomo.urlResolveTimeTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoResolveTime, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlResolveTimeTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlAsNumberTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoAsNumber, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlAsNumberTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_INTEGER)
config_pytomo.urlPingMinTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoPingMin, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlPingMinTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlPingAvgTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoPingAvg, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlPingAvgTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlPingMaxTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoPingMax, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlPingMaxTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlDownloadTimeTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoDownloadTime, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlDownloadTimeTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlVideoTypeTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoVideoType, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlVideoTypeTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlVideoDurationTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoVideoDuration, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlVideoDurationTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlVideoLengthTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoVideoLength, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlVideoLengthTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlEncodingRateTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoEncodingRate, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlEncodingRateTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlDownloadBytesTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoDownloadBytes, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlDownloadBytesTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_INTEGER)
config_pytomo.urlDownloadInterruptionsTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoDownloadInterruptions, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlDownloadInterruptionsTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_INTEGER)
config_pytomo.urlInitialDataTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoInitialData, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlInitialDataTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlInitialRateTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoInitialRate, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlInitialRateTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlInitialPlaybackBufferTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoInitialPlaybackBuffer, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlInitialPlaybackBufferTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlBufferingDurationTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoBufferingDuration, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlBufferingDurationTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlPlaybackDurationTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoPlaybackDuration, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlPlaybackDurationTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlBufferDurationAtEndTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoBufferDurationAtEnd, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlBufferDurationAtEndTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlTimeTogetFirstByteTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoTimeTogetFirstByte, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlTimeTogetFirstByteTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlMaxInstantThpTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoMaxInstantThp, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlMaxInstantThpTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_GAUGE)
config_pytomo.urlRedirectUrlTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoRedirectUrl, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlRedirectUrlTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_OCTET_STR)
config_pytomo.urlStatusCodeTable = config_pytomo.dataset.addTable(
config_pytomo.snmp_pytomoStatusCode, hebexsnmptools.TABLE_INDEX_STRING)
config_pytomo.snmp_tables.append(config_pytomo.urlStatusCodeTable)
config_pytomo.snmp_types.append(hebexsnmptools.ASN_INTEGER)
    # Statistics by IP
    config_pytomo.IpNameTable = config_pytomo.dataset.addTable(
        config_pytomo.snmp_pytomoIpName, hebexsnmptools.TABLE_INDEX_STRING)
    config_pytomo.IpCountTable = config_pytomo.dataset.addTable(
        config_pytomo.snmp_pytomoIpCount, hebexsnmptools.TABLE_INDEX_STRING)
    config_pytomo.IpCountType = hebexsnmptools.ASN_COUNTER64
    config_pytomo.IpNameType = hebexsnmptools.ASN_OCTET_STR
    # Statistics by AS
    config_pytomo.ASNameTable = config_pytomo.dataset.addTable(
        config_pytomo.snmp_pytomoASName, hebexsnmptools.TABLE_INDEX_STRING)
    config_pytomo.ASCountTable = config_pytomo.dataset.addTable(
        config_pytomo.snmp_pytomoASCount, hebexsnmptools.TABLE_INDEX_STRING)
    config_pytomo.ASCountType = hebexsnmptools.ASN_COUNTER64
    config_pytomo.ASNameType = hebexsnmptools.ASN_OCTET_STR
def check_out_files(file_pattern, directory, timestamp):
"""Return a full path of the file used for the output
Test if the path exists, create if possible or create it in default user
directory
>>> file_pattern = None
>>> directory = 'logs'
>>> timestamp = 'doc_test'
>>> check_out_files(file_pattern, directory, timestamp) #doctest: +ELLIPSIS
>>> file_pattern = 'pytomo.log'
>>> check_out_files(file_pattern, directory, timestamp) #doctest: +ELLIPSIS
'...doc_test.pytomo.log'
"""
if file_pattern == None:
return None
if config_pytomo.USE_PACKAGE_DIR:
base_dir = PACKAGE_DIR
else:
base_dir = os.getcwd()
if directory:
out_dir = sep.join((base_dir, directory))
if not os.path.exists(out_dir):
try:
os.makedirs(out_dir)
except OSError, mes:
config_pytomo.LOG.warn(
'Out dir %s does not exist and cannot be created\n%s',
out_dir, mes)
if HOME_DIR:
config_pytomo.LOG.warn('Will use home base dir: %s',
HOME_DIR)
out_dir = sep.join((HOME_DIR, directory))
if not os.path.exists(out_dir):
# do not catch OSError as it's our second attempt
os.makedirs(out_dir)
else:
config_pytomo.LOG.error(
'Impossible to create output file: %s', file_pattern)
raise IOError
else:
out_dir = os.getcwd()
# AO 27112012 modified to also include service (yt, dm)
crawl_service_provider = (config_pytomo.CRAWL_SERVICE +
('_%s' % config_pytomo.PROVIDER
if config_pytomo.PROVIDER else ''))
out_file = sep.join((out_dir, '.'.join((socket.gethostname(),
crawl_service_provider,
timestamp, file_pattern))))
# do not catch IOError
with open(out_file, 'a') as _:
# just test writing in the out file
pass
return out_file
def md5sum(input_file):
'Return the standard md5 of the file'
# to cope with large files
# value taken from Python distribution
try:
input_stream = open(input_file, 'rb')
except (TypeError, IOError), mes:
config_pytomo.LOG.exception('Unable to compute the md5 of file: %s',
mes)
return None
bufsize = 8096
hash_value = hashlib.md5()
while True:
data = input_stream.read(bufsize)
if not data:
break
hash_value.update(data)
return hash_value.hexdigest()
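# md5sum reads the file in 8096-byte chunks, so large log or database files
# are hashed without being loaded fully into memory; the hex digest matches
# what the md5sum command-line tool would report, e.g. (hypothetical path):
#
#   digest = md5sum('pytomo.db')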
class MaxUrlException(Exception):
'Class to stop crawling when the max nb of urls has been attained'
pass
def add_stats(stats, cache_server_delay, url, result_stream=None, data_base=None):
'''Insert the stats in the db and update the crawled urls
'''
if result_stream:
pprint(stats, stream=result_stream)
if data_base:
if YOUTUBE_SERVICE in url:
service = 'YouTube'
elif DAILYMOTION_SERVICE in url:
service = 'Dailymotion'
else:
service = 'Unknown'
for row in format_stats(stats, cache_server_delay, service=service):
data_base.insert_record(row)
if config_pytomo.SNMP:
format_gauge = lambda x: int(x * 1000 if x else 0)
format_datetime = lambda x: x.strftime('%Y%m%d %H:%M:%S')
format_int = lambda x: x if x else 0
identity = lambda x: x if x else ''
formatted_stats = format_stats(stats, cache_server_delay,service=service)
for stats_line in formatted_stats:
video_url = stats_line[config_pytomo.URL_IDX]
video_ip = stats_line[config_pytomo.IP_IDX]
video_as = str(stats_line[config_pytomo.AS_IDX])
for table, snmp_type, idx in zip(config_pytomo.snmp_tables,
config_pytomo.snmp_types,
config_pytomo.STATS_IDX):
if snmp_type == config_pytomo.hebexsnmptools.ASN_GAUGE:
formatter = format_gauge
elif snmp_type == config_pytomo.hebexsnmptools.ASN_INTEGER:
formatter = format_int
elif idx == config_pytomo.TS_IDX:
formatter = format_datetime
else:
formatter = identity
table.registerValue(video_url, snmp_type,
formatter(stats_line[idx]))
if not config_pytomo.DOWNLOADED_BY_IP.has_key(video_ip):
config_pytomo.DOWNLOADED_BY_IP[video_ip] = 0
config_pytomo.IpNameTable.registerValue(video_ip, config_pytomo.IpNameType, video_ip)
if not config_pytomo.DOWNLOADED_BY_AS.has_key(video_as):
config_pytomo.DOWNLOADED_BY_AS[video_as] = 0
config_pytomo.ASNameTable.registerValue(video_as, config_pytomo.ASNameType, video_as)
config_pytomo.DOWNLOADED_BY_IP[video_ip] += 1
config_pytomo.DOWNLOADED_BY_AS[video_as] += 1
            config_pytomo.IpCountTable.registerValue(
                video_ip, config_pytomo.IpCountType,
                config_pytomo.DOWNLOADED_BY_IP[video_ip])
            config_pytomo.ASCountTable.registerValue(
                video_as, config_pytomo.ASCountType,
                config_pytomo.DOWNLOADED_BY_AS[video_as])
def retrieve_cache_urls(url, lib_download, hd_first=False):
''' Return the list of cache url servers for a given video.
The last element is the server from which the actual video is downloaded.
'''
# cache url server dependent on the service
cache_uri = lib_download.get_cache_url(url, hd_first=hd_first)
if not cache_uri:
return []
# list of cache urls
redirect_links = []
nb_redirects = 0
redirect_links.append(cache_uri)
# try to connect to the cache url, check if there are more redirects
response = lib_links_extractor.retrieve_header(cache_uri,
follow_redirect=False)
while (response and nb_redirects <= config_pytomo.MAX_NB_REDIRECT):
config_pytomo.LOG.debug('response_code: %s', response.code)
# no redirect
if response.code == config_pytomo.HTTP_OK:
break
redirect_server = response.location
redirect_links.append(redirect_server)
response = lib_links_extractor.retrieve_header(redirect_server,
follow_redirect=False)
cache_uri = redirect_server
nb_redirects += 1
if nb_redirects > config_pytomo.MAX_NB_REDIRECT:
config_pytomo.LOG.debug('retrieve_redirect_links: Too many cache server'
' redirects.')
return []
return redirect_links
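# The list returned by retrieve_cache_urls is the full redirect chain,
# [first_cache_uri, redirect_1, ..., final_uri]; crawl_link below downloads
# only from the last element and records ping statistics for the others.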
def check_full_download(len_crawled_urls):
'Check if the urls should be fully downloaded'
if (config_pytomo.FREQ_FULL_DOWNLOAD
and len_crawled_urls % config_pytomo.FREQ_FULL_DOWNLOAD == 0):
return True
else:
return False
def crawl_link(url, next_urls, result_stream, data_base, related, loop,
hd_first=False):
'''Crawl the link and return the next urls'''
try:
crawled_urls = map(itemgetter(-1),
data_base.fetch_single_parameter_with_stats(PARAM_URL))
except Error, mes:
config_pytomo.LOG.error('Unable to extract data %s with error: %s',
PARAM_URL, mes)
return next_urls
if not loop and len(crawled_urls) >= config_pytomo.MAX_CRAWLED_URLS:
config_pytomo.LOG.debug('Reached max crawls')
raise MaxUrlException()
config_pytomo.LOG.debug('Crawl of url# %d: %s', len(crawled_urls), url)
if not loop and url in crawled_urls:
config_pytomo.LOG.debug('Skipped url already crawled: %s', url)
return next_urls
# print completed urls so that user knows that the crawl is running
if (crawled_urls
and len(crawled_urls) % config_pytomo.CRAWLED_URLS_MODULO == 0
# AO 26112012 trying to solve win freeze when printing to stdout
# does not work, best to not print on windows?
and 'win' not in sys.platform.lower()):
sys.stdout.write('Completed %d urls\n' % len(crawled_urls))
sys.stdout.flush()
#print('Completed %d urls' % len(crawled_urls))
if result_stream:
config_pytomo.LOG.debug('Printing to result_stream')
        print(config_pytomo.SEP_LINE, file=result_stream)
stats = None
redirect_list = []
download_libs = select_libraries(url)
if download_libs:
(lib_download, lib_api) = download_libs
else:
config_pytomo.LOG.error('Could not select libraries to compute'
' statistics for %s', url)
return next_urls
start_cache_server_time = time.time()
cache_servers = retrieve_cache_urls(url, lib_download, hd_first=hd_first)
end_cache_server_time = time.time()
cache_server_delay = end_cache_server_time - start_cache_server_time
config_pytomo.LOG.debug('For url=%s the cache urls are %s',
url, cache_servers)
if not cache_servers:
config_pytomo.LOG.error('Error retrieving cache for: %s', url)
return next_urls
# redirect servers (all except last) just ping statistics are stored
for index, cache_server in enumerate(cache_servers[:-1]):
try:
stats, redirect_list = compute_stats(url, cache_server, False,
redirect_url=cache_servers[index + 1])
except TypeError:
config_pytomo.LOG.error('Error retrieving stats for: %s',
cache_server)
if stats:
add_stats(stats, cache_server_delay, url, result_stream, data_base)
else:
config_pytomo.LOG.info('no stats for url: %s', cache_server)
if redirect_list:
config_pytomo.LOG.info('redirect_list for redirect servers: %s',
redirect_list)
config_pytomo.LOG.info('these addresses are NOT taken into account')
do_full_crawl = check_full_download(len(crawled_urls))
# final redirect, download server for the video
cache_server = cache_servers[-1]
try:
stats, redirect_list = compute_stats(url, cache_server, True,
do_full_crawl=do_full_crawl)
except TypeError:
config_pytomo.LOG.error('Error retrieving stats for: %s', cache_server)
if stats:
add_stats(stats, cache_server_delay, url, result_stream, data_base)
# wait only if there were stats retrieved
time.sleep(config_pytomo.DELAY_BETWEEN_REQUESTS)
else:
config_pytomo.LOG.info('no stats for url: %s', cache_server)
if redirect_list and redirect_list != [None]:
#assert reduce(eq, redirect_list)
if not reduce(eq, redirect_list):
config_pytomo.LOG.error('redirect list urls are not the same: %s',
redirect_list)
cache_server = redirect_list[0]
stats, redirect_list = compute_stats(url, cache_server, True,
do_full_crawl=do_full_crawl)
if stats:
add_stats(stats, cache_server_delay, url, result_stream, data_base)
# wait only if there were stats retrieved
time.sleep(config_pytomo.DELAY_BETWEEN_REQUESTS)
else:
config_pytomo.LOG.info('no stats for url: %s', cache_server)
if redirect_list:
config_pytomo.LOG.error('new redirect list: %s', redirect_list)
config_pytomo.LOG.error('these addresses are NOT taken into '
'account')
if (related and len(next_urls) < config_pytomo.MAX_CRAWLED_URLS):
try:
related_urls = set(filter(None,
lib_api.get_related_urls(url,
config_pytomo.MAX_PER_PAGE,
config_pytomo.MAX_PER_URL)))
except TypeError:
return next_urls
next_urls = next_urls.union(related_urls)
return next_urls
def crawl_links(input_links, result_stream=None,
data_base=None, related=True, loop=False, hd_first=False):
'''Wrapper to crawl each input link'''
next_urls = set()
# When a redirect occurs, the database should store in the
# 'Url' field the first link that caused redirection and then statistics
# for each cache server it gets redirected to
# - download statistics: only for final cache server from which the video is
# downloaded
# - ping statistics: for each cache server (intermediate and final)
config_pytomo.LOG.debug('input_links: %s', input_links)
for url in input_links:
next_urls = crawl_link(url, next_urls, result_stream, data_base,
related, loop, hd_first=hd_first)
if not loop:
next_urls = next_urls.difference(input_links)
else:
next_urls = input_links.copy()
config_pytomo.LOG.debug('next_urls: %s', next_urls)
return next_urls
def do_rounds(input_links, result_stream, data_base, db_file,
image_file, related=True, loop=False, hd_first=False):
'''Perform the rounds of crawl'''
max_rounds = config_pytomo.MAX_ROUNDS
for round_nb in xrange(max_rounds):
config_pytomo.LOG.warn('Round %d started\n%s',
round_nb, config_pytomo.SEP_LINE)
# Reseting the name servers at start of each crawl
config_pytomo.EXTRA_NAME_SERVERS_CC = []
for (name_server, dns_server_ip_address) in (
config_pytomo.EXTRA_NAME_SERVERS):
config_pytomo.EXTRA_NAME_SERVERS_CC.append(
('_'.join((config_pytomo.PROVIDER, name_server)),
dns_server_ip_address))
# config_pytomo.EXTRA_NAME_SERVERS_CC = (
# config_pytomo.EXTRA_NAME_SERVERS[:])
config_pytomo.LOG.info('Name servers at round %s:',
config_pytomo.EXTRA_NAME_SERVERS_CC)
#config_pytomo.LOG.debug(input_links)
try:
input_links = crawl_links(input_links, result_stream,
data_base, related=related, loop=loop,
hd_first=hd_first)
except ValueError:
# AO 20120926 TODO: check if this catches exception of crawled_urls
# extraction from database
config_pytomo.LOG.debug('not able to retrieve stats from url')
# no sleep here: check if it's ok
continue
# early exit if no more links
if not input_links:
break
time.sleep(config_pytomo.DELAY_BETWEEN_REQUESTS)
config_pytomo.LOG.debug('Slept %d',
config_pytomo.DELAY_BETWEEN_REQUESTS)
# The plot is redrawn everytime the database is updated
if config_pytomo.PLOT:
lib_plot.plot_data(db_file, config_pytomo.COLUMN_NAMES,
image_file)
def do_crawl(result_stream=None, db_file=None, timestamp=None,
image_file=None, loop=False, related=True, hd_first=False):
    '''Crawl the urls given by the url_file
    until max_rounds rounds are performed or max_visited_urls urls are visited
    '''
if not db_file and not result_stream and not config_pytomo.SNMP:
config_pytomo.LOG.critical('Cannot start crawl because no file can '
'store output')
return
config_pytomo.LOG.critical('Start crawl')
if not timestamp:
timestamp = strftime('%Y-%m-%d.%H_%M_%S')
data_base = None
if db_file:
config_pytomo.DATABASE_TIMESTAMP = db_file
trans_table = maketrans('.-', '__')
config_pytomo.TABLE_TIMESTAMP = '_'.join((config_pytomo.TABLE,
timestamp)).translate(trans_table)
data_base = lib_database.PytomoDatabase(
config_pytomo.DATABASE_TIMESTAMP)
data_base.create_pytomo_table(config_pytomo.TABLE_TIMESTAMP)
# max_per_page = config_pytomo.MAX_PER_PAGE
# max_per_url = config_pytomo.MAX_PER_URL
config_pytomo.LOG.debug('STATIC_URL_LIST: %s',
config_pytomo.STATIC_URL_LIST)
if config_pytomo.STATIC_URL_LIST:
input_links = set(filter(None, config_pytomo.STATIC_URL_LIST))
else:
if config_pytomo.CRAWL_SERVICE == YOUTUBE_SERVICE:
lib_api = lib_youtube_api
elif config_pytomo.CRAWL_SERVICE == DAILYMOTION_SERVICE:
lib_api = lib_dailymotion_api
input_links = set(filter(None,
lib_api.get_popular_links(
input_time=config_pytomo.TIME_FRAME,
max_results=config_pytomo.MAX_PER_PAGE)))
if (config_pytomo.CRAWL_SERVICE == 'youtube'
and config_pytomo.EXTRA_COUNTRY):
links_country = set(filter(None,
lib_api.get_popular_links(
input_time=config_pytomo.TIME_FRAME,
max_results=config_pytomo.MAX_PER_PAGE,
country=config_pytomo.EXTRA_COUNTRY)))
input_links = input_links.union(links_country)
config_pytomo.LOG.debug('bootstrap links: %s', input_links)
if not input_links:
config_pytomo.LOG.critical('Cannot find input links to crawl')
if data_base:
data_base.close_handle()
return
try:
if loop:
while True:
do_rounds(input_links, result_stream, data_base, db_file,
image_file, related=related, loop=loop,
hd_first=hd_first)
else:
do_rounds(input_links, result_stream, data_base, db_file,
image_file, related=related, loop=loop, hd_first=hd_first)
# next round input are related links of the current input_links
# input_links = get_next_round_urls(lib_api, input_links, max_per_page,
# max_per_url)
except MaxUrlException:
config_pytomo.LOG.warn('Stopping crawl because %d urls have been '
'crawled', config_pytomo.MAX_CRAWLED_URLS)
if data_base:
data_base.close_handle()
config_pytomo.LOG.warn('Crawl finished\n' + config_pytomo.SEP_LINE)
def get_next_round_urls(lib_api, input_links,
max_per_page=config_pytomo.MAX_PER_PAGE,
max_per_url=config_pytomo.MAX_PER_URL,
max_round_duration=config_pytomo.MAX_ROUND_DURATION):
    ''' Return the set of related urls of videos for the given input urls
    Arguments:
        * input_links: list of the urls
        * max_per_url and max_per_page options
        * max_round_duration: stop collecting related urls after this delay
    '''
# keep only non-duplicated links and no links from input file
start = time.time()
if len(input_links) > CONTINUOUS_CRAWL_SIZE:
related_links = []
for url in input_links:
time.sleep(config_pytomo.DELAY_BETWEEN_REQUESTS)
related_links = concat(related_links,
lib_api.get_related_urls(url, max_per_page,
max_per_url))
if (time.time() - start) > max_round_duration:
break
related_links = set(related_links).difference(input_links)
else:
related_links = set(reduce(concat, (lib_api.get_related_urls(url,
max_per_page, max_per_url)
for url in input_links), [])
).difference(input_links)
config_pytomo.LOG.info('%d links collected by crawler',
len(related_links))
config_pytomo.LOG.debug(related_links)
return related_links
def convert_debug_level(_, __, value, parser):
'Convert the string passed to a logging level'
try:
log_level = config_pytomo.NAME_TO_LEVEL[value.upper()]
except KeyError:
parser.error('Incorrect log level.\n'
"Choose from: 'DEBUG', 'INFO', 'WARNING', "
"'ERROR' and 'CRITICAL' (default '%s')"
% config_pytomo.LEVEL_TO_NAME[
config_pytomo.LOG_LEVEL])
return
setattr(parser.values, 'LOG_LEVEL', log_level)
def set_proxies(_, __, value, parser):
'Convert the proxy passed to a dict to be handled by urllib2'
if value:
# remove quotes
value = value.translate(None, '\'"')
if not value.startswith('http://'):
value = 'http://'.join(('', value))
setattr(parser.values, 'PROXIES', {'http': value, 'https': value,
'ftp': value})
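# Example of what set_proxies builds for '--http-proxy proxy.example.com:3128'
# (host and port are hypothetical); the same proxy is registered for the
# three schemes handled by urllib2:
#   {'http': 'http://proxy.example.com:3128',
#    'https': 'http://proxy.example.com:3128',
#    'ftp': 'http://proxy.example.com:3128'}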
def create_options(parser):
'Add the different options to the parser'
parser.add_option('-b', '--batch', dest='BATCH_MODE', action='store_true',
help=('Do NOT prompt user for any input'),
default=config_pytomo.BATCH_MODE)
parser.add_option('-u', dest='MAX_CRAWLED_URLS', type='int',
help=('Max number of urls to visit (default %d)'
% config_pytomo.MAX_CRAWLED_URLS),
default=config_pytomo.MAX_CRAWLED_URLS)
parser.add_option('-r', dest='MAX_ROUNDS', type='int',
help=('Max number of rounds to perform (default %d)'
% config_pytomo.MAX_ROUNDS),
default=config_pytomo.MAX_ROUNDS)
parser.add_option('-l', '--loop', dest='LOOP', action='store_true',
default=config_pytomo.LOOP,
help=('Loop after completing the max nb of rounds '
'(default %s)' % config_pytomo.LOOP))
parser.add_option('--no-loop', dest='LOOP', action='store_false',
default=(not config_pytomo.LOOP),
help=('Do not loop after completing the max nb of rounds '
'(default %s)' % (not config_pytomo.LOOP)))
parser.add_option('-R', '--related', dest='RELATED',
action='store_true', default=config_pytomo.RELATED,
help=('Crawl related videos (default %s)'
% config_pytomo.RELATED))
parser.add_option('--no-related', dest='RELATED',
action='store_false', default=config_pytomo.RELATED,
help=('Do NOT crawl related videos (stays with the first '
'urls found: either most popular or arguments given) '
'(default %s)' % (not config_pytomo.RELATED)))
parser.add_option('-p', dest='MAX_PER_URL', type='int',
help=('Max number of related urls from each page '
'(default %d)' % config_pytomo.MAX_PER_URL),
default=config_pytomo.MAX_PER_URL)
parser.add_option('-P', dest='MAX_PER_PAGE', type='int',
help=('Max number of related videos from each page '
'(default %d)' % config_pytomo.MAX_PER_PAGE),
default=config_pytomo.MAX_PER_PAGE)
parser.add_option('-s', dest='CRAWL_SERVICE', type='string', action='store',
help=('Service for the most popular videos to fetch '
"at start of crawl: select between 'youtube', "
"or 'dailymotion' (default '%s')"
% config_pytomo.CRAWL_SERVICE),
default=config_pytomo.CRAWL_SERVICE)
parser.add_option('--snmp', dest='SNMP', action='store_true',
default=config_pytomo.SNMP,
help='SNMP mode')
parser.add_option('-t', dest='TIME_FRAME', type='string',
help=('Timeframe for the most popular videos to fetch '
"at start of crawl put 'today', or 'all_time' "
"(default '%s') [only for YouTube]"
% config_pytomo.TIME_FRAME),
default=config_pytomo.TIME_FRAME)
parser.add_option('-n', dest='PING_PACKETS', type='int',
help=('Number of packets to be sent for each ping '
'(default %d)' % config_pytomo.PING_PACKETS),
default=config_pytomo.PING_PACKETS)
parser.add_option('-D', dest='DOWNLOAD_TIME', type='float',
help=('Download time for the video in seconds '
'(default %f)' % config_pytomo.DOWNLOAD_TIME),
default=config_pytomo.DOWNLOAD_TIME)
parser.add_option('-S', dest='DELAY_BETWEEN_REQUESTS', type='float',
help=('Delay between consecutive video requests in '
' seconds (default %f)'
% config_pytomo.DELAY_BETWEEN_REQUESTS),
default=config_pytomo.DELAY_BETWEEN_REQUESTS)
# parser.add_option('-B', dest='INITIAL_PLAYBACK_DURATION ', type='float',
# help=('Buffering video duration in seconds (default %f)'
# % config_pytomo.INITIAL_PLAYBACK_DURATION),
# default=config_pytomo.INITIAL_PLAYBACK_DURATION )
# parser.add_option('-M', dest='MIN_PLAYOUT_BUFFER_SIZE', type='float',
# help=('Minimum Playout Buffer Size in seconds '
# '(default %f)' % config_pytomo.MIN_PLAYOUT_BUFFER),
# default=config_pytomo.MIN_PLAYOUT_BUFFER)
parser.add_option('-x', '--no-log-ip', dest='LOG_PUBLIC_IP',
action='store_false',
help=('Do NOT store public IP address of the machine '
'in the logs'), default=config_pytomo.LOG_PUBLIC_IP)
parser.add_option('-c', '--centralise', dest='CENTRALISE_DATA',
action='store_true',
help='Send logs to the centralisation server',
default=config_pytomo.CENTRALISE_DATA)
parser.add_option('--centralisation_server',
dest='CENTRALISATION_SERVER',
default=config_pytomo.CENTRALISATION_SERVER,
help=('FTP server to centralise data (default %s)'
% config_pytomo.CENTRALISATION_SERVER))
parser.add_option('--http-proxy', dest='PROXIES', type='string',
help=('in case of http proxy to reach Internet '
'(default %s)' % config_pytomo.PROXIES),
default=config_pytomo.PROXIES, action='callback',
callback=set_proxies)
parser.add_option('--provider', dest='PROVIDER', type='string',
help='Indicate the ISP', default=config_pytomo.PROVIDER)
parser.add_option('--download-extra-dns', dest='DOWNLOAD_FROM_EXTRA_IPS',
action='store_true',
default=config_pytomo.DOWNLOAD_FROM_EXTRA_IPS,
help=('Download videos from IP resolved by other DNS '
'(default %s)' % config_pytomo.DOWNLOAD_FROM_EXTRA_IPS))
parser.add_option('-L', dest='LOG_LEVEL', type='string',
help=('The log level setting for the Logging module. '
"Choose from: 'DEBUG', 'INFO', 'WARNING', "
"'ERROR' and 'CRITICAL' (default '%s')"
% config_pytomo.LEVEL_TO_NAME[
config_pytomo.LOG_LEVEL]),
default=config_pytomo.LOG_LEVEL, action='callback',
callback=convert_debug_level)
parser.add_option('-f', '--input-file', dest='INPUT_FILE', type='string',
help='File indicating the URLs to crawl (one URL per line)',
default=config_pytomo.INPUT_FILE)
parser.add_option('-H', '--hd', dest='HD_FIRST',
action='store_true',
help=('Tries to fetch video in HD (implemented only for '
'YouTube)'),
default=config_pytomo.HD_FIRST)
def check_options(parser, options):
'Check incompatible options'
if options.TIME_FRAME not in (['today', 'week', 'month', 'all_time']):
parser.error('Incorrect time frame.\n'
"Choose from: 'today', 'week', 'month', 'all_time' "
"(default: '%s')" % config_pytomo.TIME_FRAME)
if options.CRAWL_SERVICE.lower() not in ([YOUTUBE_SERVICE,
DAILYMOTION_SERVICE]):
parser.error('Incorrect Service.\n'
"Choose from: 'youtube', 'dailymotion' "
"(default '%s')" % config_pytomo.CRAWL_SERVICE)
def write_options_to_config(options):
'Write read options to config_pytomo'
for name, value in options.__dict__.items():
setattr(config_pytomo, name, value)
def log_ip_address():
'Log the remote IP addresses'
print('\nLogging the local public IP address.\n')
# is local address of some interest??
# check: http://stackoverflow.com/
# questions/166506/finding-local-ip-addresses-in-python
lib_links_extractor.configure_proxy()
count = 0
retries = 3
public_ip = None
while count < retries:
try:
if sys.hexversion >= int(0x2060000):
# timeout is available only python above 2.6
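# (0x2060000 is the sys.hexversion threshold for Python 2.6.0: urllib2.urlopen
# only accepts the third positional argument, a socket timeout, from 2.6 on,
# hence the two branches below)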
public_ip = json.load(
urllib2.urlopen(PUBLIC_IP_FINDER, None,
config_pytomo.IPADDR_TIMEOUT))['data']['ip']
else:
public_ip = json.load(
urllib2.urlopen(PUBLIC_IP_FINDER, None,
config_pytomo.URL_TIMEOUT))['data']['ip']
#except urllib2.URLError, mes:
except Exception, mes:
config_pytomo.LOG.critical('Public IP address not found: %s', mes)
count += 1
print('Public IP address not found: %s, retrying in %i seconds'
% (mes, count))
time.sleep(count)
continue
else:
# Check for valid IP
is_valid = re.match(IP_MATCH_PATTERN, public_ip)
if is_valid:
config_pytomo.LOG.critical('Machine has this public IP address:'
' %s', public_ip)
else:
config_pytomo.LOG.critical('Unable to Parse IP address: %s... '
'Skipping', public_ip)
break
if count >= retries:
config_pytomo.LOG.error(u'ERROR: giving up after %d retries, public IP'
' not found', retries)
print('Public IP address could not be logged.\n')
def log_md5_results(result_file, db_file):
'Computes and stores the md5 hash of result and database files'
if db_file:
config_pytomo.LOG.critical('Hash of database file: %s',
md5sum(db_file))
if result_file and result_file != sys.stdout:
config_pytomo.LOG.critical('Hash of result file: %s',
md5sum(result_file))
def configure_log_file(timestamp):
'Configure log file and indicate success or failure'
print('Configuring log file')
if config_pytomo.LOG_LEVEL == logging.DEBUG:
# to have kaa-metadata logs
config_pytomo.LOG = logging.getLogger('metadata')
else:
config_pytomo.LOG = logging.getLogger('demo')
if config_pytomo.LOG_FILE == '-':
handler = logging.StreamHandler(sys.stdout)
print('Logs are on standard output')
log_file = True
else:
try:
log_file = check_out_files(config_pytomo.LOG_FILE,
config_pytomo.LOG_DIR, timestamp)
except IOError:
raise IOError('Logfile %s could not be opened for writing'
% config_pytomo.LOG_FILE)
print('Logs are there: %s' % log_file)
# for lib_youtube_download
config_pytomo.LOG_FILE_TIMESTAMP = log_file
handler = logging.FileHandler(filename=log_file)
log_formatter = logging.Formatter('%(asctime)s - %(filename)s:%(lineno)d - '
'%(levelname)s - %(message)s')
handler.setFormatter(log_formatter)
config_pytomo.LOG.addHandler(handler)
config_pytomo.LOG.setLevel(config_pytomo.LOG_LEVEL)
config_pytomo.LOG.critical('Log level set to %s',
config_pytomo.LEVEL_TO_NAME[config_pytomo.LOG_LEVEL])
# to not have console output
config_pytomo.LOG.propagate = False
# log all config file values except built in values
for value in filter(lambda x: not x.startswith('__'),
config_pytomo.__dict__):
config_pytomo.LOG.critical('%s: %s',
value, getattr(config_pytomo, value))
return log_file
class MyTimeoutException(Exception):
'Class to generate timeout exceptions'
pass
def configure_alarm(timeout):
'''Set timeout if the OS supports it
Return a bool indicating if signal is supported'''
def timeout_handler(signum, frame):
'handle the timeout'
raise MyTimeoutException()
# non-posix support for signals is weak
support_signal = hasattr(signal, 'SIGALRM') and hasattr(signal, 'alarm')
if support_signal:
signal.signal(signal.SIGALRM, timeout_handler)
# trigger alarm in timeout seconds
signal.alarm(timeout)
return support_signal
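# Typical use of this helper, as in set_max_crawls and set_proxies_cli below:
# support_signal = configure_alarm(timeout)
# try:
# answer = prompt_the_user() # blocking raw_input, aborted by MyTimeoutException
# except MyTimeoutException:
# return None
# finally:
# if support_signal:
# signal.alarm(0) # cancel the pending alarm
# ('prompt_the_user' stands for any blocking prompt; the name is illustrative)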
def prompt_max_crawls(support_signal, timeout):
'Function to prompt the user to enter max_urls'
return raw_input('\n'.join(('Please enter the max. number of videos '
'(press Enter for default %d): ' %
config_pytomo.MAX_CRAWLED_URLS,
('(or wait %d seconds)' % timeout if support_signal
else ''), '')))
def set_max_crawls(timeout=config_pytomo.USER_INPUT_TIMEOUT, prompt=True,
nb_max_crawls=config_pytomo.MAX_CRAWLED_URLS):
'Sets the max number of videos to be crawled'
support_signal = configure_alarm(timeout)
try:
if prompt:
max_crawls = prompt_max_crawls(support_signal, timeout)
else:
max_crawls = nb_max_crawls
except MyTimeoutException:
return None
finally:
if support_signal:
# alarm disabled
signal.alarm(0)
if max_crawls:
try:
config_pytomo.MAX_CRAWLED_URLS = int(max_crawls)
except ValueError:
config_pytomo.LOG.error('User gave non-integer value: %s',
max_crawls)
max_crawls_message = ('The max number of crawls has been set to: %s'
% config_pytomo.MAX_CRAWLED_URLS)
print(max_crawls_message)
config_pytomo.LOG.critical(max_crawls_message)
return max_crawls
def prompt_proxies(support_signal, timeout):
''' Function to prompt the user to enter the proxies it uses to connect to
the internet'''
return raw_input("\n".join(("Please enter the proxies you use to connect to"
" the internet, in the format:\n"
"http://proxy:8080/\n"
"(press Enter for default: %s): " %
config_pytomo.PROXIES,
("(or wait %d seconds)" % timeout if support_signal
else ""), "")))
def set_proxies_cli(timeout=config_pytomo.USER_INPUT_TIMEOUT):
''' Sets the proxies needed to connect to the internet'''
support_signal = configure_alarm(timeout)
try:
cli_proxies = prompt_proxies(support_signal, timeout)
except MyTimeoutException:
return None
finally:
if support_signal:
# alarm disabled
signal.alarm(0)
if cli_proxies:
try:
#config_pytomo.PROXIES = literal_eval(cli_proxies)
setattr(config_pytomo, 'PROXIES',
{'http': cli_proxies, 'https': cli_proxies,
'ftp': cli_proxies})
except (ValueError, SyntaxError):
proxies_message = ("ERROR: User gave incorrect proxy format: *%s*\n"
"Will use default proxies: %s\nIf you need to "
"configure a specific proxy, please try to run"
" the application again, respecting the format:"
"\nhttp://proxy:8080/\n" %
(cli_proxies, config_pytomo.PROXIES))
config_pytomo.LOG.error(proxies_message)
print(proxies_message)
proxies_message = ('The Proxies have been set to: %s\n'
% config_pytomo.PROXIES)
print(proxies_message)
config_pytomo.LOG.critical(proxies_message)
return cli_proxies
def log_provider(timeout=config_pytomo.USER_INPUT_TIMEOUT):
'Get and log the provider from the user, or skip after timeout seconds'
support_signal = configure_alarm(timeout)
try:
provider = prompt_provider(support_signal, timeout)
except MyTimeoutException:
return None
finally:
if support_signal:
# alarm disabled
signal.alarm(0)
config_pytomo.LOG.critical('User has given this provider: %s', provider)
config_pytomo.PROVIDER = provider
def prompt_provider(support_signal, timeout):
'Function to prompt for provider'
return raw_input(''.join((
'Please indicate your provider/ISP (leave blank for skipping).\n',
'Crawl will START when you PRESS ENTER',
((' (or after %d seconds)' % timeout) if support_signal else ''),
'.\n')))
def prompt_start_crawl():
'Function to prompt the user to accept the crawl'
return raw_input('Are you ok to start crawling? (Y/N)\n').upper()
def main(version=None, argv=None):
'''Program wrapper
Setup of log part
'''
if not argv:
argv = sys.argv[1:]
usage = ('%prog [-b --batch] '
'[-u max_crawled_url] '
'[-r max_rounds] '
'[-l, --loop|--no-loop] '
'[-R --related|--no-related] '
'[-p max_per_url] '
'[-P max_per_page] '
'[-s {youtube, dailymotion}] '
'[--snmp] '
'[-t time_frame] '
'[-n ping_packets] '
'[-D download_time] '
'[-S delay_between_requests] '
#'[-B buffering_video_duration] '
#'[-M min_playout_buffer_size] '
'[-x, --no-log-ip] '
'[-c, --centralise] '
'[--http-proxy=http://proxy:8080] '
'[--provider=MY_ISP] '
'[--download-extra-dns] '
'[-L log_level] '
'[-f, --input-file input_file_list] '
'[input_urls]')
parser = OptionParser(usage=usage)
create_options(parser)
(options, input_urls) = parser.parse_args(argv)
check_options(parser, options)
write_options_to_config(options)
timestamp = strftime('%Y-%m-%d.%H_%M_%S')
log_file = configure_log_file(timestamp)
image_file = None
if not log_file:
return -1
try:
result_file = check_out_files(config_pytomo.RESULT_FILE,
config_pytomo.RESULT_DIR, timestamp)
except IOError:
result_file = None
if result_file:
print('Text results are there: %s' % result_file)
try:
db_file = check_out_files(config_pytomo.DATABASE,
config_pytomo.DATABASE_DIR, timestamp)
except IOError:
db_file = None
if db_file:
print('Database results are there: %s' % db_file)
if config_pytomo.SNMP:
set_up_snmp()
config_pytomo.LOG.critical('Offset between local time and UTC: %d',
timezone)
config_pytomo.LOG.warn('Pytomo version = %s', version)
config_pytomo.SYSTEM = platform.system()
config_pytomo.LOG.warn('Pytomo is running on this system: %s',
config_pytomo.SYSTEM)
if config_pytomo.PLOT:
try:
image_file = check_out_files(config_pytomo.IMAGE_FILE,
config_pytomo.IMAGE_DIR, timestamp)
except IOError:
image_file = None
if image_file:
print('Plots are here: %s' % image_file)
else:
print('Unable to create image_file')
# do NOT prompt for start if BATCH_MODE on (no input expected from user)
if not options.BATCH_MODE:
while True:
start_crawl = prompt_start_crawl()
if start_crawl.startswith('N'):
return 0
elif start_crawl.startswith('Y'):
break
if not options.PROVIDER:
log_provider(timeout=config_pytomo.USER_INPUT_TIMEOUT)
else:
config_pytomo.LOG.critical('Provider given at command line: %s',
options.PROVIDER)
if not options.PROXIES:
set_proxies_cli(timeout=config_pytomo.USER_INPUT_TIMEOUT*2)
else:
config_pytomo.LOG.critical('Proxies given at command line: %s\n',
options.PROXIES)
set_max_crawls(timeout=config_pytomo.USER_INPUT_TIMEOUT,
prompt=(False if options.BATCH_MODE else True),
nb_max_crawls=options.MAX_CRAWLED_URLS)
# log IP after proxies given by the user
if config_pytomo.LOG_PUBLIC_IP:
log_ip_address()
print('Type Ctrl-C to interrupt crawl')
result_stream = None
# memory monitoring module
if result_file:
result_stream = open(result_file, 'w')
if input_urls:
config_pytomo.STATIC_URL_LIST = (config_pytomo.STATIC_URL_LIST
+ input_urls)
if config_pytomo.INPUT_FILE:
try:
with open(config_pytomo.INPUT_FILE, 'r') as input_file:
for line in input_file.readlines():
config_pytomo.STATIC_URL_LIST.append(line.strip())
except IOError, mes:
config_pytomo.LOG.exception(mes)
parser.error('Problem reading input file: %s'
% config_pytomo.INPUT_FILE)
config_pytomo.LOG.debug('Service for most popular links %s',
options.CRAWL_SERVICE)
try:
do_crawl(result_stream=result_stream, db_file=db_file,
image_file=image_file, timestamp=timestamp,
loop=config_pytomo.LOOP, related=config_pytomo.RELATED,
hd_first=config_pytomo.HD_FIRST)
except config_pytomo.BlackListException:
err_mes = ('Crawl detected by YouTube: '
'log to YouTube and enter captcha')
config_pytomo.LOG.critical(err_mes)
print(err_mes)
except KeyboardInterrupt:
print('\nCrawl interrupted by user')
config_pytomo.LOG.critical('Crawl interrupted by user')
except Exception, mes:
config_pytomo.LOG.exception('Uncaught exception: %s', mes)
config_pytomo.LOG.debug(CACHED_PREFIXES)
if config_pytomo.PLOT:
lib_plot.plot_data(db_file, config_pytomo.COLUMN_NAMES,
image_file)
if result_file:
result_stream.close()
log_md5_results(result_file, db_file)
print('Compressing the files: wait a bit')
tarfile_name = check_out_files('to_send.tbz',
config_pytomo.LOG_DIR, timestamp)
tar_file = tarfile.open(name=tarfile_name, mode='w:bz2')
tar_file.add(db_file, arcname=os.path.basename(db_file))
if type(log_file) == str:
tar_file.add(log_file, arcname=os.path.basename(log_file))
tar_file.close()
# upload archive on the FTP server
if options.CENTRALISE_DATA:
print('Trying to upload the files on the centralisation server...\n')
ftp = lib_data_centralisation.PytomoFTP()
if ftp.created:
if (ftp.upload_file(tarfile_name) !=
lib_data_centralisation.ERROR_CODE):
print('\nFile %s has been uploaded on %s server.' %
(tarfile_name, config_pytomo.CENTRALISATION_SERVER))
return 0
else:
print('\nWARNING! File %s was not uploaded to %s server.' %
(tarfile_name, config_pytomo.CENTRALISATION_SERVER))
ftp.close_connection()
else:
print('\nWARNING! Could not establish connection to %s FTP server.'
% (config_pytomo.CENTRALISATION_SERVER))
print('\nCrawl finished.\n%s\n\nPLEASE SEND THIS FILE BY EMAIL: '
'(to pytomo@gmail.com)\n%s\n'
% (SEPARATOR_LINE, tarfile_name))
if not options.BATCH_MODE:
raw_input('Press Enter to exit\n')
return 0
if __name__ == '__main__':
import doctest
doctest.testmod()
| Jamlum/pytomo | pytomo/start_pytomo.py | Python | gpl-2.0 | 76,253 | ["VisIt"] | c603231a72e13d3f5ff1e7b1aa2784ef2d8e3808a2c3758561dc1b6888077971 |
#!/usr/bin/env python
import os
import sys
import pychemia.code.octopus
def helper():
print(""" Set a variable in octopus
Use:
octopus_setvar.py --filename 'Octopus_Input_File' [ --set varname value ]... [--del varname]...
""")
if __name__ == '__main__':
# Script starts from here
if len(sys.argv) < 2:
helper()
sys.exit(1)
filename = ''
toset = {}
todel = []
for i in range(1, len(sys.argv)):
if sys.argv[i].startswith('--'):
option = sys.argv[i][2:]
# take sys.argv[i] without its first two characters (the leading '--')
if option == 'version':
print('Version 1.0')
sys.exit()
elif option == 'help':
helper()
sys.exit()
elif option == 'filename':
filename = sys.argv[i + 1]
elif option == 'set':
toset[sys.argv[i + 1]] = sys.argv[i + 2]
elif option == 'del':
todel.append(sys.argv[i + 1])
else:
print('Unknown option. --' + option)
# Set the variables
if os.path.isfile(filename):
data = pychemia.code.octopus.AbinitInput(filename)
for i in toset.keys():
data.variables[i] = toset[i]
for i in todel:
if i in data.variables.keys():
data.variables.pop(i)
data.write(filename)
else:
print('ERROR: no filename', filename)
| MaterialsDiscovery/PyChemia | scripts/oct_setvar.py | Python | mit | 1,497 | ["Octopus"] | 9c8febe3739e5b47e27ddf3dd9865837209fd7f5a3fb482d0c0282482a20da40 |
from distutils.version import LooseVersion
import pytest
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter, list_providers
from cfme.markers.env import EnvironmentMarker
from cfme.utils.pytest_shortcuts import fixture_filter
ONE = 'one'
ALL = 'all'
LATEST = 'latest'
ONE_PER_VERSION = 'one_per_version'
ONE_PER_CATEGORY = 'one_per_category'
ONE_PER_TYPE = 'one_per_type'
def _param_check(metafunc, argnames, argvalues):
"""Helper function to check if parametrizing is necessary
* If no argnames were specified, parametrization is unnecessary.
* If argvalues were generated, parametrization is necessary.
* If argnames were specified, but no values were generated, the test cannot run successfully,
and will be uncollected using the :py:mod:`markers.uncollect` mark.
See usage in :py:func:`parametrize`
Args:
metafunc: metafunc objects from pytest_generate_tests
argnames: argnames list for use in metafunc.parametrize
argvalues: argvalues list for use in metafunc.parametrize
Returns:
* ``True`` if this test should be parametrized
* ``False`` if it shouldn't be parametrized
* ``None`` if the test will be uncollected
"""
# If no parametrized args were named, don't parametrize
if not argnames:
return False
# If parametrized args were named and values were generated, parametrize
elif any(argvalues):
return True
# If parametrized args were named, but no values were generated, mark this test to be
# removed from the test collection. Otherwise, py.test will try to find values for the
# items in argnames by looking in its fixture pool, which will almost certainly fail.
else:
# module and class are optional, but function isn't
modname = getattr(metafunc.module, '__name__', None)
classname = getattr(metafunc.cls, '__name__', None)
funcname = metafunc.function.__name__
test_name = '.'.join(filter(None, (modname, classname, funcname)))
uncollect_msg = 'Parametrization for {} yielded no values,'\
' marked for uncollection'.format(test_name)
logger.warning(uncollect_msg)
# apply the mark
pytest.mark.uncollect(reason=uncollect_msg)(metafunc.function)
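# Illustrative behaviour (the values are hypothetical): with
# argnames=['provider'] and a non-empty argvalues list the helper returns
# True; with argnames=[] it returns False; with argnames=['provider'] but
# argvalues=[] it applies the uncollect mark to metafunc.function and
# implicitly returns None.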
def parametrize(metafunc, argnames, argvalues, *args, **kwargs):
"""parametrize wrapper that calls :py:func:`_param_check`, and only parametrizes when needed
This can be used in any place where conditional parametrization is used.
"""
kwargs.pop('selector')
if _param_check(metafunc, argnames, argvalues):
metafunc.parametrize(argnames, argvalues, *args, **kwargs)
# if param check failed and the test was supposed to be parametrized around a provider
elif 'provider' in metafunc.fixturenames:
try:
# hack to pass through in case of a failed param_check
# where it sets a custom message
metafunc.function.uncollect
except AttributeError:
pytest.mark.uncollect(
reason="provider was not parametrized did you forget --use-provider?"
)(metafunc.function)
def providers(metafunc, filters=None, selector=ALL):
""" Gets providers based on given (+ global) filters
Note:
Using the default 'function' scope, each test will be run individually for each provider
before moving on to the next test. To group all tests related to single provider together,
parametrize tests in the 'module' scope.
Note:
testgen for providers now requires the usage of test_flags for collection to work.
Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
for more details.
"""
filters = filters or []
argnames = []
argvalues = []
idlist = []
# Obtains the test's flags in form of a ProviderFilter
meta = getattr(metafunc.function, 'meta', None)
test_flag_str = getattr(meta, 'kwargs', {}).get('from_docs', {}).get('test_flag')
if test_flag_str:
test_flags = test_flag_str.split(',')
flags_filter = ProviderFilter(required_flags=test_flags)
filters = filters + [flags_filter]
potential_providers = list_providers(filters)
if selector == ONE:
if potential_providers:
allowed_providers = [potential_providers[0]]
else:
allowed_providers = []
elif selector == LATEST:
allowed_providers = [sorted(
potential_providers, key=lambda k:LooseVersion(
str(k.data.get('version', 0))), reverse=True
)[0]]
elif selector == ONE_PER_TYPE:
types = set()
def add_prov(prov):
types.add(prov.type)
return prov
allowed_providers = [
add_prov(prov) for prov in potential_providers if prov.type not in types
]
elif selector == ONE_PER_CATEGORY:
categories = set()
def add_prov(prov):
categories.add(prov.category)
return prov
allowed_providers = [
add_prov(prov) for prov in potential_providers if prov.category not in categories
]
elif selector == ONE_PER_VERSION:
versions = set()
def add_prov(prov):
versions.add(prov.data.get('version', 0))
return prov
allowed_providers = [
add_prov(prov) for prov in potential_providers if prov.data.get(
'version', 0) not in versions
]
else:
allowed_providers = potential_providers
for provider in allowed_providers:
argvalues.append([provider])
# Use the provider key for idlist, helps with readable parametrized test output
idlist.append(provider.key)
# Add provider to argnames if missing
if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
argnames.append('provider')
if metafunc.config.getoption('sauce') or selector == ONE:
break
return argnames, argvalues, idlist
def providers_by_class(metafunc, classes, required_fields=None, selector=ALL):
""" Gets providers by their class
Args:
metafunc: Passed in by pytest
classes: List of classes to fetch
required_fields: See :py:class:`cfme.utils.provider.ProviderFilter`
Usage:
# In the function itself
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.providers_by_class(
[GCEProvider, AzureProvider], required_fields=['provisioning']
)
metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')
# Using the parametrize wrapper
pytest_generate_tests = testgen.parametrize([GCEProvider], scope='module')
"""
pf = ProviderFilter(classes=classes, required_fields=required_fields)
return providers(metafunc, filters=[pf], selector=selector)
class ProviderEnvironmentMarker(EnvironmentMarker):
NAME = 'provider'
def process_env_mark(self, metafunc):
if hasattr(metafunc.function, self.NAME):
args = getattr(metafunc.function, self.NAME).args
kwargs = getattr(metafunc.function, self.NAME).kwargs.copy()
scope = kwargs.pop('scope', 'function')
indirect = kwargs.pop('indirect', False)
filter_unused = kwargs.pop('filter_unused', True)
selector = kwargs.pop('selector', ALL)
gen_func = kwargs.pop('gen_func', providers_by_class)
# If parametrize doesn't get you what you need, steal this and modify as needed
kwargs.update({'selector': selector})
argnames, argvalues, idlist = gen_func(metafunc, *args, **kwargs)
# Filter out argnames that aren't requested on the metafunc test item, so not all tests
# need all fixtures to run, and tests not using gen_func's fixtures aren't parametrized.
if filter_unused:
argnames, argvalues = fixture_filter(metafunc, argnames, argvalues)
# See if we have to parametrize at all after filtering
parametrize(
metafunc, argnames, argvalues, indirect=indirect,
ids=idlist, scope=scope, selector=selector
)
| mfalesni/cfme_tests | cfme/markers/env_markers/provider.py | Python | gpl-2.0 | 8,447 | ["VisIt"] | f7cef5abf4b4d00a10e6e9dbe50b6fb76e89fc606c9d4c34192fbc76970cf622 |
# Copyright 2003, 2007 by Sebastian Bassi. sbassi@genesdigitales.com
# All rights reserved. This code is part of the Biopython
# distribution and governed by its license.
# Please see the LICENSE file that should have been included as part
# of this package.
"""Local Composition Complexity."""
import math
def lcc_mult(seq, wsize):
"""Local Composition Complexity (LCC) values over sliding window.
Returns a list of floats, the LCC values for a sliding window over
the sequence.
seq - an unambiguous DNA sequence (a string or Seq object)
wsize - window size, integer
The result is the same as applying lcc_simp multiple times, but this
version is optimized for speed. The optimization works by using the
value of previous window as a base to compute the next one."""
l2 = math.log(2)
tamseq = len(seq)
try:
# Assume it's a string
upper = seq.upper()
except AttributeError:
# Should be a Seq object then
upper = str(seq).upper()
compone = [0]
lccsal = [0]
for i in range(wsize):
compone.append(((i + 1) / float(wsize)) *
((math.log((i + 1) / float(wsize))) / l2))
window = upper[0:wsize]
cant_a = window.count('A')
cant_c = window.count('C')
cant_t = window.count('T')
cant_g = window.count('G')
term_a = compone[cant_a]
term_c = compone[cant_c]
term_t = compone[cant_t]
term_g = compone[cant_g]
lccsal.append(-(term_a + term_c + term_t + term_g))
tail = upper[0]
for x in range(tamseq - wsize):
window = upper[x + 1:wsize + x + 1]
if tail == window[-1]:
lccsal.append(lccsal[-1])
elif tail == 'A':
cant_a -= 1
if window.endswith('C'):
cant_c += 1
term_a = compone[cant_a]
term_c = compone[cant_c]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif window.endswith('T'):
cant_t += 1
term_a = compone[cant_a]
term_t = compone[cant_t]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif window.endswith('G'):
cant_g += 1
term_a = compone[cant_a]
term_g = compone[cant_g]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif tail == 'C':
cant_c -= 1
if window.endswith('A'):
cant_a += 1
term_a = compone[cant_a]
term_c = compone[cant_c]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif window.endswith('T'):
cant_t += 1
term_c = compone[cant_c]
term_t = compone[cant_t]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif window.endswith('G'):
cant_g += 1
term_c = compone[cant_c]
term_g = compone[cant_g]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif tail == 'T':
cant_t -= 1
if window.endswith('A'):
cant_a += 1
term_a = compone[cant_a]
term_t = compone[cant_t]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif window.endswith('C'):
cant_c += 1
term_c = compone[cant_c]
term_t = compone[cant_t]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif window.endswith('G'):
cant_g += 1
term_t = compone[cant_t]
term_g = compone[cant_g]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif tail == 'G':
cant_g -= 1
if window.endswith('A'):
cant_a += 1
term_a = compone[cant_a]
term_g = compone[cant_g]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif window.endswith('C'):
cant_c += 1
term_c = compone[cant_c]
term_g = compone[cant_g]
lccsal.append(-(term_a + term_c + term_t + term_g))
elif window.endswith('T'):
cant_t += 1
term_t = compone[cant_t]
term_g = compone[cant_g]
lccsal.append(-(term_a + term_c + term_t + term_g))
tail = window[0]
return lccsal
def lcc_simp(seq):
"""Local Composition Complexity (LCC) for a sequence.
seq - an unambiguous DNA sequence (a string or Seq object)
Returns the Local Composition Complexity (LCC) value for the entire
sequence (as a float).
Reference:
Andrzej K Konopka (2005) Sequence Complexity and Composition
DOI: 10.1038/npg.els.0005260
"""
wsize = len(seq)
try:
# Assume it's a string
upper = seq.upper()
except AttributeError:
# Should be a Seq object then
upper = str(seq).upper()
l2 = math.log(2)
if 'A' not in seq:
term_a = 0
# Check to avoid calculating the log of 0.
else:
term_a = ((upper.count('A')) / float(wsize)) * \
((math.log((upper.count('A')) / float(wsize))) / l2)
if 'C' not in seq:
term_c = 0
else:
term_c = ((upper.count('C')) / float(wsize)) * \
((math.log((upper.count('C')) / float(wsize))) / l2)
if 'T' not in seq:
term_t = 0
else:
term_t = ((upper.count('T')) / float(wsize)) * \
((math.log((upper.count('T')) / float(wsize))) / l2)
if 'G' not in seq:
term_g = 0
else:
term_g = ((upper.count('G')) / float(wsize)) * \
((math.log((upper.count('G')) / float(wsize))) / l2)
return -(term_a + term_c + term_t + term_g)
| zjuchenyuan/BioWeb | Lib/Bio/SeqUtils/lcc.py | Python | mit | 5,908 | ["Biopython"] | 087fb911b7807ce51bf55ebc09b8b63a53347a7e65d27d870d423a462ad12574 |
"""
Test the convergence of a small diffusion problem discretized with the local
discontinuous Galerkin method. The polynomial order is 5. To utilize the
visualization capabilities, you need to have Paraview and scikits.delaunay
installed.
References
----------
[1] L. N. Olson and J. B. Schroder. Smoothed Aggregation Multigrid Solvers for
High-Order Discontinuous Galerkin Methods. Journal of Computational Physics.
Submitted 2010.
"""
import numpy
import scipy
from pyamg.gallery import load_example
from pyamg import smoothed_aggregation_solver
from convergence_tools import print_cycle_history
if __name__ == '__main__':
print "\nDiffusion problem discretized with p=5 and the local\n" + \
"discontinuous Galerkin method."
# Discontinuous Galerkin Diffusion Problem
data = load_example('local_disc_galerkin_diffusion')
A = data['A'].tocsr()
B = data['B']
elements = data['elements']
vertices = data['vertices']
numpy.random.seed(625)
x0 = scipy.rand(A.shape[0])
b = numpy.zeros_like(x0)
##
# For demonstration, show that a naive SA solver
# yields unsatisfactory convergence
smooth=('jacobi', {'filter' : True})
strength=('symmetric', {'theta' : 0.1})
SA_solve_args={'cycle':'W', 'maxiter':20, 'tol':1e-8, 'accel' : 'cg'}
SA_build_args={'max_levels':10, 'max_coarse':25, 'coarse_solver':'pinv2', \
'symmetry':'hermitian', 'keep':True}
presmoother =('gauss_seidel', {'sweep':'symmetric', 'iterations':1})
postsmoother=('gauss_seidel', {'sweep':'symmetric', 'iterations':1})
##
# Construct solver and solve
sa = smoothed_aggregation_solver(A, B=B, smooth=smooth, \
strength=strength, presmoother=presmoother, \
postsmoother=postsmoother, **SA_build_args)
resvec = []
x = sa.solve(b, x0=x0, residuals=resvec, **SA_solve_args)
print "\n*************************************************************"
print "*************************************************************"
print "Observe that standard SA parameters for this p=5 discontinuous \n" + \
"Galerkin system yield an inefficient solver.\n"
print_cycle_history(resvec, sa, verbose=True, plotting=False)
##
# Now, construct and solve with appropriate parameters
p = 5
Bimprove = [('block_gauss_seidel', {'sweep':'symmetric', 'iterations':p}),
('gauss_seidel', {'sweep':'symmetric', 'iterations':p})]
aggregate = ['naive', 'standard']
# the initial conforming aggregation step requires no prolongation smoothing
smooth=[None, ('energy', {'krylov' : 'cg', 'maxiter' : p})]
strength =[('distance', {'V' : data['vertices'], 'theta':5e-5, 'relative_drop':False}),\
('evolution', {'k':4, 'proj_type':'l2', 'epsilon':2.0})]
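# Level 0 pairs a geometric, distance-based strength measure (built from the
# DG vertex coordinates) with naive aggregation so that the p=5 basis
# functions are first grouped conformingly; coarser levels fall back to the
# algebraic evolution measure and standard aggregation, following the
# reference cited in the module docstring.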
sa = smoothed_aggregation_solver(A, B=B, smooth=smooth, Bimprove=Bimprove,\
strength=strength, presmoother=presmoother, aggregate=aggregate,\
postsmoother=postsmoother, **SA_build_args)
resvec = []
x = sa.solve(b, x0=x0, residuals=resvec, **SA_solve_args)
print "\n*************************************************************"
print "*************************************************************"
print "Now use appropriate parameters, especially \'energy\' prolongation\n" + \
"smoothing and a distance based strength measure on level 0. This\n" + \
"yields a much more efficient solver.\n"
print_cycle_history(resvec, sa, verbose=True, plotting=False)
##
# check for scikits and print message about needing to have paraview in order
# to view the visualization files
try:
from scikits import delaunay
except ImportError:
print "Must install scikits.delaunay to generate the visualization files (.vtu for Paraview)."
# generate visualization files
print "\n\n*************************************************************"
print "*************************************************************"
print "Generating visualization files in .vtu format for use with Paraview."
print "\nAll values from coarse levels are interpolated using the aggregates,\n" +\
"i.e., there is no fixed geometric hierarchy. Additionally, the mesh\n" +\
"has been artificially shrunk towards each element's barycenter, in order\n" +\
"to highlight the discontinuous nature of the discretization.\n"
print "-- Near null-space mode from level * is in the file\n"+\
" DG_Example_B_variable0_lvl*.vtu"
print "-- Aggregtes from level * are in the two file\n"+\
" DG_Example_aggs_lvl*_point-aggs, and \n"+\
" DG_Example_aggs_lvl*_aggs.vtu"
print "-- The mesh from from level * is in the file\n"+\
" DG_Example_mesh_lvl*.vtu"
print "-- The error is in file\n"+\
" DG_Example_error_variable0.vtu"
print ""
from my_vis import shrink_elmts, my_vis
elements2,vertices2 = shrink_elmts(elements, vertices)
my_vis(sa, vertices2, error=x, fname="DG_Example_", E2V=elements2[:,0:3])
| pombreda/pyamg | Examples/Diffusion/demo_local_disc_galerkin_diffusion.py | Python | bsd-3-clause | 5,019 | ["ParaView"] | a9d9448c63bb9521ff8fd8b3ee82e54ac19f4fd287558a18446930ebe94ba878 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2009 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
import platform
from setuptools import setup
version = '1.4.1'
if platform.system() in ('Linux','Unix'):
man1pages = [('share/man/man1', [
'csv2ods/csv2ods.1',
'mailodf/mailodf.1',
'odf2xhtml/odf2xhtml.1',
'odf2mht/odf2mht.1',
'odf2xml/odf2xml.1',
'odfimgimport/odfimgimport.1',
'odflint/odflint.1',
'odfmeta/odfmeta.1',
'odfoutline/odfoutline.1',
'odfuserfield/odfuserfield.1',
'xml2odf/xml2odf.1'])]
else:
man1pages = []
# Currently no other data files to add
datafiles = [] + man1pages
setup(name='odfpy',
version=version,
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'License :: OSI Approved :: GNU General Public License (GPL)',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Office/Business',
'Topic :: Software Development :: Libraries :: Python Modules',
],
description='Python API and tools to manipulate OpenDocument files',
long_description = (
"""
Odfpy is a library to read and write OpenDocument v. 1.2 files.
The main focus has been to prevent the programmer from creating invalid
documents. It has checks that raise an exception if the programmer adds
an invalid element, adds an attribute unknown to the grammar, forgets to
add a required attribute or adds text to an element that doesn't allow it.
These checks and the API itself were generated from the RelaxNG
schema, and then hand-edited. Therefore the API is complete and can
handle all ODF constructions.
In addition to the API, there are a few scripts:
- csv2odf - Create OpenDocument spreadsheet from comma separated values
- mailodf - Email ODF file as HTML archive
- odf2xhtml - Convert ODF to (X)HTML
- odf2mht - Convert ODF to HTML archive
- odf2xml - Create OpenDocument XML file from OD? package
- odfimgimport - Import external images
- odflint - Check ODF file for problems
- odfmeta - List or change the metadata of an ODF file
- odfoutline - Show outline of OpenDocument
- odfuserfield - List or change the user-field declarations in an ODF file
- xml2odf - Create OD? package from OpenDocument in XML form
The source code is at https://github.com/eea/odfpy
Visit https://github.com/eea/odfpy/wiki for documentation and examples.
The code at https://joinup.ec.europa.eu/software/odfpy/home is obsolete."""
),
author='Soren Roug',
author_email='soren.roug@eea.europa.eu',
url='https://github.com/eea/odfpy',
packages=['odf'],
scripts=[
'csv2ods/csv2ods',
'mailodf/mailodf',
'odf2xhtml/odf2xhtml',
'odf2mht/odf2mht',
'odf2xml/odf2xml',
'odfimgimport/odfimgimport',
'odflint/odflint',
'odfmeta/odfmeta',
'odfoutline/odfoutline',
'odfuserfield/odfuserfield',
'xml2odf/xml2odf'],
data_files=datafiles,
install_requires=['defusedxml', ]
)
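# Illustrative only, never executed by setup.py: a minimal sketch of the API
# that this package installs, creating a one-paragraph text document (the
# output file name is arbitrary).
#
# from odf.opendocument import OpenDocumentText
# from odf.text import P
# doc = OpenDocumentText()
# doc.text.addElement(P(text="Hello from odfpy"))
# doc.save("hello.odt")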
| kawamon/hue | desktop/core/ext-py/odfpy-1.4.1/setup.py | Python | apache-2.0 | 4,535 | ["VisIt"] | 6cdac33d204d9b0192916cfb740259345469d3fd2800644a7848bac27c087f28 |
import os, threading, collections
from functools import wraps
from flask import Module, url_for, render_template, request, session, redirect, g, current_app, make_response
from decorators import guest_or_login_required, login_required, with_lock
from collections import defaultdict
from flask.ext.babel import Babel, gettext, ngettext, lazy_gettext
_ = gettext
from sagenb.notebook.interact import INTERACT_UPDATE_PREFIX
from sagenb.notebook.misc import encode_response
ws = Module('sagenb.flask_version.worksheet')
worksheet_locks = defaultdict(threading.Lock)
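# One lock per worksheet object: worksheet_view acquires it below so that
# concurrent requests touching the same worksheet are serialised, while
# requests for different worksheets can proceed in parallel.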
def worksheet_view(f):
"""
The `username` in the wrapper function is the username in the URL to the worksheet, which normally
is the owner of the worksheet. Don't confuse this with `g.username`, the actual username of the
user looking at the worksheet.
"""
@guest_or_login_required
@wraps(f)
def wrapper(username, id, **kwds):
worksheet_filename = username + "/" + id
try:
worksheet = kwds['worksheet'] = g.notebook.get_worksheet_with_filename(worksheet_filename)
except KeyError:
return _("You do not have permission to access this worksheet")
with worksheet_locks[worksheet]:
owner = worksheet.owner()
if owner != '_sage_' and g.username != owner:
if not worksheet.is_published():
if (not g.username in worksheet.collaborators() and
not g.notebook.user_manager().user_is_admin(g.username)):
return current_app.message(_("You do not have permission to access this worksheet"))
if not worksheet.is_published():
worksheet.set_active(g.username)
#This was in twist.Worksheet.childFactory
from base import notebook_updates
notebook_updates()
return f(username, id, **kwds)
return wrapper
def url_for_worksheet(worksheet):
"""
Returns the url for a given worksheet.
"""
return url_for('worksheet.worksheet', username=worksheet.owner(),
id=worksheet.filename_without_owner())
def get_cell_id():
"""
Returns the cell ID from the request.
We cast the incoming cell ID to an integer, if it's possible.
Otherwise, we treat it as a string.
"""
try:
return int(request.values['id'])
except ValueError:
return request.values['id']
##############################
# Views
##############################
@ws.route('/new_worksheet')
@login_required
def new_worksheet():
if g.notebook.readonly_user(g.username):
return current_app.message(_("Account is in read-only mode"), cont=url_for('worksheet_listing.home', username=g.username))
W = g.notebook.create_new_worksheet(gettext("Untitled"), g.username)
return redirect(url_for_worksheet(W))
@ws.route('/home/<username>/<id>/')
@worksheet_view
def worksheet(username, id, worksheet=None):
"""
username is the owner of the worksheet
id is the id of the worksheet
"""
# /home/pub/* is handled in worksheet_listing.py
assert worksheet is not None
worksheet.sage()
return render_template(os.path.join('html', 'worksheet.html'))
published_commands_allowed = set(['alive', 'cells', 'cell_update',
'data', 'download', 'edit_published_page', 'eval',
'quit_sage', 'rate', 'rating_info', 'new_cell_before',
'new_cell_after', 'introspect', 'delete_all_output',
'copy', 'restart_sage'])
readonly_commands_allowed = set(['alive', 'cells', 'data', 'datafile', 'download', 'quit_sage', 'rating_info', 'delete_all_output'])
def worksheet_command(target, **route_kwds):
if 'methods' not in route_kwds:
route_kwds['methods'] = ['GET', 'POST']
def decorator(f):
@ws.route('/home/<username>/<id>/' + target, **route_kwds)
@worksheet_view
@wraps(f)
def wrapper(*args, **kwds):
#We remove the first two arguments corresponding to the
#username and the worksheet id
username_id = args[:2]
args = args[2:]
#####################
# Public worksheets #
#####################
#_sage_ is used by live docs and published interacts
if username_id and username_id[0] in ['_sage_']:
if target.split('/')[0] not in published_commands_allowed:
raise NotImplementedError("User _sage_ cannot access URL %s" % target)
if g.notebook.readonly_user(g.username):
if target.split('/')[0] not in readonly_commands_allowed:
return current_app.message(_("Account is in read-only mode"), cont=url_for('worksheet_listing.home', username=g.username))
#Make worksheet a non-keyword argument appearing before the
#other non-keyword arguments.
worksheet = kwds.pop('worksheet', None)
if worksheet is not None:
args = (worksheet,) + args
return f(*args, **kwds)
#This function shares some functionality with url_for_worksheet.
#Maybe we can refactor this some?
def wc_url_for(worksheet, *args, **kwds):
kwds['username'] = g.username
kwds['id'] = worksheet.filename_without_owner()
return url_for(f.__name__, *args, **kwds)
wrapper.url_for = wc_url_for
return wrapper
return decorator
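# Every view below is registered through this factory, e.g.
# @worksheet_command('rename') exposes worksheet_rename at
# /home/<username>/<id>/rename and passes the Worksheet object as the first
# positional argument.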
@worksheet_command('rename')
def worksheet_rename(worksheet):
worksheet.set_name(request.values['name'])
return 'done'
@worksheet_command('alive')
def worksheet_alive(worksheet):
return str(worksheet.state_number())
@worksheet_command('system/<system>')
def worksheet_system(worksheet, system):
worksheet.set_system(system)
return 'success'
@worksheet_command('pretty_print/<enable>')
def worksheet_pretty_print(worksheet, enable):
worksheet.set_pretty_print(enable)
return 'success'
@worksheet_command('conf')
def worksheet_conf(worksheet):
return str(worksheet.conf())
########################################################
# Save a worksheet
########################################################
@worksheet_command('save')
def worksheet_save(worksheet):
"""
Save the contents of a worksheet after editing it in plain-text
edit mode.
"""
if 'button_save' in request.form:
E = request.values['textfield']
worksheet.edit_save(E)
worksheet.record_edit(g.username)
return redirect(url_for_worksheet(worksheet))
@worksheet_command('save_snapshot')
def worksheet_save_snapshot(worksheet):
"""Save a snapshot of a worksheet."""
worksheet.save_snapshot(g.username)
return 'saved'
@worksheet_command('save_and_quit')
def worksheet_save_and_quit(worksheet):
"""Save a snapshot of a worksheet then quit it. """
worksheet.save_snapshot(g.username)
worksheet.quit()
return 'saved'
#XXX: Redundant due to the above?
@worksheet_command('save_and_close')
def worksheet_save_and_close(worksheet):
"""Save a snapshot of a worksheet then quit it. """
worksheet.save_snapshot(g.username)
worksheet.quit()
return 'saved'
@worksheet_command('discard_and_quit')
def worksheet_discard_and_quit(worksheet):
"""Quit the worksheet, discarding any changes."""
worksheet.revert_to_last_saved_state()
worksheet.quit()
return 'saved' #XXX: Should this really be saved?
@worksheet_command('revert_to_last_saved_state')
def worksheet_revert_to_last_saved_state(worksheet):
worksheet.revert_to_last_saved_state()
return 'reverted'
########################################################
# Worksheet properties
########################################################
@worksheet_command('worksheet_properties')
def worksheet_properties(worksheet):
"""
Send worksheet properties as a JSON object
"""
from sagenb.notebook.misc import encode_response
r = worksheet.basic()
if worksheet.has_published_version():
hostname = request.headers.get('host', g.notebook.interface + ':' + str(g.notebook.port))
r['published_url'] = 'http%s://%s/home/%s' % ('' if not g.notebook.secure else 's',
hostname,
worksheet.published_version().filename())
return encode_response(r)
########################################################
# Used in refreshing the cell list
########################################################
@worksheet_command('cell_properties')
def worksheet_cell_properties(worksheet):
"""
Return the cell with the given id as a JSON object
"""
id = get_cell_id()
return encode_response(worksheet.get_cell_with_id(id).basic())
@worksheet_command('cell_list')
def worksheet_cell_list(worksheet):
"""
Return a list of cells in JSON format.
"""
r = {}
r['state_number'] = worksheet.state_number()
r['cell_list'] = [c.basic() for c in worksheet.cell_list()]
return encode_response(r)
########################################################
# Set output type of a cell
########################################################
@worksheet_command('set_cell_output_type')
def worksheet_set_cell_output_type(worksheet):
"""
Set the output type of the cell.
This enables the type of output cell, such as to allowing wrapping
for output that is very long.
"""
id = get_cell_id()
type = request.values['type']
worksheet.get_cell_with_id(id).set_cell_output_type(type)
return ''
########################################################
#Cell creation
########################################################
from sagenb.misc.misc import unicode_str
@worksheet_command('new_cell_before')
def worksheet_new_cell_before(worksheet):
"""Add a new cell before a given cell."""
r = {}
r['id'] = id = get_cell_id()
input = unicode_str(request.values.get('input', ''))
cell = worksheet.new_cell_before(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(div_wrap=False)
return encode_response(r)
@worksheet_command('new_text_cell_before')
def worksheet_new_text_cell_before(worksheet):
"""Add a new text cell before a given cell."""
r = {}
r['id'] = id = get_cell_id()
input = unicode_str(request.values.get('input', ''))
cell = worksheet.new_text_cell_before(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(editing=True)
# XXX: Does editing correspond to TinyMCE? If so, we should try
# to centralize that code.
return encode_response(r)
@worksheet_command('new_cell_after')
def worksheet_new_cell_after(worksheet):
"""Add a new cell after a given cell."""
r = {}
r['id'] = id = get_cell_id()
input = unicode_str(request.values.get('input', ''))
cell = worksheet.new_cell_after(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(div_wrap=True)
return encode_response(r)
@worksheet_command('new_text_cell_after')
def worksheet_new_text_cell_after(worksheet):
"""Add a new text cell after a given cell."""
r = {}
r['id'] = id = get_cell_id()
input = unicode_str(request.values.get('input', ''))
cell = worksheet.new_text_cell_after(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(editing=True)
# XXX: Does editing correspond to TinyMCE? If so, we should try
# to centralize that code.
return encode_response(r)
########################################################
# Cell deletion
########################################################
@worksheet_command('delete_cell')
def worksheet_delete_cell(worksheet):
"""
Deletes a worksheet cell, unless there's only one compute cell
left. This allows functions which evaluate relative to existing
cells, e.g., inserting a new cell, to continue to work.
"""
r = {}
r['id'] = id = get_cell_id()
if len(worksheet.compute_cell_id_list()) <= 1:
r['command'] = 'ignore'
else:
prev_id = worksheet.delete_cell_with_id(id)
r['command'] = 'delete'
r['prev_id'] = prev_id
r['cell_id_list'] = worksheet.cell_id_list()
return encode_response(r)
@worksheet_command('delete_cell_output')
def worksheet_delete_cell_output(worksheet):
"""Delete's a cell's output."""
r = {}
r['id'] = id = get_cell_id()
worksheet.get_cell_with_id(id).delete_output()
r['command'] = 'delete_output'
return encode_response(r)
########################################################
# Evaluation and cell update
########################################################
@worksheet_command('eval')
def worksheet_eval(worksheet):
"""
Evaluate a worksheet cell.
If the request is not authorized (the requester did not enter the
correct password for the given worksheet), then the request to
evaluate or introspect the cell is ignored.
If the cell contains either 1 or 2 question marks at the end (not
on a comment line), then this is interpreted as a request for
either introspection to the documentation of the function, or the
documentation of the function and the source code of the function
respectively.
"""
from base import notebook_updates
r = {}
r['id'] = id = get_cell_id()
cell = worksheet.get_cell_with_id(id)
public = worksheet.tags().get('_pub_', [False])[0] #this is set in pub_worksheet
if public and not cell.is_interactive_cell():
r['command'] = 'error'
r['message'] = 'Cannot evaluate non-interactive public cell with ID %r.' % id
return encode_response(r)
worksheet.increase_state_number()
if public:
# Make public input cells read-only.
input_text = cell.input_text()
else:
input_text = unicode_str(request.values.get('input', '')).replace('\r\n', '\n') #DOS
# Handle an updated / recomputed interact. TODO: JSON encode
# the update data.
if 'interact' in request.values:
r['interact'] = 1
input_text = INTERACT_UPDATE_PREFIX
variable = request.values.get('variable', '')
if variable!='':
adapt_number = int(request.values.get('adapt_number', -1))
value = request.values.get('value', '')
input_text += "\n_interact_.update('%s', '%s', %s, _interact_.standard_b64decode('%s'), globals())" % (id, variable, adapt_number, value)
if int(request.values.get('recompute', 0)):
input_text += "\n_interact_.recompute('%s')" % id
cell.set_input_text(input_text)
if int(request.values.get('save_only', '0')):
notebook_updates()
return encode_response(r)
elif int(request.values.get('text_only', '0')):
notebook_updates()
r['cell_html'] = cell.html()
return encode_response(r)
cell.evaluate(username=g.username)
new_cell = int(request.values.get('newcell', 0)) #whether to insert a new cell or not
if new_cell:
new_cell = worksheet.new_cell_after(id)
r['command'] = 'insert_cell'
r['new_cell_id'] = new_cell.id()
r['new_cell_html'] = new_cell.html(div_wrap=False)
else:
r['next_id'] = cell.next_compute_id()
notebook_updates()
return encode_response(r)
@worksheet_command('cell_update')
def worksheet_cell_update(worksheet):
import time
r = {}
r['id'] = id = get_cell_id()
# update the computation one "step".
worksheet.check_comp()
# now get latest status on our cell
r['status'], cell = worksheet.check_cell(id)
if r['status'] == 'd':
r['new_input'] = cell.changed_input_text()
r['output_html'] = cell.output_html()
# Update the log.
t = time.strftime('%Y-%m-%d at %H:%M',
time.localtime(time.time()))
H = "Worksheet '%s' (%s)\n" % (worksheet.name(), t)
H += cell.edit_text(ncols=g.notebook.HISTORY_NCOLS, prompts=False,
max_out=g.notebook.HISTORY_MAX_OUTPUT)
g.notebook.add_to_user_history(H, g.username)
else:
r['new_input'] = ''
r['output_html'] = ''
r['interrupted'] = cell.interrupted()
if 'Unhandled SIGSEGV' in cell.output_text(raw=True).split('\n'):
r['interrupted'] = 'restart'
print 'Segmentation fault detected in output!'
r['output'] = cell.output_text(html=True)
r['output_wrapped'] = cell.output_text(g.notebook.conf()['word_wrap_cols'])
r['introspect_output'] = cell.introspect_output()
# Compute 'em, if we got 'em.
worksheet.start_next_comp()
return encode_response(r)
########################################################
# Cell introspection
########################################################
@worksheet_command('introspect')
def worksheet_introspect(worksheet):
"""
Cell introspection. This is called when the user presses the tab
key in the browser in order to introspect.
"""
r = {}
r['id'] = id = get_cell_id()
if worksheet.tags().get('_pub_', [False])[0]: #tags set in pub_worksheet
r['command'] = 'error'
r['message'] = 'Cannot evaluate public cell introspection.'
return encode_response(r)
before_cursor = request.values.get('before_cursor', '')
after_cursor = request.values.get('after_cursor', '')
cell = worksheet.get_cell_with_id(id)
cell.evaluate(introspect=[before_cursor, after_cursor])
r['command'] = 'introspect'
return encode_response(r)
########################################################
# Edit the entire worksheet
########################################################
@worksheet_command('edit')
def worksheet_edit(worksheet):
"""
Return a window that allows the user to edit the text of the
worksheet with the given filename.
"""
return render_template(os.path.join("html", "worksheet_edit.html"),
worksheet = worksheet,
username = g.username)
########################################################
# Plain text log view of worksheet
########################################################
@worksheet_command('text')
def worksheet_text(worksheet):
"""
Return a window that allows the user to edit the text of the
worksheet with the given filename.
"""
from cgi import escape
plain_text = worksheet.plain_text(prompts=True, banner=False)
plain_text = escape(plain_text).strip()
return render_template(os.path.join("html", "worksheet_text.html"),
username = g.username,
plain_text = plain_text)
########################################################
# Copy a worksheet
########################################################
@worksheet_command('copy')
def worksheet_copy(worksheet):
copy = g.notebook.copy_worksheet(worksheet, g.username)
if 'no_load' in request.values:
return ''
else:
return redirect(url_for_worksheet(copy))
########################################################
# Get a copy of a published worksheet and start editing it
########################################################
@worksheet_command('edit_published_page')
def worksheet_edit_published_page(worksheet):
## if user_type(self.username) == 'guest':
## return current_app.message('You must <a href="/">login first</a> in order to edit this worksheet.')
ws = worksheet.worksheet_that_was_published()
if ws.owner() == g.username:
W = ws
else:
W = g.notebook.copy_worksheet(worksheet, g.username)
W.set_name(worksheet.name())
return redirect(url_for_worksheet(W))
########################################################
# Collaborate with others
########################################################
@worksheet_command('invite_collab')
def worksheet_invite_collab(worksheet):
owner = worksheet.owner()
id_number = worksheet.id_number()
old_collaborators = set(worksheet.collaborators())
collaborators = set([u.strip() for u in request.values.get('collaborators', '').split(',') if u!=owner])
if len(collaborators-old_collaborators)>500:
# to prevent abuse, you can't add more than 500 collaborators at a time
return current_app.message(_("Error: can't add more than 500 collaborators at a time"), cont=url_for_worksheet(worksheet))
worksheet.set_collaborators(collaborators)
user_manager = g.notebook.user_manager()
# add worksheet to new collaborators
for u in collaborators-old_collaborators:
try:
user_manager.user(u).viewable_worksheets().add((owner, id_number))
except KeyError:
# user doesn't exist
pass
# remove worksheet from ex-collaborators
for u in old_collaborators-collaborators:
try:
user_manager.user(u).viewable_worksheets().discard((owner, id_number))
except KeyError:
# user doesn't exist
pass
return ''
########################################################
# Revisions
########################################################
# TODO take out or implement
@worksheet_command('revisions')
def worksheet_revisions(worksheet):
"""
Show a list of revisions of this worksheet.
"""
if 'action' not in request.values:
if 'rev' in request.values:
return g.notebook.html_specific_revision(g.username, worksheet,
request.values['rev'])
else:
return g.notebook.html_worksheet_revision_list(g.username, worksheet)
else:
rev = request.values['rev']
action = request.values['action']
if action == 'revert':
import bz2
worksheet.save_snapshot(g.username)
#XXX: Requires access to filesystem
txt = bz2.decompress(open(worksheet.get_snapshot_text_filename(rev)).read())
worksheet.delete_cells_directory()
worksheet.edit_save(txt)
return redirect(url_for_worksheet(worksheet))
elif action == 'publish':
import bz2
W = g.notebook.publish_worksheet(worksheet, g.username)
txt = bz2.decompress(open(worksheet.get_snapshot_text_filename(rev)).read())
W.delete_cells_directory()
W.edit_save(txt)
return redirect(url_for_worksheet(W))
else:
return current_app.message(_('Error'))
########################################################
# Cell directories
########################################################
@worksheet_command('cells/<path:filename>')
def worksheet_cells(worksheet, filename):
#XXX: This requires that the worker filesystem be accessible from
#the server.
from flask.helpers import send_from_directory
return send_from_directory(worksheet.cells_directory(), filename)
##############################################
# Data
##############################################
@worksheet_command('data/<path:filename>')
def worksheet_data_folder(worksheet, filename):
# preferred way of accessing data
return worksheet_data(worksheet, filename)
def worksheet_data(worksheet, filename):
dir = os.path.abspath(worksheet.data_directory())
if not os.path.exists(dir):
return make_response(_('No data file'), 404)
else:
from flask.helpers import send_from_directory
return send_from_directory(worksheet.data_directory(), filename)
@worksheet_command('delete_datafile')
def worksheet_delete_datafile(worksheet):
dir = os.path.abspath(worksheet.data_directory())
filename = request.values['name']
path = os.path.join(dir, filename)
os.unlink(path)
return ''
@worksheet_command('edit_datafile/<path:filename>')
def worksheet_edit_datafile(worksheet, filename):
ext = os.path.splitext(filename)[1].lower()
file_is_image, file_is_text = False, False
text_file_content = ""
path = "/home/%s/data/%s" % (worksheet.filename(), filename)
if ext in ['.png', '.jpg', '.gif']:
file_is_image = True
if ext in ['.txt', '.tex', '.sage', '.spyx', '.py', '.f', '.f90', '.c']:
file_is_text = True
text_file_content = open(os.path.join(worksheet.data_directory(), filename)).read()
return render_template(os.path.join("html", "datafile_edit.html"),
worksheet = worksheet,
username = g.username,
filename_ = filename,
file_is_image = file_is_image,
file_is_text = file_is_text,
text_file_content = text_file_content,
path = path)
@worksheet_command('save_datafile')
def worksheet_save_datafile(worksheet):
filename = request.values['filename']
if 'button_save' in request.values:
text_field = request.values['textfield']
dest = os.path.join(worksheet.data_directory(), filename) #XXX: Requires access to filesystem
if os.path.exists(dest):
os.unlink(dest)
open(dest, 'w').write(text_field)
print 'saving datafile, redirect'
return redirect(url_for_worksheet(worksheet))
# @worksheet_command('link_datafile')
# def worksheet_link_datafile(worksheet):
# target_worksheet_filename = request.values['target']
# data_filename = request.values['filename']
# src = os.path.abspath(os.path.join(
# worksheet.data_directory(), data_filename))
# target_ws = g.notebook.get_worksheet_with_filename(target_worksheet_filename)
# target = os.path.abspath(os.path.join(
# target_ws.data_directory(), data_filename))
# if target_ws.owner() != g.username and not target_ws.is_collaborator(g.username):
# return current_app.message(_("illegal link attempt!"), worksheet_datafile.url_for(worksheet, name=data_filename))
# if os.path.exists(target):
# return current_app.message(_("The data filename already exists in other worksheet\nDelete the file in the other worksheet before creating a link."), worksheet_datafile.url_for(worksheet, name=data_filename))
# os.link(src,target)
# return redirect(worksheet_datafile.url_for(worksheet, name=data_filename))
# #return redirect(url_for_worksheet(target_ws) + '/datafile?name=%s'%data_filename) #XXX: Can we not hardcode this?
@worksheet_command('upload_datafile')
def worksheet_upload_datafile(worksheet):
from werkzeug.utils import secure_filename
file = request.files['file']
name = request.values.get('name', '').strip() or file.filename
name = secure_filename(name)
#XXX: disk access
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
file.save(dest)
return ''
@worksheet_command('datafile_from_url')
def worksheet_datafile_from_url(worksheet):
from werkzeug.utils import secure_filename
name = request.values.get('name', '').strip()
url = request.values.get('url', '').strip()
if url and not name:
name = url.split('/')[-1]
name = secure_filename(name)
import urllib2
from urlparse import urlparse
# we normalize the url by parsing it first
parsedurl = urlparse(url)
if not parsedurl[0] in ('http','https','ftp'):
return _('URL must start with http, https, or ftp.')
download = urllib2.urlopen(parsedurl.geturl())
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
import re
matches = re.match("file://(?:localhost)?(/.+)", url)
if matches:
f = file(dest, 'wb')
f.write(open(matches.group(1)).read())
f.close()
return ''
with open(dest, 'w') as f:
f.write(download.read())
return ''
@worksheet_command('new_datafile')
def worksheet_new_datafile(worksheet):
from werkzeug.utils import secure_filename
name = request.values.get('new', '').strip()
name = secure_filename(name)
#XXX: disk access
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
open(dest, 'w').close()
return ''
################################
#Publishing
################################
@worksheet_command('publish')
def worksheet_publish(worksheet):
"""
This provides a frontend to the management of worksheet
publication. This management functionality includes
    initialization of publication, re-publication, automatic
    publication when a worksheet is saved, and ending of publication.
"""
if 'publish_on' in request.values:
g.notebook.publish_worksheet(worksheet, g.username)
if 'publish_off' in request.values and worksheet.has_published_version():
g.notebook.delete_worksheet(worksheet.published_version().filename())
if 'auto_on' in request.values:
worksheet.set_auto_publish(True)
if 'auto_off' in request.values:
worksheet.set_auto_publish(False)
if 'is_auto' in request.values:
return str(worksheet.is_auto_publish())
if 'republish' in request.values:
g.notebook.publish_worksheet(worksheet, g.username)
return ''
############################################
# Ratings
############################################
# @worksheet_command('rating_info')
# def worksheet_rating_info(worksheet):
# return worksheet.html_ratings_info()
# @worksheet_command('rate')
# def worksheet_rate(worksheet):
# ## if user_type(self.username) == "guest":
# ## return HTMLResponse(stream = message(
# ## 'You must <a href="/">login first</a> in order to rate this worksheet.', ret))
# rating = int(request.values['rating'])
# if rating < 0 or rating >= 5:
# return current_app.messge("Gees -- You can't fool the rating system that easily!",
# url_for_worksheet(worksheet))
# comment = request.values['comment']
# worksheet.rate(rating, comment, g.username)
# s = """
# Thank you for rating the worksheet <b><i>%s</i></b>!
# You can <a href="rating_info">see all ratings of this worksheet.</a>
# """%(worksheet.name())
# #XXX: Hardcoded url
# return current_app.message(s.strip(), '/pub/', title=u'Rating Accepted')
########################################################
# Downloading, moving around, renaming, etc.
########################################################
@worksheet_command('download/<path:title>')
def worksheet_download(worksheet, title):
return unconditional_download(worksheet, title)
def unconditional_download(worksheet, title):
from sagenb.misc.misc import tmp_filename
from flask.helpers import send_file
filename = tmp_filename() + '.sws'
if title.endswith('.sws'):
title = title[:-4]
try:
#XXX: Accessing the hard disk.
g.notebook.export_worksheet(worksheet.filename(), filename, title)
except KeyError:
return current_app.message(_('No such worksheet.'))
return send_file(filename, mimetype='application/sage')
@worksheet_command('restart_sage')
def worksheet_restart_sage(worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.restart_sage()
return 'done'
@worksheet_command('quit_sage')
def worksheet_quit_sage(worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.quit()
return 'done'
@worksheet_command('interrupt')
def worksheet_interrupt(worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.sage().interrupt()
return 'failed' if worksheet.sage().is_computing() else 'success'
@worksheet_command('hide_all')
def worksheet_hide_all(worksheet):
worksheet.hide_all()
return 'success'
@worksheet_command('show_all')
def worksheet_show_all(worksheet):
worksheet.show_all()
return 'success'
@worksheet_command('delete_all_output')
def worksheet_delete_all_output(worksheet):
try:
worksheet.delete_all_output(g.username)
except ValueError:
return 'fail'
else:
return 'success'
@worksheet_command('print')
def worksheet_print(worksheet):
#XXX: We might want to separate the printing template from the
#regular html template.
return g.notebook.html(worksheet.filename(), do_print=True)
#######################################################
# Live "docbrowser" worksheets from HTML documentation
#######################################################
doc_worksheet_number = -1
def doc_worksheet():
global doc_worksheet_number
doc_worksheet_number = doc_worksheet_number % g.notebook.conf()['doc_pool_size']
W = None
for X in g.notebook.users_worksheets('_sage_'):
if X.compute_process_has_been_started():
continue
if X.id_number() == doc_worksheet_number:
W = X
W.clear()
break
if W is None:
# The first argument here is the worksheet's title, which the
# caller should set with W.set_name.
W = g.notebook.create_new_worksheet('', '_sage_')
return W
# def extract_title(html_page):
# #XXX: This might be better as a regex
# h = html_page.lower()
# i = h.find('<title>')
# if i == -1:
# return gettext("Untitled")
# j = h.find('</title>')
# return html_page[i + len('<title>') : j]
# @login_required
# def worksheet_file(path):
# # Create a live Sage worksheet from the given path.
# if not os.path.exists(path):
# return current_app.message(_('Document does not exist.'))
# doc_page_html = open(path).read()
# from sagenb.notebook.docHTMLProcessor import SphinxHTMLProcessor
# doc_page = SphinxHTMLProcessor().process_doc_html(doc_page_html)
# title = (extract_title(doc_page_html).replace('—', '--') or
# 'Live Sage Documentation')
# W = doc_worksheet()
# W.edit_save(doc_page)
# W.set_system('sage')
# W.set_name(title)
# W.save()
# W.quit()
# # FIXME: For some reason, an extra cell gets added so we
# # remove it here.
# W.cell_list().pop()
# # TODO
# return g.notebook.html(worksheet_filename=W.filename(),
# username=g.username)
####################
# Public Worksheets
####################
# def pub_worksheet(source):
# # TODO: Independent pub pool and server settings.
# proxy = doc_worksheet()
# proxy.set_name(source.name())
# proxy.set_last_change(*source.last_change())
# proxy.set_worksheet_that_was_published(source.worksheet_that_was_published())
# g.notebook._initialize_worksheet(source, proxy)
# proxy.set_tags({'_pub_': [True]})
# proxy.save()
# return proxy
#######################################################
# Jmol Popup
#######################################################
@ws.route('/home/<username>/<id>/jmol_popup.html', methods=['GET'])
@login_required
def jmol_popup(username, id):
return render_template(os.path.join('html', 'jmol_popup.html')) | macieksk/sagenb | sagenb/flask_version/worksheet.py | Python | gpl-3.0 | 35,813 | [
"Jmol"
] | 285c6d559630f41e0f346f5ed6e3e08f5354323be2301478333c14fa1223230f |
import numpy as np
import GPy
from GPy.core import Model
from GPy.core.parameterization import variational
from GPy.util.linalg import tdot, jitchol, dtrtrs, dtrtri
class InferenceX(Model):
"""
The model class for inference of new X with given new Y. (replacing the "do_test_latent" in Bayesian GPLVM)
It is a tiny inference model created from the original GP model. The kernel, likelihood (only Gaussian is supported at the moment)
and posterior distribution are taken from the original model.
For Regression models and GPLVM, a point estimate of the latent variable X will be inferred.
For Bayesian GPLVM, the variational posterior of X will be inferred.
X is inferred through a gradient optimization of the inference model.
:param model: the GPy model used in inference
:type model: GPy.core.Model
:param Y: the new observed data for inference
:type Y: numpy.ndarray
:param init: the distance metric of Y for initializing X with the nearest neighbour.
    :type init: str, one of 'L2', 'NCC' or 'rand'
"""
def __init__(self, model, Y, name='inferenceX', init='L2'):
if np.isnan(Y).any() or getattr(model, 'missing_data', False):
assert Y.shape[0]==1, "The current implementation of inference X only support one data point at a time with missing data!"
self.missing_data = True
self.valid_dim = np.logical_not(np.isnan(Y[0]))
self.ninan = getattr(model, 'ninan', None)
else:
self.missing_data = False
super(InferenceX, self).__init__(name)
self.likelihood = model.likelihood.copy()
self.kern = model.kern.copy()
from copy import deepcopy
self.posterior = deepcopy(model.posterior)
self.uncertain_input = False
# if isinstance(model.X, variational.VariationalPosterior):
# self.uncertain_input = True
# else:
# self.uncertain_input = False
if hasattr(model, 'Z'):
self.sparse_gp = True
self.Z = model.Z.copy()
else:
self.sparse_gp = False
self.uncertain_input = False
self.Z = model.X.copy()
self.Y = Y
self.X = self._init_X(model, Y, init=init)
self.compute_dL()
self.link_parameter(self.X)
def _init_X(self, model, Y_new, init='L2'):
# Initialize the new X by finding the nearest point in Y space.
Y = model.Y
if self.missing_data:
Y = Y[:,self.valid_dim]
Y_new = Y_new[:,self.valid_dim]
dist = -2.*Y_new.dot(Y.T) + np.square(Y_new).sum(axis=1)[:,None]+ np.square(Y).sum(axis=1)[None,:]
else:
if init=='L2':
dist = -2.*Y_new.dot(Y.T) + np.square(Y_new).sum(axis=1)[:,None]+ np.square(Y).sum(axis=1)[None,:]
elif init=='NCC':
dist = Y_new.dot(Y.T)
elif init=='rand':
dist = np.random.rand(Y_new.shape[0],Y.shape[0])
idx = dist.argmin(axis=1)
from GPy.core import Param
if isinstance(model.X, variational.VariationalPosterior):
X = Param('latent mean',model.X.mean.values[idx].copy())
X.set_prior(GPy.core.parameterization.priors.Gaussian(0.,1.), warning=False)
else:
X = Param('latent mean',(model.X[idx].values).copy())
return X
def compute_dL(self):
# Common computation
beta = 1./np.fmax(self.likelihood.variance, 1e-6)
output_dim = self.Y.shape[-1]
wv = self.posterior.woodbury_vector
if self.missing_data:
wv = wv[:,self.valid_dim]
output_dim = self.valid_dim.sum()
if self.ninan is not None:
self.dL_dpsi2 = beta/2.*(self.posterior.woodbury_inv[:,:,self.valid_dim] - tdot(wv)[:, :, None]).sum(-1)
else:
self.dL_dpsi2 = beta/2.*(output_dim*self.posterior.woodbury_inv - tdot(wv))
self.dL_dpsi1 = beta*np.dot(self.Y[:,self.valid_dim], wv.T)
self.dL_dpsi0 = - beta/2.* np.ones(self.Y.shape[0])
else:
self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - tdot(wv))/2. #np.einsum('md,od->mo',wv, wv)
self.dL_dpsi1 = beta*np.dot(self.Y, wv.T)
self.dL_dpsi0 = -beta/2.*output_dim* np.ones(self.Y.shape[0])
def parameters_changed(self):
N, D = self.Y.shape
Kss = self.kern.K(self.X)
Ksu = self.kern.K(self.X, self.Z)
wv = self.posterior.woodbury_vector
wi = self.posterior.woodbury_inv
a = self.Y - Ksu.dot(wv)
C = Kss + np.eye(N)*self.likelihood.variance - Ksu.dot(wi).dot(Ksu.T)
Lc = jitchol(C)
LcInva = dtrtrs(Lc, a)[0]
LcInv = dtrtri(Lc)
CInva = dtrtrs(Lc, LcInva,trans=1)[0]
self._log_marginal_likelihood = -N*D/2.*np.log(2*np.pi) - D*np.log(np.diag(Lc)).sum() - np.square(LcInva).sum()/2.
dKsu = CInva.dot(wv.T)
dKss = tdot(CInva)/2. -D* tdot(LcInv.T)/2.
dKsu += -2. * dKss.dot(Ksu).dot(wi)
X_grad = self.kern.gradients_X(dKss, self.X)
X_grad += self.kern.gradients_X(dKsu, self.X, self.Z)
self.X.gradient = X_grad
if self.uncertain_input:
# Update Log-likelihood
KL_div = self.variational_prior.KL_divergence(self.X)
# update for the KL divergence
self.variational_prior.update_gradients_KL(self.X)
self._log_marginal_likelihood += -KL_div
def log_likelihood(self):
return self._log_marginal_likelihood
| zhenwendai/DeepGP | deepgp/inference/inferenceX.py | Python | bsd-3-clause | 5,652 | [
"Gaussian"
] | 87641dc7b4c704dce1d1c0236586aa93a397f7e52261a8e735c24d7f84e089e1 |
import unittest
from unittest import TestCase
from flask import url_for, session
from app import app, users
from activity import Activity
from user import User
from bucketlist import BucketList
class BucketListTest(TestCase):
def setUp(self):
app.config['SECRET_KEY'] = 'seasasaskrit!'
# creates a test client
self.client = app.test_client()
self.client.testing = True
def test_success(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/login')
self.assertEqual(result.status_code, 200)
def test_failure(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/nonexistant.html')
self.assertEqual(result.status_code, 404)
def test_login_page_loads(self):
# assert login page loads correctly
result = self.client.get('/login')
self.assertTrue(b'The best way to keep track of your dreams and goals' in result.data)
# def test_logout_redirects_user(self):
# user = User('hermano', 'herm@email.com', 'hard')
# users['herm@email.com'] = user
# self.client.post('login', data={
# 'username': 'hermano',
# 'password': 'hard'
# })
# # assert login page loads correctly
# result = self.client.get('/logout')
# self.assertTrue(result.status_code == 302)
def test_signup_page_posts_and_redirects(self):
result = self.client.post('signup', data={
'username': 'hermano',
'email': 'herm@email.com',
'password': 'hard',
'confirm_password': 'hard'
})
self.assertTrue(result.status_code == 302)
def test_signup_redirects_to_add_bucketlist(self):
result = self.client.post('signup', data={
'username': 'hermano',
'email': 'herm@email.com',
'password': 'hard',
'confirm_password': 'hard'
}, follow_redirects = True)
self.assertIn(b'My Bucket Lists', result.data)
# def test_login_page_posts_and_redirects(self):
# user = User('hermano', 'herm@email.com', 'hard')
# users['herm@email.com'] = user
# result = self.client.post('login', data={
# 'username': 'hermano',
# 'password': 'hard'
# })
# self.assertTrue(result.status_code == 302)
def test_successful_login_redirects_to_managelists(self):
user = User('hermano', 'herm@email.com', 'hard')
users['herm@email.com'] = user
result = self.client.post('login', data={
'username': 'hermano',
'password': 'hard'
}, follow_redirects = True)
self.assertIn(b'My Bucket Lists', result.data)
def test_add_bucketlist_successfully_to_user(self):
user = User('hermano', 'herm@email.com', 'hard')
users['herm@email.com'] = user
initial_no_of_bucketlists = len(user.bucketlists)
bktlist = BucketList('Recipes', 'Learn to cook different')
user.bucketlists['Recipes'] = bktlist
self.assertEqual(len(user.bucketlists) - initial_no_of_bucketlists, 1)
def test_add_activity_successfully_to_bucketlist(self):
bucketlist = BucketList('Travels', 'Tour Africa')
activity = Activity('Egypt', 'Visit the Pyramids')
initial_no_of_activities = len(bucketlist.activities)
bucketlist.add_activity(activity)
self.assertEqual(len(bucketlist.activities) - initial_no_of_activities, 1)
def test_user_has_property_bucketlists(self):
user = User('hermano', 'herm@email.com', 'hard')
users['herm@email.com'] = user
self.assertTrue(hasattr(user, 'bucketlists'))
def test_bucket_list_is_instance_of_BucketList(self):
bktlist = BucketList('Recipes', 'Learn to cook different')
self.assertEqual(isinstance(bktlist, BucketList), True)
if __name__ == '__main__':
unittest.main()
| mkiterian/bucket-list-app | test_app.py | Python | mit | 4,103 | [
"VisIt"
] | af308d801d852f4b79291a63edec90ec331dde6da08117a7c3321b852943b05e |
# Blender rock creation tool
#
# Based on BlenderGuru's asteroid tutorial and personal experimentation.
# Tutorial: http://www.blenderguru.com/how-to-make-a-realistic-asteroid/
# Update with another tutorial shared by "rusted" of BlenderArtists:
# Tutorial: http://saschahenrichs.blogspot.com/2010/03/3dsmax-environment-modeling-1.html
#
# Uses the NumPy Gaussian random number generator to generate a rock
# within a given range and to give some randomness to the displacement
# texture values. NumPy's gaussian generator was chosen because, based on
# profiling I performed, it runs in about half the time of the built-in
# Python gaussian equivalent. I would like to shift the script to use the
# NumPy beta distribution, as it ran in about half the time of the NumPy
# gaussian once the skew calculations are added.
#
# Set lower and upper bounds to the same for no randomness.
#
# Tasks:
# Generate meshes with random scaling between given values.
# - Allow for a skewed distribution
# *** Completed on 4/17/2011 ***
# - Create a set of meshes that can be used
# Give the user the ability to set the subsurf level (detail level)
# *** Completed on 4/29/2011 ***
# - Set subsurf modifiers to default at view:3, render:3.
# *** Completed on 4/17/2011 ***
# - Set crease values to allow for hard edges on first subsurf.
# *** Completed on 4/29/2011 ***
# Be able to generate and add a texture to the displacement modifiers.
# *** Completed 5/17/2011 ***
# - Generate three displacement modifiers.
#      - The first only uses a Musgrave for initial indentations.
# *** Now generating four displacement modifiers ***
# *** Completed on 5/17/2011 ***
# - Set a randomness for the type and values of the displacement texture.
# *** Completed 5/9/2011 ***
# - Allow the user to set a value for the range of displacement.
# -> Modification: have user set "roughness" and "roughness range".
#   *** Completed on 4/23/2011 ***
# Set material settings and assign material textures
# *** Completed 6/9/2011 ***
# - Mossiness of the rocks.
# *** Completed 6/9/2011 ***
# - Color of the rocks.
# *** Completed 5/16/2011 ***
#   - Wetness/shininess of the rock.
# *** Completed 5/6/2011 ***
# - For all the user provides a mean value for a skewed distribution.
# *** Removed to lessen usage complexity ***
# Add some presets (mesh) to make it easier to use
#   - Examples: river rock, asteroid, quarried rock, etc.
# *** Completed 7/12/2011 ***
#
# Code Optimization:
# Remove all "bpy.ops" operations with "bpy.data" base operations.
# Remove material/texture cataloging with building a list of
# returned values from bpy.data.*.new() operations.
# *** Completed on 9/6/2011 ***
# Search for places where list comprehensions can be used.
# Look for alternate methods
# - Possible alternate and more efficient data structures
# - Possible alternate algorithms may realize greater performance
# - Look again at multi-processing. Without bpy.ops is might
# be viable.
#
# Future tasks:
# Multi-thread the script
#   *** Will not be implemented.  Multi-processing is adding too much
# overhead to realize a performance increase ***
# - Learn basic multi-threading in Python (multiprocessing)
# - Break material generation into separate threads (processes)
# - Break mesh generation into separate threads (processes)
# - Move name generation, texture ID generation, etc to process first
# - Roll version to 2.0 on completion
#
# Paul "BrikBot" Marshall
# Created: April 17, 2011
# Last Modified: November 17, 2011
# Homepage (blog): http://post.darkarsenic.com/
# //blog.darkarsenic.com/
# Thanks to Meta-Androco, RickyBlender, Ace Dragon, and PKHG for ideas
# and testing.
#
# Coded in IDLE, tested in Blender 2.59. NumPy Recommended.
# Search for "@todo" to quickly find sections that need work.
#
# Remember -
# Functional code comes before fast code. Once it works, then worry about
# making it faster/more efficient.
#
# ##### BEGIN GPL LICENSE BLOCK #####
#
# The Blender Rock Creation tool is for rapid generation of mesh rocks.
# Copyright (C) 2011 Paul Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
import math
import time
from add_mesh_rocks import (settings,
utils)
from bpy_extras import object_utils
from mathutils import (Color,
Vector)
from bpy.props import (BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
EnumProperty)
# This try block allows the script to pseudo-intelligently select the
# appropriate random to use.  If NumPy's random is present it will use that.
# If NumPy's random is not present, it will raise a "module not found"
# exception and instead use the slower built-in random that Python has.
try:
from numpy.random import random_integers as randint
from numpy.random import normal as gauss
from numpy.random import (beta,
uniform,
seed,
weibull)
print("Rock Generator: Numpy found.")
numpy = True
except:
from random import (randint,
gauss,
uniform,
seed)
from random import betavariate as beta
from random import weibullvariate as weibull
print("Rock Generator: Numpy not found. Using Python's random.")
numpy = False
# Global variables:
lastRock = 0
# Creates a new mesh:
#
# param: verts - Vector of vertices for the mesh.
# edges - Edges for the mesh. Can be "[]".
# faces - Face tuples corresponding to vertices.
# name - Name of the mesh.
def createMeshObject(context, verts, edges, faces, name):
# Create new mesh
mesh = bpy.data.meshes.new(name)
# Make a mesh from a list of verts/edges/faces.
mesh.from_pydata(verts, edges, faces)
# Set mesh to use auto smoothing:
mesh.use_auto_smooth = True
# Update mesh geometry after adding stuff.
mesh.update()
return object_utils.object_data_add(context, mesh, operator=None)
# Set the values for a texture from parameters.
#
# param: texture - bpy.data.texture to modify.
# level - designated tweaked settings to use
#                   -> Below 10 is a displacement texture
# -> Between 10 and 20 is a base material texture
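# Example call, matching how generateRocks() uses this later in the file
# (illustrative only):
#   tex = bpy.data.textures.new(name='rock_displacement', type='MUSGRAVE')
#   randomizeTexture(tex, 0)   # tuned as a coarse displacement texture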
def randomizeTexture(texture, level=1):
noises = ['BLENDER_ORIGINAL', 'ORIGINAL_PERLIN', 'IMPROVED_PERLIN',
'VORONOI_F1', 'VORONOI_F2', 'VORONOI_F3', 'VORONOI_F4',
'VORONOI_F2_F1', 'VORONOI_CRACKLE']
if texture.type == 'CLOUDS':
if randint(0, 1) == 0:
texture.noise_type = 'SOFT_NOISE'
else:
texture.noise_type = 'HARD_NOISE'
if level != 11:
tempInt = randint(0, 6)
else:
tempInt = randint(0, 8)
texture.noise_basis = noises[tempInt]
texture.noise_depth = 8
if level == 0:
texture.noise_scale = gauss(0.625, 1 / 24)
elif level == 2:
texture.noise_scale = 0.15
elif level == 11:
texture.noise_scale = gauss(0.5, 1 / 24)
if texture.noise_basis in ['BLENDER_ORIGINAL', 'ORIGINAL_PERLIN',
'IMPROVED_PERLIN', 'VORONOI_F1']:
texture.intensity = gauss(1, 1 / 6)
texture.contrast = gauss(4, 1 / 3)
elif texture.noise_basis in ['VORONOI_F2', 'VORONOI_F3', 'VORONOI_F4']:
texture.intensity = gauss(0.25, 1 / 12)
texture.contrast = gauss(2, 1 / 6)
elif texture.noise_basis == 'VORONOI_F2_F1':
texture.intensity = gauss(0.5, 1 / 6)
texture.contrast = gauss(2, 1 / 6)
elif texture.noise_basis == 'VORONOI_CRACKLE':
texture.intensity = gauss(0.5, 1 / 6)
texture.contrast = gauss(2, 1 / 6)
elif texture.type == 'MUSGRAVE':
musgraveType = ['MULTIFRACTAL', 'RIDGED_MULTIFRACTAL',
'HYBRID_MULTIFRACTAL', 'FBM', 'HETERO_TERRAIN']
texture.musgrave_type = 'MULTIFRACTAL'
texture.dimension_max = abs(gauss(0, 0.6)) + 0.2
texture.lacunarity = beta(3, 8) * 8.2 + 1.8
if level == 0:
texture.noise_scale = gauss(0.625, 1 / 24)
texture.noise_intensity = 0.2
texture.octaves = 1.0
elif level == 2:
texture.intensity = gauss(1, 1 / 6)
texture.contrast = 0.2
texture.noise_scale = 0.15
texture.octaves = 8.0
elif level == 10:
texture.intensity = gauss(0.25, 1 / 12)
texture.contrast = gauss(1.5, 1 / 6)
texture.noise_scale = 0.5
texture.octaves = 8.0
elif level == 12:
texture.octaves = uniform(1, 3)
elif level > 12:
texture.octaves = uniform(2, 8)
else:
texture.intensity = gauss(1, 1 / 6)
texture.contrast = 0.2
texture.octaves = 8.0
elif texture.type == 'DISTORTED_NOISE':
tempInt = randint(0, 8)
texture.noise_distortion = noises[tempInt]
tempInt = randint(0, 8)
texture.noise_basis = noises[tempInt]
texture.distortion = skewedGauss(2.0, 2.6666, (0.0, 10.0), False)
if level == 0:
texture.noise_scale = gauss(0.625, 1 / 24)
elif level == 2:
texture.noise_scale = 0.15
elif level >= 12:
texture.noise_scale = gauss(0.2, 1 / 48)
elif texture.type == 'STUCCI':
stucciTypes = ['PLASTIC', 'WALL_IN', 'WALL_OUT']
if randint(0, 1) == 0:
texture.noise_type = 'SOFT_NOISE'
else:
texture.noise_type = 'HARD_NOISE'
tempInt = randint(0, 2)
texture.stucci_type = stucciTypes[tempInt]
if level == 0:
tempInt = randint(0, 6)
texture.noise_basis = noises[tempInt]
texture.noise_scale = gauss(0.625, 1 / 24)
elif level == 2:
tempInt = randint(0, 6)
texture.noise_basis = noises[tempInt]
texture.noise_scale = 0.15
elif level >= 12:
tempInt = randint(0, 6)
texture.noise_basis = noises[tempInt]
texture.noise_scale = gauss(0.2, 1 / 30)
else:
tempInt = randint(0, 6)
texture.noise_basis = noises[tempInt]
elif texture.type == 'VORONOI':
metrics = ['DISTANCE', 'DISTANCE_SQUARED', 'MANHATTAN', 'CHEBYCHEV',
'MINKOVSKY_HALF', 'MINKOVSKY_FOUR', 'MINKOVSKY']
        # Settings for first displacement level:
if level == 0:
tempInt = randint(0, 1)
texture.distance_metric = metrics[tempInt]
texture.noise_scale = gauss(0.625, 1 / 24)
texture.contrast = 0.5
texture.intensity = 0.7
elif level == 2:
texture.noise_scale = 0.15
tempInt = randint(0, 6)
texture.distance_metric = metrics[tempInt]
elif level >= 12:
tempInt = randint(0, 1)
texture.distance_metric = metrics[tempInt]
texture.noise_scale = gauss(0.125, 1 / 48)
texture.contrast = 0.5
texture.intensity = 0.7
else:
tempInt = randint(0, 6)
texture.distance_metric = metrics[tempInt]
return
# Randomizes the given material given base values.
#
# param: Material to randomize
def randomizeMaterial(material, color, dif_int, rough, spec_int, spec_hard,
use_trans, alpha, cloudy, mat_IOR, mossiness, spec_IOR):
skew = False
stddev = 0.0
lastUsedTex = 1
numTex = 6
baseColor = []
# Diffuse settings:
material.diffuse_shader = 'OREN_NAYAR'
if 0.5 > dif_int:
stddev = dif_int / 3
skew = False
else:
stddev = (1 - dif_int) / 3
skew = True
material.diffuse_intensity = skewedGauss(dif_int, stddev, (0.0, 1.0), skew)
if 1.57 > rough:
stddev = rough / 3
skew = False
else:
stddev = (3.14 - rough) / 3
skew = True
material.roughness = skewedGauss(rough, stddev, (0.0, 3.14), skew)
for i in range(3):
if color[i] > 0.9 or color[i] < 0.1:
baseColor.append(skewedGauss(color[i], color[i] / 30,
(0, 1), color[i] > 0.9))
else:
baseColor.append(gauss(color[i], color[i] / 30))
material.diffuse_color = baseColor
# Specular settings:
material.specular_shader = 'BLINN'
if 0.5 > spec_int:
        stddev = spec_int / 3
skew = False
else:
        stddev = (1 - spec_int) / 3
skew = True
material.specular_intensity = skewedGauss(spec_int, stddev,
(0.0, 1.0), skew)
if 256 > spec_hard:
        stddev = (spec_hard - 1) / 3
skew = False
else:
        stddev = (511 - spec_hard) / 3
skew = True
material.specular_hardness = int(round(skewedGauss(spec_hard, stddev,
(1.0, 511.0), skew)))
if 5.0 > spec_IOR:
        stddev = spec_IOR / 3
skew = False
else:
        stddev = (10.0 - spec_IOR) / 3
skew = True
material.specular_ior = skewedGauss(spec_IOR, stddev, (0.0, 10.0), skew)
# Raytrans settings:
# *** Added on 11/17/2011 ***
material.use_transparency = use_trans
if use_trans:
trans = material.raytrace_transparency
# Fixed values:
material.transparency_method = 'RAYTRACE'
trans.depth = 24
trans.gloss_samples = 32
trans.falloff = 1.0
# Needs randomization:
        material.alpha = -gauss(alpha, 0.05) + 1
trans.gloss_factor = -gauss(cloudy, 0.05) + 1
trans.filter = gauss(cloudy, 0.1)
trans.ior = skewedGauss(mat_IOR, 0.01, [0.25, 4.0], mat_IOR > 2.125)
#Misc. settings:
material.use_transparent_shadows = True
# Rock textures:
# Now using slot.texture for texture access instead of
# bpy.data.textures[newTex[<index>]]
# *** Completed on 9/6/2011 ***
# Create the four new textures:
textureTypes = ['MUSGRAVE', 'CLOUDS', 'DISTORTED_NOISE',
'STUCCI', 'VORONOI']
for i in range(numTex):
texColor = []
# Set the active material slot:
material.active_texture_index = i
# Assign a texture to the active material slot:
material.active_texture = bpy.data.textures.new(name = 'stone_tex',
type = 'NONE')
# Store the slot to easy coding access:
slot = material.texture_slots[i]
# If the texture is not a moss texture:
if i > 1:
slot.texture.type = textureTypes[randint(0, 3)]
# Set the texture's color (RGB):
for j in range(3):
if color[j] > 0.9 or color[j] < 0.1:
texColor.append(skewedGauss(color[j], color[j] / 30,
(0, 1), color[j] > 0.9))
else:
texColor.append(gauss(color[j], color[j] / 30))
slot.color = texColor
# Randomize the value (HSV):
v = material.diffuse_color.v
if v == 0.5:
slot.color.v = gauss(v, v / 3)
elif v > 0.5:
slot.color.v = skewedGauss(v, v / 3, (0, 1), True)
else:
slot.color.v = skewedGauss(v, (1 - v) / 3, (0, 1), False)
# Adjust scale and normal based on texture type:
if slot.texture.type == 'VORONOI':
slot.scale = (gauss(5, 1), gauss(5, 1), gauss(5, 1))
slot.normal_factor = gauss(rough / 10, rough / 30)
elif slot.texture.type == 'STUCCI':
slot.scale = (gauss(1.5, 0.25), gauss(1.5, 0.25),
gauss(1.5, 0.25))
slot.normal_factor = gauss(rough / 10, rough / 30)
elif slot.texture.type == 'DISTORTED_NOISE':
slot.scale = (gauss(1.5, 0.25), gauss(1.5, 0.25),
gauss(1.5, 0.25))
slot.normal_factor = gauss(rough / 10, rough / 30)
elif slot.texture.type == 'MUSGRAVE':
slot.scale = (gauss(1.5, 0.25), gauss(1.5, 0.25),
gauss(1.5, 0.25))
slot.normal_factor = gauss(rough, rough / 3)
elif slot.texture.type == 'CLOUDS':
slot.scale = (gauss(1.5, 0.25), gauss(1.5, 0.25),
gauss(1.5, 0.25))
slot.normal_factor = gauss(rough, rough / 3)
# Set the color influence to 0.5.
# This allows for the moss textures to show:
slot.diffuse_color_factor = 0.5
# Set additional influence booleans:
slot.use_stencil = True
slot.use_map_specular = True
slot.use_map_color_spec = True
slot.use_map_hardness = True
slot.use_map_normal = True
# The following is for setting up the moss textures:
else:
slot.texture.type = textureTypes[i]
# Set the mosses color (RGB):
texColor.append(gauss(0.5, 1 / 6))
texColor.append(1)
texColor.append(0)
slot.color = texColor
# Randomize the value (HSV):
slot.color.v = gauss(0.275, 1 / 24)
# Scale the texture size:
slot.scale = (gauss(1.5, 0.25),
gauss(1.5, 0.25),
gauss(1.5, 0.25))
# Set the strength of the moss color:
slot.diffuse_color_factor = mossiness
# Have it influence spec and hardness:
slot.use_map_specular = True
slot.use_map_color_spec = True
slot.use_map_hardness = True
            # If the texture is a clouds texture with a Voronoi crackle basis, use "Negative":
if slot.texture.type == 'CLOUDS':
if slot.texture.noise_basis == 'VORONOI_CRACKLE':
slot.invert = True
if mossiness == 0:
slot.use = False
randomizeTexture(slot.texture, 10 + i)
return
# Generates an object based on one of several different mesh types.
# All meshes have exactly eight vertices, and may be built from either
# tri's or quads.
#
# param: muX - mean X offset value
# sigmaX - X offset standard deviation
# scaleX - X upper and lower bounds
# upperSkewX - Is the distribution upperskewed?
# muY - mean Y offset value
# sigmaY - Y offset standard deviation
# scaleY - Y upper and lower bounds
# upperSkewY - Is the distribution upperskewed?
# muZ - mean Z offset value
# sigmaZ - Z offset standard deviation
# scaleZ - Z upper and lower bounds
# upperSkewY - Is the distribution upperskewed?
# base - base number on the end of the object name
# shift - Addition to the base number for multiple runs.
# scaleDisplace - Scale the displacement maps
#
# return: the newly created rock object
def generateObject(context, muX, sigmaX, scaleX, upperSkewX, muY, sigmaY,
scaleY, upperSkewY, muZ, sigmaZ, scaleZ, upperSkewZ, base,
shift, scaleDisplace, scale_fac):
x = []
y = []
z = []
shape = randint(0, 11)
# Cube
# Use parameters to re-scale cube:
# Reversed if/for nesting. Should be a little faster.
if shape == 0:
for j in range(8):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 1:
for j in range(8):
if j in [0, 1, 3, 4]:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [2, 5]:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [6, 7]:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 4)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 2:
for j in range(8):
if j in [0, 2, 5, 7]:
if sigmaX == 0:
x.append(scaleX[0] / 4)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 4)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 4)
elif j in [1, 3, 4, 6]:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 3:
for j in range(8):
if j > 0:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
else:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 8)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 8)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 8)
elif shape == 4:
for j in range(10):
if j in [0, 9]:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [1, 2, 3, 4]:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [5, 7]:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 3)
if sigmaY == 0:
y.append(scaleY[0] / 3)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 3)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 6)
elif j in [6, 8]:
if sigmaX == 0:
x.append(scaleX[0] / 3)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 3)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 3)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 6)
elif shape == 5:
for j in range(10):
if j == 0:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 8)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 8)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [1, 2]:
if sigmaX == 0:
x.append(scaleZ[0] * .125)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) * 0.125)
if sigmaY == 0:
y.append(scaleZ[0] * 0.2165)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) * 0.2165)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 4)
elif j == 3:
if sigmaX == 0:
x.append(scaleX[0] / 4)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 4)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 4)
elif j in [4, 6]:
if sigmaX == 0:
x.append(scaleX[0] * 0.25)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) * 0.25)
if sigmaY == 0:
y.append(scaleY[0] * 0.433)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) * 0.433)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j == 5:
if sigmaX == 0:
x.append(scaleX[0] / 4)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [7, 9]:
if sigmaX == 0:
x.append(scaleX[0] * 0.10825)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) * 0.10825)
if sigmaY == 0:
y.append(scaleY[0] * 0.2165)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) * 0.2165)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j == 8:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 4)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 6:
for j in range(7):
if j > 0:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
else:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 7:
for j in range(10):
if j in [1, 3, 4, 5, 8, 9]:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
else:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 8:
for j in range(7):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 9:
for j in range(8):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 10:
for j in range(7):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 11:
for j in range(7):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
# This is for scaling the displacement textures.
# Scale the vertices so that their average is equal to 1 * scale factor.
if scaleDisplace:
averageX = (sum(x) / len(x)) * scale_fac[0]
for i in range(len(x)):
x[i] /= averageX
averageY = (sum(y) / len(y)) * scale_fac[1]
for i in range(len(y)):
y[i] /= averageY
averageZ = (sum(z) / len(z)) * scale_fac[2]
for i in range(len(z)):
z[i] /= averageZ
# Build vertex and face arrays:
if shape == 1:
verts = [(-x[0],-y[0],-z[0]),(x[1],-y[1],-z[1]),(x[2],-y[2],z[2]),
(-x[3],y[3],-z[3]),(x[4],y[4],-z[4]),(x[5],y[5],z[5]),
(x[6],y[6],z[6]),(x[7],y[7],-z[7])]
faces = [[0,1,2],[0,1,7],[3,0,7],[3,4,7],[1,4,7],[3,4,5],[1,2,6],
[1,4,6],[4,5,6],[0,2,6],[0,3,6],[3,5,6]]
elif shape == 2:
verts = [(-x[0],y[0],-z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(-x[4],-y[4],z[4]),(x[5],y[5],z[5]),
(x[6],y[6],z[6]),(-x[7],y[7],z[7])]
faces = [[0,1,2],[0,2,3],[0,3,7],[0,7,4],[1,4,5],[0,1,4],[5,1,2],
[5,2,6],[3,2,6],[3,6,7],[5,4,7],[5,6,7]]
elif shape == 3:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(x[4],-y[4],z[4]),(x[5],y[5],z[5]),
(-x[6],y[6],z[6]),(-x[7],-y[7],z[7])]
faces = [[0,1,2],[0,2,3],[0,3,6],[0,6,7],[0,7,4],[0,4,1],[5,4,1,2],
[5,6,3,2],[5,4,7,6]]
elif shape == 4:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(-x[4],-y[4],-z[4]),(x[5],-y[5],-z[5]),
(x[6],y[6],-z[6]),(x[7],y[7],-z[7]),(-x[8],y[8],-z[8]),
(x[9],y[9],-z[9])]
faces = [[0,1,6],[0,6,2],[0,2,7],[0,7,3],[0,3,8],[0,8,4],[0,4,5],
[0,5,1],[1,9,2],[2,9,3],[3,9,4],[4,9,1],[1,6,2],[2,7,3],
[3,8,4],[4,5,1]]
elif shape == 5:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],z[1]),(x[2],y[2],z[2]),
(-x[3],y[3],z[3]),(x[4],-y[4],-z[4]),(x[5],y[5],-z[5]),
(x[6],y[6],-z[6]),(-x[7],y[7],-z[7]),(-x[8],y[8],-z[8]),
(-x[9],-y[9],-z[9])]
faces = [[0,1,2],[0,2,3],[0,3,1],[1,4,5],[1,5,2],[2,5,6],[2,6,7],
[2,7,3],[3,7,8],[3,8,9],[3,9,1],[1,9,4],[4,5,9],[5,6,7],
[7,8,9],[9,5,7]]
elif shape == 6:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(-x[4],y[4],z[4]),(-x[5],-y[5],z[5]),
(-x[6],-y[6],-z[6])]
faces = [[0,1,2],[0,2,3,4],[0,1,6,5],[0,4,5],[1,2,3,6],[3,4,5,6]]
elif shape == 7:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(x[3],y[3],-z[3]),(-x[4],y[4],-z[4]),(-x[5],y[5],z[5]),
(-x[6],y[6],z[6]),(-x[7],y[7],-z[7]),(-x[8],-y[8],-z[8]),
(-x[9],-y[9],z[9])]
faces = [[0,1,2],[0,2,3],[0,5,6],[0,6,9],[0,1,8,9],[0,3,4,5],
[1,2,7,8],[2,3,4,7],[4,5,6,7],[6,7,8,9]]
elif shape == 8:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(-x[4],-y[4],-z[4]),(-x[5],-y[5],z[5]),
(-x[6],y[6],z[6])]
faces = [[0,2,1],[0,1,4],[0,4,5],[0,5,6],[0,6,3,2],[2,1,4,3],
[3,6,5,4]]
elif shape == 9:
verts = [(-x[0],-y[0],-z[0]),(-x[1],y[1],-z[1]),(-x[2],y[2],z[2]),
(-x[3],-y[3],z[3]),(x[4],-y[4],-z[4]),(x[5],y[5],-z[5]),
(x[6],y[6],z[6]),(x[7],-y[7],z[7])]
faces = [[0,1,6,2],[1,5,7,6],[5,4,3,7],[4,0,2,3],[0,1,5,4],[3,2,6,7]]
elif shape == 10:
verts = [(-x[0],-y[0],-z[0]),(-x[1],y[1],-z[1]),(-x[2],y[2],z[2]),
(x[3],-y[3],z[3]),(x[4],y[4],z[4]),(x[5],y[5],-z[5]),
(x[6],-y[6],-z[6])]
faces = [[0,2,3],[0,3,6],[0,1,5,6],[2,3,4],[0,1,2],[1,2,4,5],[3,4,5,6]]
elif shape == 11:
verts = [(-x[0],-y[0],-z[0]),(-x[1],y[1],-z[1]),(-x[2],y[2],z[2]),
(x[3],-y[3],z[3]),(x[4],y[4],z[4]),(x[5],y[5],-z[5]),
(x[6],-y[6],-z[6])]
faces = [[0,2,3],[0,3,6],[0,1,5,6],[2,3,4],[5,6,3],[1,5,3,4],[0,1,4,2]]
else:
verts = [(-x[0],-y[0],-z[0]),(-x[1],y[1],-z[1]),(-x[2],-y[2],z[2]),
(-x[3],y[3],z[3]),(x[4],-y[4],-z[4]),(x[5],y[5],-z[5]),
(x[6],-y[6],z[6]),(x[7],y[7],z[7])]
faces = [[0,1,3,2],[0,1,5,4],[0,4,6,2],[7,5,4,6],[7,3,2,6],[7,5,1,3]]
## name = "Rock." + str(base + shift).zfill(3)
name = "rock"
# Make object:
obj = createMeshObject(context, verts, [], faces, name)
if scaleDisplace:
## bpy.data.objects[name].scale = Vector((averageX, averageY, averageZ))
obj.object.scale = Vector((averageX, averageY, averageZ))
# For a slight speed bump / Readability:
## mesh = bpy.data.meshes[name]
mesh = obj.object.data
# Apply creasing:
if shape == 0:
for i in range(12):
# todo: "0.375 / 3"? WTF? That = 0.125. . . .
# *** Completed 7/15/2011: Changed second one ***
mesh.edges[i].crease = gauss(0.125, 0.125)
elif shape == 1:
for i in [0, 2]:
mesh.edges[i].crease = gauss(0.5, 0.125)
for i in [6, 9, 11, 12]:
mesh.edges[i].crease = gauss(0.25, 0.05)
for i in [5, 7, 15, 16]:
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 2:
for i in range(18):
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 3:
for i in [0, 1, 6, 10, 13]:
mesh.edges[i].crease = gauss(0.25, 0.05)
mesh.edges[8].crease = gauss(0.5, 0.125)
elif shape == 4:
for i in [5, 6, 7, 10, 14, 16, 19, 21]:
mesh.edges[i].crease = gauss(0.5, 0.125)
elif shape == 7:
for i in range(18):
if i in [0, 1, 2, 3, 6, 7, 8, 9, 13, 16]:
mesh.edges[i].crease = gauss(0.5, 0.125)
elif i in [11,17]:
mesh.edges[i].crease = gauss(0.25, 0.05)
else:
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 8:
for i in range(12):
if i in [0, 3, 8, 9, 10]:
mesh.edges[i].crease = gauss(0.5, 0.125)
elif i == 11:
mesh.edges[i].crease = gauss(0.25, 0.05)
else:
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 9:
for i in range(12):
if i in [0, 3, 4, 11]:
mesh.edges[i].crease = gauss(0.5, 0.125)
else:
mesh.edges[i].crease = gauss(0.25, 0.05)
elif shape == 10:
for i in range(12):
if i in [0, 2, 3, 4, 8, 11]:
mesh.edges[i].crease = gauss(0.5, 0.125)
elif i in [1, 5, 7]:
mesh.edges[i].crease = gauss(0.25, 0.05)
else:
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 11:
for i in range(11):
if i in [1, 2, 3, 4, 8, 11]:
mesh.edges[i].crease = gauss(0.25, 0.05)
else:
mesh.edges[i].crease = gauss(0.125, 0.025)
return obj.object
## return name
# Artificially skews a normal (gaussian) distribution.  This will not create
# a continuous distribution curve but instead acts as a piecewise function.
# This linearly scales the output on one side to fit the bounds.
#
# Example output histograms:
#
# Upper skewed: Lower skewed:
# | ▄ | _
# | █ | █
# | █_ | █
# | ██ | _█
# | _██ | ██
# | _▄███_ | ██ _
# | ▄██████ | ▄██▄█▄_
# | _█▄███████ | ███████
# | _██████████_ | ████████▄▄█_ _
# | _▄▄████████████ | ████████████▄█_
# | _▄_ ▄███████████████▄_ | _▄███████████████▄▄_
# ------------------------- -----------------------
# |mu |mu
# Histograms were generated in R (http://www.r-project.org/) based on the
# calculations below and manually duplicated here.
#
# param: mu - mu is the mean of the distribution.
# sigma - sigma is the standard deviation of the distribution.
# bounds - bounds[0] is the lower bound and bounds[1]
# is the upper bound.
# upperSkewed - if the distribution is upper skewed.
# return: out - Randomly generated value from the skewed distribution.
#
# @todo: Because NumPy's random value generators are faster when called
# a bunch of times at once, maybe allow this to generate and return
# multiple values at once?
def skewedGauss(mu, sigma, bounds, upperSkewed=True):
raw = gauss(mu, sigma)
# Quicker to check an extra condition than do unnecessary math. . . .
if raw < mu and not upperSkewed:
out = ((mu - bounds[0]) / (3 * sigma)) * raw + ((mu * (bounds[0] - (mu - 3 * sigma))) / (3 * sigma))
elif raw > mu and upperSkewed:
out = ((mu - bounds[1]) / (3 * -sigma)) * raw + ((mu * (bounds[1] - (mu + 3 * sigma))) / (3 * -sigma))
else:
out = raw
return out
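# A minimal usage sketch (illustrative values only; it mirrors the call made
# from generateRocks() below and is never invoked by the add-on itself):
def _skewed_gauss_example():
    # Mean 2.5, sigma 0.5, hard bounds (1.0, 3.0): draws up to three sigma
    # above the mean are linearly compressed so they stay below the upper
    # bound, while draws below the mean are returned unchanged.
    return skewedGauss(2.5, 0.5, (1.0, 3.0), upperSkewed=True)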
# @todo create a def for generating an alpha and beta for a beta distribution
# given a mu, sigma, and an upper and lower bound. This proved faster in
# profiling in addition to providing a much better distribution curve
# provided multiple iterations happen within this function; otherwise it was
# slower.
# This might be a scratch because of the bounds placed on mu and sigma:
#
# For alpha > 1 and beta > 1:
# mu^2 - mu^3 mu^3 - mu^2 + mu
# ----------- < sigma < ----------------
# 1 + mu 2 - mu
#
##def generateBeta(mu, sigma, scale, repitions=1):
## results = []
##
## return results
# Creates rock objects:
def generateRocks(context, scaleX, skewX, scaleY, skewY, scaleZ, skewZ,
scale_fac, detail, display_detail, deform, rough,
smooth_fac, smooth_it, mat_enable, color, mat_bright,
mat_rough, mat_spec, mat_hard, mat_use_trans, mat_alpha,
mat_cloudy, mat_IOR, mat_mossy, numOfRocks=1, userSeed=1.0,
scaleDisplace=False, randomSeed=True):
global lastRock
newMat = []
sigmaX = 0
sigmaY = 0
sigmaZ = 0
upperSkewX = False
upperSkewY = False
upperSkewZ = False
shift = 0
lastUsedTex = 1
vertexScaling = []
# Seed the random Gaussian value generator:
if randomSeed:
seed(int(time.time()))
else:
seed(userSeed)
if mat_enable:
# Calculate the number of materials to use.
# If less than 10 rocks are being generated, generate one material
# per rock.
# If more than 10 rocks are being generated, generate
# ceil[(1/9)n + (80/9)] materials.
# -> 100 rocks will result in 20 materials
# -> 1000 rocks will result in 120 materials.
if numOfRocks < 10:
numOfMats = numOfRocks
else:
numOfMats = math.ceil((1/9) * numOfRocks + (80/9))
# newMat = generateMaterialsList(numOfMats)
# *** No longer needed on 9/6/2011 ***
# todo Set general material settings:
# *** todo completed 5/25/2011 ***
# Material roughness actual max = 3.14. Needs scaling.
mat_rough *= 0.628
spec_IOR = 1.875 * (mat_spec ** 2) + 7.125 * mat_spec + 1
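        # This quadratic maps mat_spec in [0, 1] onto a specular IOR in
        # [1, 10] (e.g. 0 -> 1.0, 0.5 -> about 5.03, 1 -> 10.0).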
# Changed as material mapping is no longer needed.
# *** Complete 9/6/2011 ***
for i in range(numOfMats):
newMat.append(bpy.data.materials.new(name = 'stone'))
randomizeMaterial(newMat[i], color, mat_bright,
mat_rough, mat_spec, mat_hard, mat_use_trans,
mat_alpha, mat_cloudy, mat_IOR, mat_mossy,
spec_IOR)
# These values need to be really small to look good.
# So the user does not have to use such ridiculously small values:
deform /= 10
rough /= 100
# Verify that the min really is the min:
if scaleX[1] < scaleX[0]:
scaleX[0], scaleX[1] = scaleX[1], scaleX[0]
if scaleY[1] < scaleY[0]:
scaleY[0], scaleY[1] = scaleY[1], scaleY[0]
if scaleZ[1] < scaleZ[0]:
scaleZ[0], scaleZ[1] = scaleZ[1], scaleZ[0]
# todo: edit below to allow for skewing the distribution
# *** todo completed 4/22/2011 ***
# *** Code now generating "int not scriptable error" in Blender ***
#
# Calculate mu and sigma for a Gaussian distributed random number
# generation:
# If the lower and upper bounds are the same, skip the math.
#
    # sigma is the standard deviation of the values.  Three standard deviations
    # cover about 99.7% of a normal distribution, which is where we want most
    # generated values to fall.  Since the distribution might be skewed, sigma
    # is set to one third of the distance between the mean and the furthest
    # bound, and the other side is scaled down after the number is generated.
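    # Worked example (illustrative numbers only): with scaleX = [1, 3] and a
    # user skew of 0.5, skewX becomes (0.5 + 1) / 2 = 0.75, so
    # muX = 1 + (3 - 1) * 0.75 = 2.5, sigmaX = (2.5 - 1) / 3 = 0.5, and
    # upperSkewX is True.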
if scaleX[0] != scaleX[1]:
skewX = (skewX + 1) / 2
muX = scaleX[0] + ((scaleX[1] - scaleX[0]) * skewX)
if skewX < 0.5:
sigmaX = (scaleX[1] - muX) / 3
else:
sigmaX = (muX - scaleX[0]) / 3
upperSkewX = True
else:
muX = scaleX[0]
if scaleY[0] != scaleY[1]:
skewY = (skewY + 1) / 2
muY = scaleY[0] + ((scaleY[1] - scaleY[0]) * skewY)
if skewY < 0.5:
sigmaY = (scaleY[1] - muY) / 3
else:
sigmaY = (muY - scaleY[0]) / 3
upperSkewY = True
else:
muY = scaleY[0]
if scaleZ[0] != scaleZ[1]:
skewZ = (skewZ + 1) / 2
muZ = scaleZ[0] + ((scaleZ[1] - scaleZ[0]) * skewZ)
if skewZ < 0.5:
sigmaZ = (scaleZ[1] - muZ) / 3
else:
sigmaZ = (muZ - scaleZ[0]) / 3
upperSkewZ = True
else:
muZ = scaleZ[0]
for i in range(numOfRocks):
# todo: enable different random values for each (x,y,z) coordinate for
# each vertex. This will add additional randomness to the shape of the
# generated rocks.
# *** todo completed 4/19/2011 ***
# *** Code is notably slower at high rock counts ***
rock = generateObject(context, muX, sigmaX, scaleX, upperSkewX, muY,
## name = generateObject(context, muX, sigmaX, scaleX, upperSkewX, muY,
sigmaY, scaleY, upperSkewY, muZ, sigmaZ, scaleZ,
upperSkewZ, i, lastRock, scaleDisplace, scale_fac)
## rock = bpy.data.objects[name]
# todo Map what the two new textures will be:
# This is not working. It works on paper so . . . ???
# *** todo completed on 4/23/2011 ***
# *** todo re-added as the first rock is getting
# 'Texture.001' twice. ***
# *** todo completed on 4/25/2011 ***
# *** Script no longer needs to map new texture names 9/6/2011 ***
# Create the four new textures:
# todo Set displacement texture parameters:
# *** todo completed on 5/31/2011 ***
# Voronoi has been removed from being an option for the fine detail
# texture.
texTypes = ['CLOUDS', 'MUSGRAVE', 'DISTORTED_NOISE', 'STUCCI', 'VORONOI']
newTex = []
# The first texture is to give a more random base shape appearance:
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[1]))
randomizeTexture(newTex[0], 0)
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[4]))
randomizeTexture(newTex[1], 0)
if numpy:
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[int(round(weibull(1, 1)[0] / 2.125))]))
randomizeTexture(newTex[2], 1)
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[int(round(weibull(1, 1)[0] / 2.125))]))
randomizeTexture(newTex[3], 2)
else:
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[int(round(weibull(1, 1) / 2.125))]))
randomizeTexture(newTex[2], 1)
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[int(round(weibull(1, 1) / 2.125))]))
randomizeTexture(newTex[3], 2)
# Add modifiers:
rock.modifiers.new(name = "Subsurf", type = 'SUBSURF')
rock.modifiers.new(name = "Subsurf", type = 'SUBSURF')
rock.modifiers.new(name = "Displace", type = 'DISPLACE')
rock.modifiers.new(name = "Displace", type = 'DISPLACE')
rock.modifiers.new(name = "Displace", type = 'DISPLACE')
rock.modifiers.new(name = "Displace", type = 'DISPLACE')
# If smoothing is enabled, allow a little randomness into the
# smoothing factor. Then add the smoothing modifier.
if smooth_fac > 0.0 and smooth_it > 0:
rock.modifiers.new(name = "Smooth", type='SMOOTH')
rock.modifiers[6].factor = gauss(smooth_fac, (smooth_fac ** 0.5) / 12)
rock.modifiers[6].iterations = smooth_it
# Make a call to random to keep things consistent:
else:
gauss(0, 1)
# Set subsurf modifier parameters:
rock.modifiers[0].levels = display_detail
rock.modifiers[0].render_levels = detail
rock.modifiers[1].levels = display_detail
rock.modifiers[1].render_levels = detail
# todo Set displacement modifier parameters:
# *** todo completed on 4/23/2011 ***
# *** toned down the variance on 4/26/2011 ***
# *** added third modifier on 4/28/2011 ***
# *** texture access changed on 9/6/2011 ***
rock.modifiers[2].texture = newTex[0]
rock.modifiers[2].strength = gauss(deform / 100, (1 / 300) * deform)
rock.modifiers[2].mid_level = 0
rock.modifiers[3].texture = newTex[1]
rock.modifiers[3].strength = gauss(deform, (1 / 3) * deform)
rock.modifiers[3].mid_level = 0
rock.modifiers[4].texture = newTex[2]
rock.modifiers[4].strength = gauss(rough * 2, (1 / 3) * rough)
rock.modifiers[5].texture = newTex[3]
rock.modifiers[5].strength = gauss(rough, (1 / 3) * rough)
# Set mesh to be smooth and fix the normals:
utils.smooth(rock.data)
## utils.smooth(bpy.data.meshes[name])
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.normals_make_consistent()
bpy.ops.object.editmode_toggle()
if mat_enable:
bpy.ops.object.material_slot_add()
rock.material_slots[0].material = newMat[randint(0, numOfMats - 1)]
# Store the last value of i:
shift = i
# Add the shift to lastRock:
lastRock += shift + 1
return
# Much of the code below is more-or-less imitation of other addons and as such
# I have left it undocumented.
class rocks(bpy.types.Operator):
"""Add rock objects"""
bl_idname = "mesh.rocks"
bl_label = "Add Rocks"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Add rocks"
# Get the preset values from the XML file.
# -> The script was morphed into a Python module
# to support this.
# Tell settings.py to parse the XML file with the settings.
# Then get the default values resulting from the parsing.
# Make a list containing the default values and append to that
# the presets specified in the same XML file. This list will
# be used to load preset values.
settings.parse()
defaults = settings.getDefault()
presetsList = [defaults]
presetsList += settings.getPresetLists()
presets = []
lastPreset = 0
# Build the presets list for the enum property.
# This needs to be a for loop as the user might add presets to
# the XML file and those should show here:
for i in range(len(presetsList)):
value = str(i)
name = presetsList[i][0]
description = name + " preset values."
presets.append((value, name, description))
preset_values = EnumProperty(items = presets,
name = "Presets",
description = "Preset values for some rock types")
num_of_rocks = IntProperty(name = "Number of rocks",
description = "Number of rocks to generate. WARNING: Slow at high values!",
min = 1, max = 1048576,
soft_max = 20,
default = 1)
scale_X = FloatVectorProperty(name = "X scale",
description = "X axis scaling range.",
min = 0.0, max = 256.0, step = 1,
default = defaults[1], size = 2)
skew_X = FloatProperty(name = "X skew",
description = "X Skew ratio. 0.5 is no skew.",
min = -1.0, max = 1.0, default = defaults[4])
scale_Y = FloatVectorProperty(name = "Y scale",
description = "Y axis scaling range.",
min = 0.0, max = 256.0, step = 1,
default = defaults[2], size = 2)
skew_Y = FloatProperty(name = "Y skew",
description = "Y Skew ratio. 0.5 is no skew.",
min = -1.0, max = 1.0, default = defaults[5])
scale_Z = FloatVectorProperty(name = "Z scale",
description = "Z axis scaling range.",
min = 0.0, max = 256.0, step = 1,
default = defaults[3], size = 2)
skew_Z = FloatProperty(name = "Z skew",
description = "Z Skew ratio. 0.5 is no skew.",
min = -1.0, max = 1.0, default = defaults[6])
use_scale_dis = BoolProperty(name = "Scale displace textures",
description = "Scale displacement textures with dimensions. May cause streched textures.",
default = defaults[7])
scale_fac = FloatVectorProperty(name = "Scaling Factor",
description = "XYZ scaling factor. 1 = no scaling.",
min = 0.0001, max = 256.0, step = 0.1,
default = defaults[8], size = 3)
# @todo Possible to title this section "Physical Properties:"?
deform = FloatProperty(name = "Deformation",
description = "Rock deformation",
min = 0.0, max = 1024.0, default = defaults[9])
rough = FloatProperty(name = "Roughness",
description = "Rock roughness",
min = 0.0, max = 1024.0, default = defaults[10])
detail = IntProperty(name = "Detail level",
description = "Detail level. WARNING: Slow at high values!",
min = 1, max = 1024, default = defaults[11])
display_detail = IntProperty(name = "Display Detail",
description = "Display detail. Use a lower value for high numbers of rocks.",
min = 1, max = 128, default = defaults[12])
smooth_fac = FloatProperty(name = "Smooth Factor",
description = "Smoothing factor. A value of 0 disables.",
min = 0.0, max = 128.0, default = defaults[13])
smooth_it = IntProperty(name = "Smooth Iterations",
description = "Smoothing iterations. A value of 0 disables.",
min = 0, max = 128, default = defaults[14])
# @todo Add material properties
mat_enable = BoolProperty(name = "Generate materials",
description = "Generate materials and textures for the rocks",
default = defaults[15])
mat_color = FloatVectorProperty(name = "Color",
description = "Base color settings (RGB)",
min = 0.0, max = 1.0, default = defaults[16], size = 3, subtype = 'COLOR')
mat_bright = FloatProperty(name = "Brightness",
description = "Material brightness",
min = 0.0, max = 1.0, default = defaults[17])
mat_rough = FloatProperty(name = "Roughness",
description = "Material roughness",
min = 0.0, max = 5.0, default = defaults[18])
mat_spec = FloatProperty(name = "Shine",
description = "Material specularity strength",
min = 0.0, max = 1.0, default = defaults[19])
mat_hard = IntProperty(name = "Hardness",
description = "Material hardness",
min = 0, max = 511, default = defaults[20])
mat_use_trans = BoolProperty(name = "Use Transparency",
description = "Enables transparency in rocks (WARNING: SLOW RENDER TIMES)",
default = defaults[21])
mat_alpha = FloatProperty(name = "Alpha",
description = "Transparency of the rocks",
min = 0.0, max = 1.0, default = defaults[22])
mat_cloudy = FloatProperty(name = "Cloudy",
description = "How cloudy the transparent rocks look",
min = 0.0, max = 1.0, default = defaults[23])
mat_IOR = FloatProperty(name = "IoR",
description = "Index of Refraction",
min = 0.25, max = 4.0, soft_max = 2.5,
default = defaults[24])
mat_mossy = FloatProperty(name = "Mossiness",
description = "Amount of mossiness on the rocks",
min = 0.0, max = 1.0, default = defaults[25])
use_generate = BoolProperty(name = "Generate Rocks",
description = "Enable actual generation.",
default = defaults[26])
use_random_seed = BoolProperty(name = "Use a random seed",
description = "Create a seed based on time. Causes user seed to be ignored.",
default = defaults[27])
user_seed = IntProperty(name = "User seed",
description = "Use a specific seed for the generator.",
min = 0, max = 1048576, default = defaults[28])
def draw(self, context):
layout = self.layout
box = layout.box()
box.prop(self, 'num_of_rocks')
box = layout.box()
box.prop(self, 'scale_X')
box.prop(self, 'skew_X')
box.prop(self, 'scale_Y')
box.prop(self, 'skew_Y')
box.prop(self, 'scale_Z')
box.prop(self, 'skew_Z')
box.prop(self, 'use_scale_dis')
if self.use_scale_dis:
box.prop(self, 'scale_fac')
else:
self.scale_fac = utils.toFloats(self.defaults[8])
box = layout.box()
box.prop(self, 'deform')
box.prop(self, 'rough')
box.prop(self, 'detail')
box.prop(self, 'display_detail')
box.prop(self, 'smooth_fac')
box.prop(self, 'smooth_it')
box = layout.box()
box.prop(self, 'mat_enable')
if self.mat_enable:
box.prop(self, 'mat_color')
box.prop(self, 'mat_bright')
box.prop(self, 'mat_rough')
box.prop(self, 'mat_spec')
box.prop(self, 'mat_hard')
box.prop(self, 'mat_use_trans')
if self.mat_use_trans:
box.prop(self, 'mat_alpha')
box.prop(self, 'mat_cloudy')
box.prop(self, 'mat_IOR')
box.prop(self, 'mat_mossy')
box = layout.box()
box.prop(self, 'use_generate')
box.prop(self, 'use_random_seed')
if not self.use_random_seed:
box.prop(self, 'user_seed')
box.prop(self, 'preset_values')
def execute(self, context):
# The following "if" block loads preset values:
if self.lastPreset != int(self.preset_values):
self.scale_X = utils.toFloats(self.presetsList[int(self.preset_values)][1])
self.scale_Y = utils.toFloats(self.presetsList[int(self.preset_values)][2])
self.scale_Z = utils.toFloats(self.presetsList[int(self.preset_values)][3])
self.skew_X = float(self.presetsList[int(self.preset_values)][4])
self.skew_Y = float(self.presetsList[int(self.preset_values)][5])
self.skew_Z = float(self.presetsList[int(self.preset_values)][6])
self.use_scale_dis = bool(self.presetsList[int(self.preset_values)][7])
self.scale_fac = utils.toFloats(self.presetsList[int(self.preset_values)][8])
self.deform = float(self.presetsList[int(self.preset_values)][9])
self.rough = float(self.presetsList[int(self.preset_values)][10])
self.detail = int(self.presetsList[int(self.preset_values)][11])
self.display_detail = int(self.presetsList[int(self.preset_values)][12])
self.smooth_fac = float(self.presetsList[int(self.preset_values)][13])
self.smooth_it = int(self.presetsList[int(self.preset_values)][14])
self.mat_enable = bool(self.presetsList[int(self.preset_values)][15])
self.mat_color = utils.toFloats(self.presetsList[int(self.preset_values)][16])
self.mat_bright = float(self.presetsList[int(self.preset_values)][17])
self.mat_rough = float(self.presetsList[int(self.preset_values)][18])
self.mat_spec = float(self.presetsList[int(self.preset_values)][19])
self.mat_hard = int(self.presetsList[int(self.preset_values)][20])
self.mat_use_trans = bool(self.presetsList[int(self.preset_values)][21])
self.mat_alpha = float(self.presetsList[int(self.preset_values)][22])
self.mat_cloudy = float(self.presetsList[int(self.preset_values)][23])
self.mat_IOR = float(self.presetsList[int(self.preset_values)][24])
self.mat_mossy = float(self.presetsList[int(self.preset_values)][25])
self.use_generate = bool(self.presetsList[int(self.preset_values)][26])
self.use_random_seed = bool(self.presetsList[int(self.preset_values)][27])
self.user_seed = int(self.presetsList[int(self.preset_values)][28])
self.lastPreset = int(self.preset_values)
# todo Add deform, deform_Var, rough, and rough_Var:
# *** todo completed 4/23/2011 ***
# *** Eliminated "deform_Var" and "rough_Var" so the script is not
# as complex to use. May add in again as advanced features. ***
if self.use_generate:
generateRocks(context,
self.scale_X,
self.skew_X,
self.scale_Y,
self.skew_Y,
self.scale_Z,
self.skew_Z,
self.scale_fac,
self.detail,
self.display_detail,
self.deform,
self.rough,
self.smooth_fac,
self.smooth_it,
self.mat_enable,
self.mat_color,
self.mat_bright,
self.mat_rough,
self.mat_spec,
self.mat_hard,
self.mat_use_trans,
self.mat_alpha,
self.mat_cloudy,
self.mat_IOR,
self.mat_mossy,
self.num_of_rocks,
self.user_seed,
self.use_scale_dis,
self.use_random_seed)
return {'FINISHED'}
| Passtechsoft/TPEAlpGen | blender/release/scripts/addons_contrib/add_mesh_rocks/rockgen.py | Python | gpl-3.0 | 69,120 | [
"Gaussian"
] | abd82cd58013c2a472b209149637f6868ffea4bb79b4ca435b69ec0e2b9bb643 |
# -*- coding: utf-8 -*-
"""Tests for :mod:`pybel.parser`."""
| pybel/pybel | tests/test_parse/__init__.py | Python | mit | 62 | [
"Pybel"
] | fb08b8d9bcc1b130f5405ec715ead8dcd529d260d9a38f69285abfd572fa12a8 |
from math import log
import numpy as np
import functools
import gym
import tree # pip install dm_tree
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils import MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT, \
SMALL_NUMBER
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.framework import try_import_tf, try_import_tfp
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
from ray.rllib.utils.typing import TensorType, List, Union, \
Tuple, ModelConfigDict
tf1, tf, tfv = try_import_tf()
tfp = try_import_tfp()
@DeveloperAPI
class TFActionDistribution(ActionDistribution):
"""TF-specific extensions for building action distributions."""
@override(ActionDistribution)
def __init__(self, inputs: List[TensorType], model: ModelV2):
super().__init__(inputs, model)
self.sample_op = self._build_sample_op()
self.sampled_action_logp_op = self.logp(self.sample_op)
@DeveloperAPI
def _build_sample_op(self) -> TensorType:
"""Implement this instead of sample(), to enable op reuse.
This is needed since the sample op is non-deterministic and is shared
between sample() and sampled_action_logp().
"""
raise NotImplementedError
@override(ActionDistribution)
def sample(self) -> TensorType:
"""Draw a sample from the action distribution."""
return self.sample_op
@override(ActionDistribution)
def sampled_action_logp(self) -> TensorType:
"""Returns the log probability of the sampled action."""
return self.sampled_action_logp_op
class Categorical(TFActionDistribution):
"""Categorical distribution for discrete action spaces."""
@DeveloperAPI
def __init__(self,
inputs: List[TensorType],
model: ModelV2 = None,
temperature: float = 1.0):
assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
# Allow softmax formula w/ temperature != 1.0:
# Divide inputs by temperature.
super().__init__(inputs / temperature, model)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
return tf.math.argmax(self.inputs, axis=1)
@override(ActionDistribution)
def logp(self, x: TensorType) -> TensorType:
return -tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.inputs, labels=tf.cast(x, tf.int32))
@override(ActionDistribution)
def entropy(self) -> TensorType:
a0 = self.inputs - tf.reduce_max(self.inputs, axis=1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.math.log(z0) - a0), axis=1)
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
a0 = self.inputs - tf.reduce_max(self.inputs, axis=1, keepdims=True)
a1 = other.inputs - tf.reduce_max(other.inputs, axis=1, keepdims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = tf.reduce_sum(ea0, axis=1, keepdims=True)
z1 = tf.reduce_sum(ea1, axis=1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(
p0 * (a0 - tf.math.log(z0) - a1 + tf.math.log(z1)), axis=1)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return tf.squeeze(tf.random.categorical(self.inputs, 1), axis=1)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return action_space.n
class MultiCategorical(TFActionDistribution):
"""MultiCategorical distribution for MultiDiscrete action spaces."""
def __init__(self,
inputs: List[TensorType],
model: ModelV2,
input_lens: Union[List[int], np.ndarray, Tuple[int, ...]],
action_space=None):
# skip TFActionDistribution init
ActionDistribution.__init__(self, inputs, model)
self.cats = [
Categorical(input_, model)
for input_ in tf.split(inputs, input_lens, axis=1)
]
self.action_space = action_space
self.sample_op = self._build_sample_op()
self.sampled_action_logp_op = self.logp(self.sample_op)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
sample_ = tf.stack(
[cat.deterministic_sample() for cat in self.cats], axis=1)
if isinstance(self.action_space, gym.spaces.Box):
return tf.cast(
tf.reshape(sample_, [-1] + list(self.action_space.shape)),
self.action_space.dtype)
return sample_
@override(ActionDistribution)
def logp(self, actions: TensorType) -> TensorType:
# If tensor is provided, unstack it into list.
if isinstance(actions, tf.Tensor):
if isinstance(self.action_space, gym.spaces.Box):
actions = tf.reshape(
actions, [-1, int(np.product(self.action_space.shape))])
actions = tf.unstack(tf.cast(actions, tf.int32), axis=1)
logps = tf.stack(
[cat.logp(act) for cat, act in zip(self.cats, actions)])
return tf.reduce_sum(logps, axis=0)
@override(ActionDistribution)
def multi_entropy(self) -> TensorType:
return tf.stack([cat.entropy() for cat in self.cats], axis=1)
@override(ActionDistribution)
def entropy(self) -> TensorType:
return tf.reduce_sum(self.multi_entropy(), axis=1)
@override(ActionDistribution)
def multi_kl(self, other: ActionDistribution) -> TensorType:
return tf.stack(
[cat.kl(oth_cat) for cat, oth_cat in zip(self.cats, other.cats)],
axis=1)
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
return tf.reduce_sum(self.multi_kl(other), axis=1)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
sample_op = tf.stack([cat.sample() for cat in self.cats], axis=1)
if isinstance(self.action_space, gym.spaces.Box):
return tf.cast(
tf.reshape(sample_op, [-1] + list(self.action_space.shape)),
dtype=self.action_space.dtype)
return sample_op
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space,
model_config: ModelConfigDict) -> Union[int, np.ndarray]:
# Int Box.
if isinstance(action_space, gym.spaces.Box):
assert action_space.dtype.name.startswith("int")
low_ = np.min(action_space.low)
high_ = np.max(action_space.high)
assert np.all(action_space.low == low_)
assert np.all(action_space.high == high_)
return np.product(action_space.shape) * (high_ - low_ + 1)
# MultiDiscrete space.
else:
return np.sum(action_space.nvec)
class GumbelSoftmax(TFActionDistribution):
"""GumbelSoftmax distr. (for differentiable sampling in discr. actions
The Gumbel Softmax distribution [1] (also known as the Concrete [2]
distribution) is a close cousin of the relaxed one-hot categorical
distribution, whose tfp implementation we will use here plus
adjusted `sample_...` and `log_prob` methods. See discussion at [0].
[0] https://stackoverflow.com/questions/56226133/
soft-actor-critic-with-discrete-action-space
[1] Categorical Reparametrization with Gumbel-Softmax (Jang et al, 2017):
https://arxiv.org/abs/1611.01144
[2] The Concrete Distribution: A Continuous Relaxation of Discrete Random
Variables (Maddison et al, 2017) https://arxiv.org/abs/1611.00712
"""
@DeveloperAPI
def __init__(self,
inputs: List[TensorType],
model: ModelV2 = None,
temperature: float = 1.0):
"""Initializes a GumbelSoftmax distribution.
Args:
temperature (float): Temperature parameter. For low temperatures,
the expected value approaches a categorical random variable.
For high temperatures, the expected value approaches a uniform
distribution.
"""
assert temperature >= 0.0
self.dist = tfp.distributions.RelaxedOneHotCategorical(
temperature=temperature, logits=inputs)
self.probs = tf.nn.softmax(self.dist._distribution.logits)
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
# Return the dist object's prob values.
return self.probs
@override(ActionDistribution)
def logp(self, x: TensorType) -> TensorType:
# Override since the implementation of tfp.RelaxedOneHotCategorical
# yields positive values.
if x.shape != self.dist.logits.shape:
values = tf.one_hot(
x, self.dist.logits.shape.as_list()[-1], dtype=tf.float32)
assert values.shape == self.dist.logits.shape, (
values.shape, self.dist.logits.shape)
x = values
# [0]'s implementation (see line below) seems to be an approximation
# to the actual Gumbel Softmax density.
return -tf.reduce_sum(
-x * tf.nn.log_softmax(self.dist.logits, axis=-1), axis=-1)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self.dist.sample()
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space,
model_config: ModelConfigDict) -> Union[int, np.ndarray]:
return action_space.n
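# Hypothetical usage sketch (not part of RLlib): turn raw model logits into a
# differentiable, approximately one-hot action sample. The shapes, the
# temperature and the helper name below are illustration-only assumptions and
# require TensorFlow plus tensorflow_probability to be installed.
def _gumbel_softmax_usage_sketch():
    logits = tf.random.normal([4, 3])             # [batch, num_actions]
    dist = GumbelSoftmax(logits, model=None, temperature=0.5)
    soft_one_hot = dist.sample()                  # differentiable w.r.t. logits
    return soft_one_hot, dist.logp(soft_one_hot)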
class DiagGaussian(TFActionDistribution):
"""Action distribution where each vector element is a gaussian.
The first half of the input vector defines the gaussian means, and the
second half the log of the gaussian standard deviations.
"""
def __init__(self, inputs: List[TensorType], model: ModelV2):
mean, log_std = tf.split(inputs, 2, axis=1)
self.mean = mean
self.log_std = log_std
self.std = tf.exp(log_std)
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
return self.mean
@override(ActionDistribution)
def logp(self, x: TensorType) -> TensorType:
return -0.5 * tf.reduce_sum(
tf.math.square((tf.cast(x, tf.float32) - self.mean) / self.std),
axis=1
) - 0.5 * np.log(2.0 * np.pi) * tf.cast(tf.shape(x)[1], tf.float32) - \
tf.reduce_sum(self.log_std, axis=1)
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
assert isinstance(other, DiagGaussian)
return tf.reduce_sum(
other.log_std - self.log_std +
(tf.math.square(self.std) + tf.math.square(self.mean - other.mean))
/ (2.0 * tf.math.square(other.std)) - 0.5,
axis=1)
@override(ActionDistribution)
def entropy(self) -> TensorType:
return tf.reduce_sum(
self.log_std + .5 * np.log(2.0 * np.pi * np.e), axis=1)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self.mean + self.std * tf.random.normal(tf.shape(self.mean))
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space,
model_config: ModelConfigDict) -> Union[int, np.ndarray]:
return np.prod(action_space.shape) * 2
class SquashedGaussian(TFActionDistribution):
"""A tanh-squashed Gaussian distribution defined by: mean, std, low, high.
The distribution will never return low or high exactly, but
`low`+SMALL_NUMBER or `high`-SMALL_NUMBER respectively.
"""
def __init__(self,
inputs: List[TensorType],
model: ModelV2,
low: float = -1.0,
high: float = 1.0):
"""Parameterizes the distribution via `inputs`.
Args:
low (float): The lowest possible sampling value
(excluding this value).
high (float): The highest possible sampling value
(excluding this value).
"""
assert tfp is not None
mean, log_std = tf.split(inputs, 2, axis=-1)
# Clip `scale` values (coming from NN) to reasonable values.
log_std = tf.clip_by_value(log_std, MIN_LOG_NN_OUTPUT,
MAX_LOG_NN_OUTPUT)
std = tf.exp(log_std)
self.distr = tfp.distributions.Normal(loc=mean, scale=std)
assert np.all(np.less(low, high))
self.low = low
self.high = high
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
mean = self.distr.mean()
return self._squash(mean)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self._squash(self.distr.sample())
@override(ActionDistribution)
def logp(self, x: TensorType) -> TensorType:
# Unsquash values (from [low,high] to ]-inf,inf[)
unsquashed_values = tf.cast(self._unsquash(x), self.inputs.dtype)
# Get log prob of unsquashed values from our Normal.
log_prob_gaussian = self.distr.log_prob(unsquashed_values)
# For safety reasons, clamp somehow, only then sum up.
log_prob_gaussian = tf.clip_by_value(log_prob_gaussian, -100, 100)
log_prob_gaussian = tf.reduce_sum(log_prob_gaussian, axis=-1)
# Get log-prob for squashed Gaussian.
unsquashed_values_tanhd = tf.math.tanh(unsquashed_values)
log_prob = log_prob_gaussian - tf.reduce_sum(
tf.math.log(1 - unsquashed_values_tanhd**2 + SMALL_NUMBER),
axis=-1)
return log_prob
def sample_logp(self):
z = self.distr.sample()
actions = self._squash(z)
return actions, tf.reduce_sum(
self.distr.log_prob(z) -
tf.math.log(1 - actions * actions + SMALL_NUMBER),
axis=-1)
@override(ActionDistribution)
def entropy(self) -> TensorType:
raise ValueError("Entropy not defined for SquashedGaussian!")
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
raise ValueError("KL not defined for SquashedGaussian!")
def _squash(self, raw_values: TensorType) -> TensorType:
# Returned values are within [low, high] (including `low` and `high`).
squashed = ((tf.math.tanh(raw_values) + 1.0) / 2.0) * \
(self.high - self.low) + self.low
return tf.clip_by_value(squashed, self.low, self.high)
def _unsquash(self, values: TensorType) -> TensorType:
normed_values = (values - self.low) / (self.high - self.low) * 2.0 - \
1.0
# Stabilize input to atanh.
save_normed_values = tf.clip_by_value(
normed_values, -1.0 + SMALL_NUMBER, 1.0 - SMALL_NUMBER)
unsquashed = tf.math.atanh(save_normed_values)
return unsquashed
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space,
model_config: ModelConfigDict) -> Union[int, np.ndarray]:
return np.prod(action_space.shape) * 2
class Beta(TFActionDistribution):
"""
A Beta distribution is defined on the interval [0, 1] and parameterized by
shape parameters alpha and beta (also called concentration parameters).
PDF(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
with Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
and Gamma(n) = (n - 1)!
"""
def __init__(self,
inputs: List[TensorType],
model: ModelV2,
low: float = 0.0,
high: float = 1.0):
# Stabilize input parameters (possibly coming from a linear layer).
inputs = tf.clip_by_value(inputs, log(SMALL_NUMBER),
-log(SMALL_NUMBER))
inputs = tf.math.log(tf.math.exp(inputs) + 1.0) + 1.0
self.low = low
self.high = high
alpha, beta = tf.split(inputs, 2, axis=-1)
# Note: concentration0==beta, concentration1=alpha (!)
self.dist = tfp.distributions.Beta(
concentration1=alpha, concentration0=beta)
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
mean = self.dist.mean()
return self._squash(mean)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self._squash(self.dist.sample())
@override(ActionDistribution)
def logp(self, x: TensorType) -> TensorType:
unsquashed_values = self._unsquash(x)
return tf.math.reduce_sum(
self.dist.log_prob(unsquashed_values), axis=-1)
def _squash(self, raw_values: TensorType) -> TensorType:
return raw_values * (self.high - self.low) + self.low
def _unsquash(self, values: TensorType) -> TensorType:
return (values - self.low) / (self.high - self.low)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space,
model_config: ModelConfigDict) -> Union[int, np.ndarray]:
return np.prod(action_space.shape) * 2
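# Note (added for clarity): the softplus-plus-one transform in Beta.__init__
# keeps both concentration parameters strictly greater than 1, so the Beta
# density stays unimodal on (0, 1); its mean is alpha / (alpha + beta), which
# deterministic_sample() then rescales into [low, high] via _squash().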
class Deterministic(TFActionDistribution):
"""Action distribution that returns the input values directly.
This is similar to DiagGaussian with standard deviation zero (thus only
requiring the "mean" values as NN output).
"""
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
return self.inputs
@override(TFActionDistribution)
def logp(self, x: TensorType) -> TensorType:
return tf.zeros_like(self.inputs)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self.inputs
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space,
model_config: ModelConfigDict) -> Union[int, np.ndarray]:
return np.prod(action_space.shape)
class MultiActionDistribution(TFActionDistribution):
"""Action distribution that operates on a set of actions.
Args:
inputs (Tensor list): A list of tensors from which to compute samples.
"""
def __init__(self, inputs, model, *, child_distributions, input_lens,
action_space):
ActionDistribution.__init__(self, inputs, model)
self.action_space_struct = get_base_struct_from_space(action_space)
self.input_lens = np.array(input_lens, dtype=np.int32)
split_inputs = tf.split(inputs, self.input_lens, axis=1)
self.flat_child_distributions = tree.map_structure(
lambda dist, input_: dist(input_, model), child_distributions,
split_inputs)
@override(ActionDistribution)
def logp(self, x):
# Single tensor input (all merged).
if isinstance(x, (tf.Tensor, np.ndarray)):
split_indices = []
for dist in self.flat_child_distributions:
if isinstance(dist, Categorical):
split_indices.append(1)
else:
split_indices.append(tf.shape(dist.sample())[1])
split_x = tf.split(x, split_indices, axis=1)
# Structured or flattened (by single action component) input.
else:
split_x = tree.flatten(x)
def map_(val, dist):
# Remove extra categorical dimension.
if isinstance(dist, Categorical):
val = tf.cast(tf.squeeze(val, axis=-1), tf.int32)
return dist.logp(val)
# Remove extra categorical dimension and take the logp of each
# component.
flat_logps = tree.map_structure(map_, split_x,
self.flat_child_distributions)
return functools.reduce(lambda a, b: a + b, flat_logps)
@override(ActionDistribution)
def kl(self, other):
kl_list = [
d.kl(o) for d, o in zip(self.flat_child_distributions,
other.flat_child_distributions)
]
return functools.reduce(lambda a, b: a + b, kl_list)
@override(ActionDistribution)
def entropy(self):
entropy_list = [d.entropy() for d in self.flat_child_distributions]
return functools.reduce(lambda a, b: a + b, entropy_list)
@override(ActionDistribution)
def sample(self):
child_distributions = tree.unflatten_as(self.action_space_struct,
self.flat_child_distributions)
return tree.map_structure(lambda s: s.sample(), child_distributions)
@override(ActionDistribution)
def deterministic_sample(self):
child_distributions = tree.unflatten_as(self.action_space_struct,
self.flat_child_distributions)
return tree.map_structure(lambda s: s.deterministic_sample(),
child_distributions)
@override(TFActionDistribution)
def sampled_action_logp(self):
p = self.flat_child_distributions[0].sampled_action_logp()
for c in self.flat_child_distributions[1:]:
p += c.sampled_action_logp()
return p
@override(ActionDistribution)
def required_model_output_shape(self, action_space, model_config):
return np.sum(self.input_lens)
class Dirichlet(TFActionDistribution):
"""Dirichlet distribution for continuous actions that are between
[0,1] and sum to 1.
e.g. actions that represent resource allocation."""
def __init__(self, inputs: List[TensorType], model: ModelV2):
"""Input is a tensor of logits. The exponential of logits is used to
parametrize the Dirichlet distribution as all parameters need to be
positive. An arbitrarily small epsilon is added to the concentration
parameters to keep them from becoming zero due to numerical error.
See issue #4440 for more details.
"""
self.epsilon = 1e-7
concentration = tf.exp(inputs) + self.epsilon
self.dist = tf1.distributions.Dirichlet(
concentration=concentration,
validate_args=True,
allow_nan_stats=False,
)
super().__init__(concentration, model)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
return tf.nn.softmax(self.dist.concentration)
@override(ActionDistribution)
def logp(self, x: TensorType) -> TensorType:
# Support of Dirichlet are positive real numbers. x is already
# an array of positive numbers, but we clip to avoid zeros due to
# numerical errors.
x = tf.maximum(x, self.epsilon)
x = x / tf.reduce_sum(x, axis=-1, keepdims=True)
return self.dist.log_prob(x)
@override(ActionDistribution)
def entropy(self) -> TensorType:
return self.dist.entropy()
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
return self.dist.kl_divergence(other.dist)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self.dist.sample()
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space,
model_config: ModelConfigDict) -> Union[int, np.ndarray]:
return np.prod(action_space.shape)
| pcmoritz/ray-1 | rllib/models/tf/tf_action_dist.py | Python | apache-2.0 | 23,931 | [
"Gaussian"
] | 3da222ff40e7d78b1bd36fdc9c8ebbf81b07427cfa78d52c1233393ad8cf6561 |
import logging
import numpy as np
from neon.util.argparser import NeonArgparser
from neon.backends import gen_backend
from neon.initializers import Constant, Gaussian
from neon.layers import Conv, Pooling, GeneralizedCost, Affine
from neon.optimizers.optimizer import MultiOptimizer, GradientDescentMomentum
from neon.transforms import Softmax, CrossEntropyMulti, Rectlin, Misclassification
from neon.models import Model
from neon.data import ArrayIterator, MNIST
from neon.callbacks.callbacks import Callbacks
from callbacks.callbacks import TrainByStageCallback
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
parser = NeonArgparser(__doc__)
args = parser.parse_args()
be = gen_backend(backend='gpu',
batch_size=128,
datatype=np.float32)
# setup a dataset iterator
mnist = MNIST(path='../dataset/mnist')
(X_train, y_train), (X_test, y_test), nclass = mnist.load_data()
train_set = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
valid_set = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))
# define model
nfilters = [20, 50, 500]
init_w = Gaussian(scale=0.01)
relu = Rectlin()
common_params = dict(init=init_w, activation=relu)
layers = [
Conv((5, 5, nfilters[0]), bias=Constant(0.1), padding=0, **common_params),
Pooling(2, strides=2, padding=0),
Conv((5, 5, nfilters[1]), bias=Constant(0.1), padding=0, **common_params),
Pooling(2, strides=2, padding=0),
Affine(nout=nfilters[2], bias=Constant(0.1), **common_params),
Affine(nout=10, bias=Constant(0.1), activation=Softmax(), init=Gaussian(scale=0.01))
]
model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
# define optimizer
opt_w = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9, wdecay=0.0005)
opt_b = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)
opt = MultiOptimizer({'default': opt_w, 'Bias': opt_b}, name='multiopt')
# configure callbacks
callbacks = Callbacks(model, eval_set=valid_set, metric=Misclassification(), **args.callback_args)
callbacks.add_callback(TrainByStageCallback(model, valid_set, Misclassification(), max_patience=5))
logger.info('Training ...')
model.fit(train_set, optimizer=opt, num_epochs=250, cost=cost, callbacks=callbacks)
print('Accuracy = %.2f%%' % (100. - model.eval(valid_set, metric=Misclassification()) * 100))
model.save_params('./models/mnist/mnist_cnn.pkl')
| cs-chan/fuzzyDCN | prune_neon/mnist_cnn.py | Python | bsd-3-clause | 2,431 | [
"Gaussian"
] | 65ffd6fe6f5f8afc514ac0474aa3fe6a8740f85f427a9a487f38300f81d97314 |
# -*- coding: utf-8 -*-
#
# gif_pop_psc_exp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Population rate model of generalized integrate-and-fire neurons
---------------------------------------------------------------
This script simulates a finite network of generalized integrate-and-fire
(GIF) neurons directly on the mesoscopic population level using the effective
stochastic population rate dynamics derived in the paper [1]_. The stochastic
population dynamics is implemented in the NEST model gif_pop_psc_exp. We
demonstrate this model using the example of a Brunel network of two coupled
populations, one excitatory and one inhibitory population.
Note that the population model represents the mesoscopic level
description of the corresponding microscopic network based on the
NEST model ``gif_psc_exp``.
References
~~~~~~~~~~
.. [1] Schwalger T, Degert M, Gerstner W (2017). Towards a theory of cortical columns: From spiking
neurons to interacting neural populations of finite size. PLoS Comput Biol.
https://doi.org/10.1371/journal.pcbi.1005507
"""
# Loading the necessary modules:
import numpy as np
import matplotlib.pyplot as plt
import nest
###############################################################################
# We first set the parameters of the microscopic model:
# All times given in milliseconds
dt = 0.5
dt_rec = 1.
# Simulation time
t_end = 2000.
# Parameters
size = 200
N = np.array([4, 1]) * size
M = len(N) # number of populations
# neuronal parameters
t_ref = 4. * np.ones(M) # absolute refractory period
tau_m = 20 * np.ones(M) # membrane time constant
mu = 24. * np.ones(M) # constant base current mu=R*(I0+Vrest)
c = 10. * np.ones(M) # base rate of exponential link function
Delta_u = 2.5 * np.ones(M) # softness of exponential link function
V_reset = 0. * np.ones(M) # Reset potential
V_th = 15. * np.ones(M) # baseline threshold (non-accumulating part)
tau_sfa_exc = [100., 1000.] # adaptation time constants of excitatory neurons
tau_sfa_inh = [100., 1000.] # adaptation time constants of inhibitory neurons
J_sfa_exc = [1000., 1000.] # size of feedback kernel theta
# (= area under exponential) in mV*ms
J_sfa_inh = [1000., 1000.] # in mV*ms
tau_theta = np.array([tau_sfa_exc, tau_sfa_inh])
J_theta = np.array([J_sfa_exc, J_sfa_inh])
# connectivity
J = 0.3 # excitatory synaptic weight in mV if number of input connections
# is C0 (see below)
g = 5. # inhibition-to-excitation ratio
pconn = 0.2 * np.ones((M, M))
delay = 1. * np.ones((M, M))
C0 = np.array([[800, 200], [800, 200]]) * 0.2 # constant reference matrix
C = np.vstack((N, N)) * pconn # numbers of input connections
# final synaptic weights scaling as 1/C
J_syn = np.array([[J, -g * J], [J, -g * J]]) * C0 / C
taus1_ = [3., 6.] # time constants of exc./inh. postsynaptic currents (PSC's)
taus1 = np.array([taus1_ for k in range(M)])
# step current input
step = [[20.], [20.]] # jump size of mu in mV
tstep = np.array([[1500.], [1500.]]) # times of jumps
# synaptic time constants of excitatory and inhibitory connections
tau_ex = 3. # in ms
tau_in = 6. # in ms
###############################################################################
# Simulation on the mesoscopic level
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# To directly simulate the mesoscopic population activities (i.e. generating
# the activity of a finite-size population without simulating single
# neurons), we can build the populations using the NEST model
# ``gif_pop_psc_exp``:
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
nest.resolution = dt
nest.print_time = True
nest.local_num_threads = 1
t0 = nest.biological_time
nest_pops = nest.Create('gif_pop_psc_exp', M)
C_m = 250. # irrelevant value for membrane capacity, cancels out in simulation
g_L = C_m / tau_m
params = [{
'C_m': C_m,
'I_e': mu[i] * g_L[i],
'lambda_0': c[i], # in Hz!
'Delta_V': Delta_u[i],
'tau_m': tau_m[i],
'tau_sfa': tau_theta[i],
'q_sfa': J_theta[i] / tau_theta[i], # [J_theta]= mV*ms -> [q_sfa]=mV
'V_T_star': V_th[i],
'V_reset': V_reset[i],
'len_kernel': -1, # -1 triggers automatic history size
'N': N[i],
't_ref': t_ref[i],
'tau_syn_ex': max([tau_ex, dt]),
'tau_syn_in': max([tau_in, dt]),
'E_L': 0.
} for i in range(M)]
nest_pops.set(params)
# connect the populations
g_syn = np.ones_like(J_syn) # synaptic conductance
g_syn[:, 0] = C_m / tau_ex
g_syn[:, 1] = C_m / tau_in
for i in range(M):
for j in range(M):
nest.Connect(nest_pops[j], nest_pops[i],
syn_spec={'weight': J_syn[i, j] * g_syn[i, j] * pconn[i, j],
'delay': delay[i, j]})
###############################################################################
# To record the instantaneous population rate `Abar(t)` we use a multimeter,
# and to get the population activity `A_N(t)` we use spike recorder:
# monitor the output using a multimeter, this only records with dt_rec!
nest_mm = nest.Create('multimeter')
nest_mm.set(record_from=['n_events', 'mean'], interval=dt_rec)
nest.Connect(nest_mm, nest_pops)
# monitor the output using a spike recorder
nest_sr = []
for i in range(M):
nest_sr.append(nest.Create('spike_recorder'))
nest_sr[i].time_in_steps = True
nest.Connect(nest_pops[i], nest_sr[i], syn_spec={'weight': 1., 'delay': dt})
###############################################################################
# All neurons in a given population will be stimulated with a step input
# current:
# set initial value (at t0+dt) of step current generator to zero
tstep = np.hstack((dt * np.ones((M, 1)), tstep))
step = np.hstack((np.zeros((M, 1)), step))
# create the step current devices
nest_stepcurrent = nest.Create('step_current_generator', M)
# set the parameters for the step currents
for i in range(M):
nest_stepcurrent[i].set(amplitude_times=tstep[i] + t0,
amplitude_values=step[i] * g_L[i],
origin=t0,
stop=t_end)
pop_ = nest_pops[i]
nest.Connect(nest_stepcurrent[i], pop_, syn_spec={'weight': 1., 'delay': dt})
###############################################################################
# We can now start the simulation:
nest.rng_seed = 1
t = np.arange(0., t_end, dt_rec)
A_N = np.ones((t.size, M)) * np.nan
Abar = np.ones_like(A_N) * np.nan
# simulate 1 step longer to make sure all t are simulated
nest.Simulate(t_end + dt)
data_mm = nest_mm.events
for i, nest_i in enumerate(nest_pops):
a_i = data_mm['mean'][data_mm['senders'] == nest_i.global_id]
a = a_i / N[i] / dt
min_len = np.min([len(a), len(Abar)])
Abar[:min_len, i] = a[:min_len]
data_sr = nest_sr[i].get('events', 'times')
data_sr = data_sr * dt - t0
bins = np.concatenate((t, np.array([t[-1] + dt_rec])))
A = np.histogram(data_sr, bins=bins)[0] / float(N[i]) / dt_rec
A_N[:, i] = A
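# Note (added): the spike recorders were created with ``time_in_steps=True``,
# hence the conversion ``data_sr * dt - t0`` above. The histogram line then
# computes the empirical population activity
#     A_N(t) = n_spikes[t, t + dt_rec) / (N * dt_rec)
# in units of spikes per ms, which the plotting code below converts to Hz by
# multiplying with 1000.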
###############################################################################
# and plot the activity:
plt.figure(1)
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(t, A_N * 1000) # plot population activities (in Hz)
plt.ylabel(r'$A_N$ [Hz]')
plt.title('Population activities (mesoscopic sim.)')
plt.subplot(2, 1, 2)
plt.plot(t, Abar * 1000) # plot instantaneous population rates (in Hz)
plt.ylabel(r'$\bar A$ [Hz]')
plt.xlabel('time [ms]')
###############################################################################
# Microscopic ("direct") simulation
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# As mentioned above, the population model ``gif_pop_psc_exp`` directly
# simulates the mesoscopic population activities, i.e. without the need to
# simulate single neurons. On the other hand, if we want to know single
# neuron activities, we must simulate on the microscopic level. This is
# possible by building a corresponding network of ``gif_psc_exp`` neuron models:
nest.ResetKernel()
nest.resolution = dt
nest.print_time = True
nest.local_num_threads = 1
t0 = nest.biological_time
nest_pops = []
for k in range(M):
nest_pops.append(nest.Create('gif_psc_exp', N[k]))
# set single neuron properties
for i in range(M):
nest_pops[i].set(C_m=C_m,
I_e=mu[i] * g_L[i],
lambda_0=c[i],
Delta_V=Delta_u[i],
g_L=g_L[i],
tau_sfa=tau_theta[i],
q_sfa=J_theta[i] / tau_theta[i],
V_T_star=V_th[i],
V_reset=V_reset[i],
t_ref=t_ref[i],
tau_syn_ex=max([tau_ex, dt]),
tau_syn_in=max([tau_in, dt]),
E_L=0.,
V_m=0.)
# connect the populations
for i, nest_i in enumerate(nest_pops):
for j, nest_j in enumerate(nest_pops):
if np.allclose(pconn[i, j], 1.):
conn_spec = {'rule': 'all_to_all'}
else:
conn_spec = {
'rule': 'fixed_indegree', 'indegree': int(pconn[i, j] * N[j])}
nest.Connect(nest_j, nest_i,
conn_spec,
syn_spec={'weight': J_syn[i, j] * g_syn[i, j],
'delay': delay[i, j]})
###############################################################################
# We want to record all spikes of each population in order to compute the
# mesoscopic population activities `A_N(t)` from the microscopic simulation.
# We also record the membrane potentials of five example neurons:
# monitor the output using a multimeter and a spike recorder
nest_sr = []
for i, nest_i in enumerate(nest_pops):
nest_sr.append(nest.Create('spike_recorder'))
nest_sr[i].time_in_steps = True
# record all spikes from population to compute population activity
nest.Connect(nest_i, nest_sr[i], syn_spec={'weight': 1., 'delay': dt})
Nrecord = [5, 0] # for each population "i" the first Nrecord[i] neurons are recorded
nest_mm_Vm = []
for i, nest_i in enumerate(nest_pops):
nest_mm_Vm.append(nest.Create('multimeter'))
nest_mm_Vm[i].set(record_from=['V_m'], interval=dt_rec)
if Nrecord[i] != 0:
nest.Connect(nest_mm_Vm[i], nest_i[:Nrecord[i]], syn_spec={'weight': 1., 'delay': dt})
###############################################################################
# As before, all neurons in a given population will be stimulated with a
# step input current. The following code block is identical to the one for
# the mesoscopic simulation above:
# create the step current devices if they do not exist already
nest_stepcurrent = nest.Create('step_current_generator', M)
# set the parameters for the step currents
for i in range(M):
nest_stepcurrent[i].set(amplitude_times=tstep[i] + t0,
amplitude_values=step[i] * g_L[i],
origin=t0,
stop=t_end)
# optionally a stopping time may be added by: 'stop': sim_T + t0
pop_ = nest_pops[i]
nest.Connect(nest_stepcurrent[i], pop_, syn_spec={'weight': 1., 'delay': dt})
###############################################################################
# We can now start the microscopic simulation:
nest.rng_seed = 1
t = np.arange(0., t_end, dt_rec)
A_N = np.ones((t.size, M)) * np.nan
# simulate 1 step longer to make sure all t are simulated
nest.Simulate(t_end + dt)
###############################################################################
# Let's retrieve the data of the spike recorder and plot the activity of the
# excitatory population (in Hz):
for i in range(len(nest_pops)):
data_sr = nest_sr[i].get('events', 'times') * dt - t0
bins = np.concatenate((t, np.array([t[-1] + dt_rec])))
A = np.histogram(data_sr, bins=bins)[0] / float(N[i]) / dt_rec
A_N[:, i] = A * 1000 # in Hz
t = np.arange(dt, t_end + dt, dt_rec)
plt.figure(2)
plt.plot(t, A_N[:, 0])
plt.xlabel('time [ms]')
plt.ylabel('population activity [Hz]')
plt.title('Population activities (microscopic sim.)')
###############################################################################
# This should look similar to the population activity obtained from the
# mesoscopic simulation based on the NEST model ``gif_pop_psc_exp`` (cf. figure
# 1). Now we retrieve the data of the multimeter, which allows us to look at
# the membrane potentials of single neurons. Here we plot the voltage traces
# (in mV) of five example neurons:
voltage = []
for i in range(M):
if Nrecord[i] > 0:
senders = nest_mm_Vm[i].get('events', 'senders')
v = nest_mm_Vm[i].get('events', 'V_m')
voltage.append(
np.array([v[np.where(senders == j)] for j in set(senders)]))
else:
voltage.append(np.array([]))
f, axarr = plt.subplots(Nrecord[0], sharex=True)
for i in range(Nrecord[0]):
axarr[i].plot(voltage[0][i])
axarr[i].set_yticks((0, 15, 30))
axarr[i].set_xlabel('time [ms]')
axarr[2].set_ylabel('membrane potential [mV]')
axarr[0].set_title('5 example GIF neurons (microscopic sim.)')
###############################################################################
# Note that this plots only the subthreshold membrane potentials but not the
# spikes (as with every leaky integrate-and-fire model).
plt.show()
| niltonlk/nest-simulator | pynest/examples/gif_pop_psc_exp.py | Python | gpl-2.0 | 14,148 | [
"NEURON"
] | a9b1fbc46cf024d72dfd56222871056c795634a9f93abf6b5b92e2a84fcfc989 |
########################################################################
# $HeadURL $
# File: FTSGraph.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/10 20:02:32
########################################################################
""" :mod: FTSGraph
==============
.. module: FTSGraph
:synopsis: FTS graph
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
nodes are FTS sites and edges are routes between them
"""
__RCSID__ = "$Id: $"
# #
# @file FTSGraph.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/10 20:03:00
# @brief Definition of FTSGraph class.
# # imports
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.Graph import Graph, Node, Edge
# # from RSS
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getStorageElementSiteMapping, getSites, getFTS2ServersForSites
# # from DMS
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSSite import FTSSite
from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView
class Site( Node ):
"""
.. class:: Site
not too much here, inherited to change the name
"""
def __init__( self, name, rwAttrs = None, roAttrs = None ):
""" c'tor """
Node.__init__( self, name, rwAttrs, roAttrs )
def __contains__( self, se ):
""" check if SE is hosted at this site """
return se in self.SEs
def __str__( self ):
""" str() op """
return "<site name='%s' SEs='%s' />" % ( self.name, ",".join( self.SEs.keys() ) )
class Route( Edge ):
"""
.. class:: Route
class representing transfers between sites
"""
def __init__( self, fromNode, toNode, rwAttrs = None, roAttrs = None ):
""" c'tor """
Edge.__init__( self, fromNode, toNode, rwAttrs, roAttrs )
@property
def isActive( self ):
""" check activity of this channel """
successRate = 100.0
attempted = self.SuccessfulFiles + self.FailedFiles
if attempted:
successRate *= self.SuccessfulFiles / attempted
return bool( successRate > self.AcceptableFailureRate )
@property
def timeToStart( self ):
""" get time to start for this channel """
if not self.isActive:
return float( "inf" )
transferSpeed = { "Files": self.FilePut,
"Throughput": self.ThroughPut }[self.SchedulingType]
waitingTransfers = { "Files" : self.WaitingFiles,
"Throughput": self.WaitingSize }[self.SchedulingType]
if transferSpeed:
return waitingTransfers / float( transferSpeed )
return 0.0
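# Worked example (added for illustration, not part of the original module):
# with SchedulingType "Files", 100 WaitingFiles on an active route and a
# FilePut rate of 2, timeToStart evaluates to 100 / 2.0 = 50 (in the time
# units underlying FilePut); an inactive route returns float("inf"), so the
# scheduler will avoid it.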
class FTS2Graph( Graph ):
"""
.. class:: FTSGraph
graph holding FTS transfers (edges) and sites (nodes)
"""
# # rss client
__rssClient = None
# # resources
__resources = None
def __init__( self,
name,
ftsHistoryViews = None,
accFailureRate = None,
accFailedFiles = None,
schedulingType = None,
maxActiveJobs = None ):
""" c'tor
:param str name: graph name
:param list ftsHistoryViews: list with FTSHistoryViews
:param float accFailureRate: acceptable failure rate
:param int accFailedFiles: acceptable failed files
:param str schedulingType: scheduling type
"""
Graph.__init__( self, name )
self.log = gLogger.getSubLogger( name, True )
self.accFailureRate = accFailureRate if accFailureRate else 0.75
self.accFailedFiles = accFailedFiles if accFailedFiles else 5
self.schedulingType = schedulingType if schedulingType else "Files"
self.maxActiveJobs = maxActiveJobs if maxActiveJobs else 50
self.initialize( ftsHistoryViews )
def initialize( self, ftsHistoryViews = None ):
""" initialize FTSGraph given FTSSites and FTSHistoryViews
:param list ftsSites: list with FTSSites instances
:param list ftsHistoryViews: list with FTSHistoryViews instances
"""
self.log.debug( "initializing FTS graph..." )
ftsSites = self.ftsSites()
if ftsSites["OK"]:
ftsSites = ftsSites["Value"]
else:
ftsSites = []
ftsHistoryViews = ftsHistoryViews if ftsHistoryViews else []
sitesDict = getStorageElementSiteMapping() # [ ftsSite.Name for ftsSite in ftsSites ] )
if not sitesDict["OK"]:
self.log.error( sitesDict["Message"] )
# raise Exception( sitesDict["Message"] )
sitesDict = sitesDict["Value"] if "Value" in sitesDict else {}
# # revert to resources helper
# sitesDict = self.resources().getEligibleResources( "Storage" )
# if not sitesDict["OK"]:
# return sitesDict
# sitesDict = sitesDict["Value"]
# # create nodes
for ftsSite in ftsSites:
rwSEsDict = dict.fromkeys( sitesDict.get( ftsSite.Name, [] ), {} )
for se in rwSEsDict:
rwSEsDict[se] = { "read": False, "write": False }
rwAttrs = { "SEs": rwSEsDict }
roAttrs = { "FTSServer": ftsSite.FTSServer,
"MaxActiveJobs": ftsSite.MaxActiveJobs }
site = Site( ftsSite.Name, rwAttrs, roAttrs )
self.log.debug( "adding site %s using FTSServer %s" % ( ftsSite.Name, ftsSite.FTSServer ) )
self.addNode( site )
for sourceSite in self.nodes():
for destSite in self.nodes():
rwAttrs = { "WaitingFiles": 0, "WaitingSize": 0,
"SuccessfulFiles": 0, "SuccessfulSize": 0,
"FailedFiles": 0, "FailedSize": 0,
"FilePut": 0.0, "ThroughPut": 0.0,
"ActiveJobs": 0, "FinishedJobs": 0 }
roAttrs = { "routeName": "%s#%s" % ( sourceSite.name, destSite.name ),
"AcceptableFailureRate": self.accFailureRate,
"AcceptableFailedFiles": self.accFailedFiles,
"SchedulingType": self.schedulingType }
route = Route( sourceSite, destSite, rwAttrs, roAttrs )
self.log.debug( "adding route between %s and %s" % ( route.fromNode.name, route.toNode.name ) )
self.addEdge( route )
for ftsHistory in ftsHistoryViews:
route = self.findRoute( ftsHistory.SourceSE, ftsHistory.TargetSE )
if not route["OK"]:
self.log.warn( "route between %s and %s not found" % ( ftsHistory.SourceSE, ftsHistory.TargetSE ) )
continue
route = route["Value"]
if ftsHistory.Status in FTSJob.INITSTATES:
route.ActiveJobs += ftsHistory.FTSJobs
route.WaitingFiles += ftsHistory.Files
route.WaitingSize += ftsHistory.Size
elif ftsHistory.Status in FTSJob.TRANSSTATES:
route.ActiveJobs += ftsHistory.FTSJobs
route.WaitingSize += ftsHistory.Completeness * ftsHistory.Size / 100.0
route.WaitingFiles += int( ftsHistory.Completeness * ftsHistory.Files / 100.0 )
elif ftsHistory.Status in FTSJob.FAILEDSTATES:
route.FinishedJobs += ftsHistory.FTSJobs
route.FailedFiles += ftsHistory.FailedFiles
route.FailedSize += ftsHistory.FailedSize
else: # # FINISHEDSTATES
route.FinishedJobs += ftsHistory.FTSJobs
route.SuccessfulFiles += ( ftsHistory.Files - ftsHistory.FailedFiles )
route.SuccessfulSize += ( ftsHistory.Size - ftsHistory.FailedSize )
route.FilePut = float( route.SuccessfulFiles - route.FailedFiles ) / FTSHistoryView.INTERVAL
route.ThroughPut = float( route.SuccessfulSize - route.FailedSize ) / FTSHistoryView.INTERVAL
self.updateRWAccess()
self.log.debug( "init done!" )
def rssClient( self ):
""" RSS client getter """
if not self.__rssClient:
self.__rssClient = ResourceStatus()
return self.__rssClient
# def resources( self ):
# """ resource helper getter """
# if not self.__resources:
# self.__resources = Resources()
# return self.__resources
def updateRWAccess( self ):
""" get RSS R/W for :seList:
:param list seList: SE list
"""
self.log.debug( "updateRWAccess: updating RW access..." )
for site in self.nodes():
seList = site.SEs.keys()
rwDict = dict.fromkeys( seList )
for se in rwDict:
rwDict[se] = { "read": False, "write": False }
for se in seList:
rwDict[se]["read"] = self.rssClient().isUsableStorage( se, 'ReadAccess' )
rwDict[se]["write"] = self.rssClient().isUsableStorage( se, 'WriteAccess' )
self.log.debug( "Site '%s' SE '%s' read %s write %s " % ( site.name, se,
rwDict[se]["read"], rwDict[se]["write"] ) )
site.SEs = rwDict
return S_OK()
# Seems useless
# def findSiteForSE( self, se ):
# """ return FTSSite for a given SE """
# for node in self.nodes():
# if se in node:
# return S_OK( node )
# return S_ERROR( "StorageElement %s not found" % se )
def findRoute( self, fromSE, toSE ):
""" find route between :fromSE: and :toSE: """
for edge in self.edges():
if fromSE in edge.fromNode.SEs and toSE in edge.toNode.SEs:
return S_OK( edge )
return S_ERROR( "FTSGraph: unable to find route between '%s' and '%s'" % ( fromSE, toSE ) )
def ftsSites( self ):
""" get fts site list """
sites = getSites()
if not sites["OK"]:
return sites
sites = sites["Value"]
ftsServers = getFTS2ServersForSites( sites )
if not ftsServers["OK"]:
return ftsServers
ftsServers = ftsServers["Value"]
ftsSites = []
for site, ftsServerURL in ftsServers.items():
ftsSite = FTSSite()
ftsSite.Name = site
ftsSite.FTSServer = ftsServerURL
# # should be read from CS as well
ftsSite.MaxActiveJobs = self.maxActiveJobs
ftsSites.append( ftsSite )
return S_OK( ftsSites )
| miloszz/DIRAC | DataManagementSystem/private/FTS2/FTS2Graph.py | Python | gpl-3.0 | 9,889 | [
"DIRAC"
] | 0505b53687e757598e4ebaccf4ed44c64dd0f990764d0fffeba744609073e78a |
# -*- coding: utf-8 -*-
#
# multimeter_file.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Multimeter to file example
--------------------------
This file demonstrates recording from an `iaf_cond_alpha` neuron using a
multimeter and writing data to file.
'''
'''
First, the necessary modules for simulation and plotting are imported.
The simulation kernel is put back to its initial state using `ResetKernel`.
'''
import nest
import numpy as np
import pylab as pl
nest.ResetKernel()
'''
With `SetKernelStatus`, global properties of the simulation kernel can be
specified. The following properties are related to writing to file:
* `overwrite_files` is set to True to permit overwriting of an existing file.
* `data_path` is the path to which all data is written. It is given relative to
the current working directory.
* `data_prefix` allows specifying a common prefix for all data files.
'''
nest.SetKernelStatus({"overwrite_files": True,
"data_path": "",
"data_prefix": ""})
'''
For illustration, the recordables of the `iaf_cond_alpha` neuron model are
displayed. This model is an implementation of a spiking neuron using
integrate-and-fire dynamics with conductance-based synapses. Incoming spike
events induce a post-synaptic change of conductance modelled by an alpha
function.
'''
print("iaf_cond_alpha recordables: {0}".format( \
nest.GetDefaults("iaf_cond_alpha")["recordables"]))
'''
A neuron, a multimeter as recording device and two spike generators for
excitatory and inhibitory stimulation are instantiated. The command `Create`
expects a model type and, optionally, the desired number of nodes and a
dictionary of parameters to overwrite the default values of the model.
* For the neuron, the rise time of the excitatory synaptic alpha function
in ms `tau_syn_ex` and the reset potential of the membrane in mV `V_reset`
are specified.
* For the multimeter, the time interval for recording in ms `interval` and a
selection of measures to record (the membrane voltage in mV `V_m` and the
excitatory `g_ex` and inhibitory `g_in` synaptic conductances in nS) are set.
In addition, more parameters can be modified for writing to file:
- `withgid` is set to True to record the global id of the observed node(s).
(default: False).
- `to_file` indicates whether to write the recordings to file and is set
to True.
- `label` specifies an arbitrary label for the device. It is used instead of
the name of the model in the output file name.
* For the spike generators, the spike times in ms `spike_times` are given
explicitly.
'''
n = nest.Create("iaf_cond_alpha",
params = {"tau_syn_ex": 1.0, "V_reset": -70.0})
m = nest.Create("multimeter",
params = {"interval": 0.1,
"record_from": ["V_m", "g_ex", "g_in"],
"withgid": True,
"to_file": True,
"label": "my_multimeter"})
s_ex = nest.Create("spike_generator",
params = {"spike_times": np.array([10.0, 20.0, 50.0])})
s_in = nest.Create("spike_generator",
params = {"spike_times": np.array([15.0, 25.0, 55.0])})
'''
Next, the spike generators are connected to the neuron with `Connect`. Synapse
specifications can be provided in a dictionary. In this example of a
conductance-based neuron, the synaptic weight `weight` is given in nS. Note that
it is positive for excitatory and negative for inhibitory connections.
'''
nest.Connect(s_ex, n, syn_spec={"weight": 40.0})
nest.Connect(s_in, n, syn_spec={"weight": -20.0})
nest.Connect(m, n)
'''
A network simulation with a duration of 100 ms is started with `Simulate`.
'''
nest.Simulate(100.)
'''
After the simulation, the recordings are obtained from the multimeter via the
key `events` of the status dictionary accessed by `GetStatus`. `times` indicates
the recording times stored for each data point. They are recorded if the
parameter `withtime` of the multimeter is set to True which is the default case.
'''
events = nest.GetStatus(m)[0]["events"]
t = events["times"];
'''
Finally, the time courses of the membrane voltage and the synaptic
conductance are displayed.
'''
pl.clf()
pl.subplot(211)
pl.plot(t, events["V_m"])
pl.axis([0, 100, -75, -53])
pl.ylabel("membrane potential (mV)")
pl.subplot(212)
pl.plot(t, events["g_ex"], t, events["g_in"])
pl.axis([0, 100, 0, 45])
pl.xlabel("time (ms)")
pl.ylabel("synaptic conductance (nS)")
pl.legend(("g_exc", "g_inh"))
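# Note: with a non-interactive matplotlib backend, an explicit pl.show() may
# be required to actually display the figure.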
| kristoforcarlson/nest-simulator-fork | pynest/examples/multimeter_file.py | Python | gpl-2.0 | 5,200 | [
"NEURON"
] | 135ecea1e15349c01ec1e8d1fcb9f8ba9b01c24b4af17a774a2185dd4c93c9dc |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
One repository to update them all
On mbed.org the mbed SDK is split up into multiple repositories; this script
takes care of updating them all.
"""
import sys
from copy import copy
from os import walk, remove, makedirs
from os.path import join, abspath, dirname, relpath, exists, isfile
from shutil import copyfile
from optparse import OptionParser
import re
import string
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.settings import MBED_ORG_PATH, MBED_ORG_USER, BUILD_DIR
from tools.paths import *
from tools.utils import run_cmd
MBED_URL = "mbed.org"
MBED_USER = "mbed_official"
changed = []
push_remote = True
quiet = False
commit_msg = ''
# Code that does have a mirror in the mbed SDK
# Tuple data: (repo_name, list_of_code_dirs, [team])
# team is optional - if not specified, the code is published under mbed_official
OFFICIAL_CODE = (
("mbed-dev" , MBED_BASE),
("mbed-rtos", RTOS),
("mbed-dsp" , DSP),
("mbed-rpc" , MBED_RPC),
("lwip" , LWIP_SOURCES+"/lwip"),
("lwip-sys", LWIP_SOURCES+"/lwip-sys"),
("Socket" , LWIP_SOURCES+"/Socket"),
("lwip-eth" , ETH_SOURCES+"/lwip-eth"),
("EthernetInterface", ETH_SOURCES+"/EthernetInterface"),
("USBDevice", USB),
("USBHost" , USB_HOST),
("CellularModem", CELLULAR_SOURCES),
("CellularUSBModem", CELLULAR_USB_SOURCES),
("UbloxUSBModem", UBLOX_SOURCES),
("UbloxModemHTTPClientTest", [TEST_DIR+"/net/cellular/http/common", TEST_DIR+"/net/cellular/http/ubloxusb"]),
("UbloxModemSMSTest", [TEST_DIR+"/net/cellular/sms/common", TEST_DIR+"/net/cellular/sms/ubloxusb"]),
("FATFileSystem", FAT_FS, "mbed-official"),
)
# Code that does have dependencies to libraries should point to
# the latest revision. By default, they point to a specific revision.
CODE_WITH_DEPENDENCIES = (
# Libraries
"EthernetInterface",
# RTOS Examples
"rtos_basic",
"rtos_isr",
"rtos_mail",
"rtos_mutex",
"rtos_queue",
"rtos_semaphore",
"rtos_signals",
"rtos_timer",
# Net Examples
"TCPEchoClient",
"TCPEchoServer",
"TCPSocket_HelloWorld",
"UDPSocket_HelloWorld",
"UDPEchoClient",
"UDPEchoServer",
"BroadcastReceive",
"BroadcastSend",
# mbed sources
"mbed-src-program",
)
# A list of regular expressions that will be checked against each directory
# name and skipped if they match.
IGNORE_DIRS = (
)
IGNORE_FILES = (
'COPYING',
'\.md',
"\.lib",
"\.bld"
)
def ignore_path(name, reg_exps):
for r in reg_exps:
if re.search(r, name):
return True
return False
class MbedRepository:
@staticmethod
def run_and_print(command, cwd):
stdout, _, _ = run_cmd(command, work_dir=cwd, redirect=True)
print(stdout)
def __init__(self, name, team = None):
self.name = name
self.path = join(MBED_ORG_PATH, name)
if team is None:
self.url = "http://" + MBED_URL + "/users/" + MBED_USER + "/code/%s/"
else:
self.url = "http://" + MBED_URL + "/teams/" + team + "/code/%s/"
if not exists(self.path):
# Checkout code
if not exists(MBED_ORG_PATH):
makedirs(MBED_ORG_PATH)
self.run_and_print(['hg', 'clone', self.url % name], cwd=MBED_ORG_PATH)
else:
# Update
self.run_and_print(['hg', 'pull'], cwd=self.path)
self.run_and_print(['hg', 'update'], cwd=self.path)
def publish(self):
# The maintainer has to evaluate the changes first and explicitly accept them
self.run_and_print(['hg', 'addremove'], cwd=self.path)
stdout, _, _ = run_cmd(['hg', 'status'], work_dir=self.path)
if stdout == '':
print "No changes"
return False
print stdout
if quiet:
commit = 'Y'
else:
commit = raw_input(push_remote and "Do you want to commit and push? Y/N: " or "Do you want to commit? Y/N: ")
if commit == 'Y':
args = ['hg', 'commit', '-u', MBED_ORG_USER]
if commit_msg:
args = args + ['-m', commit_msg]
self.run_and_print(args, cwd=self.path)
if push_remote:
self.run_and_print(['hg', 'push'], cwd=self.path)
return True
# Check if a file is a text file or a binary file
# Taken from http://code.activestate.com/recipes/173220/
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
def is_text_file(filename):
block_size = 1024
def istext(s):
if "\0" in s:
return 0
if not s: # Empty files are considered text
return 1
# Get the non-text characters (maps a character to itself then
# use the 'remove' option to get rid of the text characters.)
t = s.translate(_null_trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
if float(len(t))/len(s) > 0.30:
return 0
return 1
with open(filename) as f:
res = istext(f.read(block_size))
return res
# Return the line ending type for the given file ('cr' or 'crlf'; 'cr' is used
# here to mean plain '\n' endings)
def get_line_endings(f):
examine_size = 1024
try:
tf = open(f, "rb")
lines, ncrlf = tf.readlines(examine_size), 0
tf.close()
for l in lines:
if l.endswith("\r\n"):
ncrlf = ncrlf + 1
return 'crlf' if ncrlf > len(lines) >> 1 else 'cr'
except:
return 'cr'
# Copy file to destination, but preserve destination line endings if possible
# This prevents very annoying issues with huge diffs that appear because of
# differences in line endings
def copy_with_line_endings(sdk_file, repo_file):
if not isfile(repo_file):
copyfile(sdk_file, repo_file)
return
is_text = is_text_file(repo_file)
if is_text:
sdk_le = get_line_endings(sdk_file)
repo_le = get_line_endings(repo_file)
if not is_text or sdk_le == repo_le:
copyfile(sdk_file, repo_file)
else:
print "Converting line endings in '%s' to '%s'" % (abspath(repo_file), repo_le)
f = open(sdk_file, "rb")
data = f.read()
f.close()
f = open(repo_file, "wb")
data = data.replace("\r\n", "\n") if repo_le == 'cr' else data.replace('\n','\r\n')
f.write(data)
f.close()
def visit_files(path, visit):
for root, dirs, files in walk(path):
# Ignore hidden directories
for d in copy(dirs):
full = join(root, d)
if d.startswith('.'):
dirs.remove(d)
if ignore_path(full, IGNORE_DIRS):
print "Skipping '%s'" % full
dirs.remove(d)
for file in files:
if ignore_path(file, IGNORE_FILES):
continue
visit(join(root, file))
def update_repo(repo_name, sdk_paths, team_name):
repo = MbedRepository(repo_name, team_name)
# copy files from mbed SDK to mbed_official repository
def visit_mbed_sdk(sdk_file):
repo_file = join(repo.path, relpath(sdk_file, sdk_path))
repo_dir = dirname(repo_file)
if not exists(repo_dir):
makedirs(repo_dir)
copy_with_line_endings(sdk_file, repo_file)
for sdk_path in sdk_paths:
visit_files(sdk_path, visit_mbed_sdk)
# remove repository files that do not exist in the mbed SDK
def visit_repo(repo_file):
for sdk_path in sdk_paths:
sdk_file = join(sdk_path, relpath(repo_file, repo.path))
if exists(sdk_file):
break
else:
remove(repo_file)
print "remove: %s" % repo_file
visit_files(repo.path, visit_repo)
if repo.publish():
changed.append(repo_name)
def update_code(repositories):
for r in repositories:
repo_name, sdk_dir = r[0], r[1]
team_name = r[2] if len(r) == 3 else None
print '\n=== Updating "%s" ===' % repo_name
sdk_dirs = [sdk_dir] if type(sdk_dir) != type([]) else sdk_dir
update_repo(repo_name, sdk_dirs, team_name)
def update_single_repo(repo):
repos = [r for r in OFFICIAL_CODE if r[0] == repo]
if not repos:
print "Repository '%s' not found" % repo
else:
update_code(repos)
def update_dependencies(repositories):
for repo_name in repositories:
print '\n=== Updating "%s" ===' % repo_name
repo = MbedRepository(repo_name)
# point to the latest libraries
def visit_repo(repo_file):
with open(repo_file, "r") as f:
url = f.read()
with open(repo_file, "w") as f:
f.write(url[:(url.rindex('/')+1)])
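# NOTE: visit_files() above only accepts (path, visit); the two extra
# arguments in the call below look like leftovers from an older signature
# (an ignore list plus an extension filter) and would raise a TypeError if
# this branch were executed.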
visit_files(repo.path, visit_repo, None, MBED_REPO_EXT)
if repo.publish():
changed.append(repo_name)
def update_mbed():
update_repo("mbed", [join(BUILD_DIR, "mbed")], None)
def do_sync(options):
global push_remote, quiet, commit_msg, changed
push_remote = not options.nopush
quiet = options.quiet
commit_msg = options.msg
changed = []
if options.code:
update_code(OFFICIAL_CODE)
if options.dependencies:
update_dependencies(CODE_WITH_DEPENDENCIES)
if options.mbed:
update_mbed()
if options.repo:
update_single_repo(options.repo)
if changed:
print "Repositories with changes:", changed
return changed
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--code",
action="store_true", default=False,
help="Update the mbed_official code")
parser.add_option("-d", "--dependencies",
action="store_true", default=False,
help="Update the mbed_official code dependencies")
parser.add_option("-m", "--mbed",
action="store_true", default=False,
help="Release a build of the mbed library")
parser.add_option("-n", "--nopush",
action="store_true", default=False,
help="Commit the changes locally only, don't push them")
parser.add_option("", "--commit_message",
action="store", type="string", default='', dest='msg',
help="Commit message to use for all the commits")
parser.add_option("-r", "--repository",
action="store", type="string", default='', dest='repo',
help="Synchronize only the given repository")
parser.add_option("-q", "--quiet",
action="store_true", default=False,
help="Don't ask for confirmation before commiting or pushing")
(options, args) = parser.parse_args()
do_sync(options)
| jeremybrodt/mbed | tools/synch.py | Python | apache-2.0 | 11,435 | [
"VisIt"
] | 435e648e6bdc8c52ba2beeee36ba4ee95fd5c330ce12ba3c62b56e5af5210e69 |
#!/usr/bin/python
# resultsFile is a library which allows reading output files of quantum
# chemistry codes and writing input files.
# Copyright (C) 2007 Anthony SCEMAMA
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Anthony Scemama
# LCPQ - IRSAMC
# Universite Paul Sabatier
# 118, route de Narbonne
# 31062 Toulouse Cedex 4
# scemama@irsamc.ups-tlse.fr
import include
eval(include.code)
import struct
import re
xmvbFile_defined_vars = [ "date", "version", "machine", "memory", "disk",\
"cpu_time", "author", "title", "units", "methods", "options", \
"spin_restrict", "conv_threshs", "energies", \
"ee_pot_energies", \
"Ne_pot_energies", "pot_energies", \
"kin_energies", "point_group", "num_elec", \
"charge", "multiplicity","nuclear_energy","dipole","geometry",\
"basis","mo_sets","mo_types","mulliken_mo","mulliken_ao",\
"mulliken_atom","lowdin_ao", "mulliken_atom","lowdin_atom",\
"two_e_int_ao", "determinants", "num_alpha", "num_beta",\
"closed_mos", "active_mos", "virtual_mos", \
"determinants_mo_type", "det_coefficients", \
"csf_mo_type", "csf_coefficients", "symmetries", "occ_num", \
"csf", "num_states"]
class xmvbFile(resultsFile):
""" Class defining the xmvb file.
"""
local_vars = list(local_vars)
defined_vars = list(xmvbFile_defined_vars)
def get_options(self):
if self._machine is None:
self.find_string("\\")
pos = self._pos
self.find_next_string("\\@")
end = self._pos
buffer = ""
self._options = []
for line in self.text[pos:end+1]:
buffer += line[1:].replace('\n','')
buffer = buffer.split('\\\\')
for l in buffer:
self._options.append(l.split('\\'))
self._options.pop()
return self._options
def get_machine(self):
if self._machine is None:
self._machine = self.options[0][2][5:].lower()
return self._machine
def get_version(self):
if self._version is None:
self._version = self.options[4][0].split('=')[1]
return self._version
def get_author(self):
if self._author is None:
self._author = self.options[0][7].lower()
return self._author
def get_charge(self):
if self._charge is None:
self._charge = float(self.options[3][0].split(',')[0])
return self._charge
def get_disk(self):
if self._disk is None:
try:
self.find_string("File lengths")
except IndexError:
return None
pos = self._pos
line = self.text[pos].split()
disk = 0
for i in line[4::2]:
disk += float(i)
disk = disk/1000.
if disk > 1.:
self._disk = str(disk)+" Gb"
else:
disk *= 1000.
if disk > 1.:
self._disk = str(disk)+" Mb"
else:
disk *= 1000.
self._disk = str(disk)+" kb"
return self._disk
def get_memory(self):
if self._memory is None:
try:
self.find_string("Leave Link")
except IndexError:
return None
pos = self._pos
line = self.text[pos].split()
memory = float(line[10])*8. / 1000000000.
if memory > 1.:
self._memory = str(memory)+" Gb"
else:
memory *= 1000.
if memory > 1.:
self._memory = str(memory)+" Mb"
else:
memory *= 1000.
self._memory = str(memory)+" kb"
return self._memory
def get_symmetries(self):
if self._symmetries is None:
try:
self.find_string("There are")
except IndexError:
return None
pos = self._pos
begin = pos
try:
self.find_next_string("Integral")
except IndexError:
return None
end = self._pos-1
sym = []
for k in range(begin,end):
buffer = self.text[k].split()
sym.append([buffer[8],int(buffer[2])])
self._symmetries = sym
return self._symmetries
def get_units(self):
if self._units is None:
try:
self.find_string("Coordinates")
except IndexError:
return None
pos = self._pos
units = self.text[pos].split()[4][1:-1]
if units != 'Angstroms':
self._units = 'BOHR'
else:
self._units = 'ANGS'
return self._units
def get_methods(self):
if self._methods is None:
methods = []
methods.append(self.options[0][4])
self._methods = methods
return self._methods
def get_spin_restrict(self):
if self._spin_restrict is None:
method = self.methods[0]
self._spin_restrict = True
if method == 'UHF': self._spin_restrict = False
return self._spin_restrict
def get_conv_threshs(self):
if self._conv_threshs is None:
self._conv_threshs = []
for m in self.methods:
if m == 'RHF' or m == 'UHF' or m == 'ROHF':
self.find_string("SCF Done")
pos = self._pos + 1
self._conv_threshs.append(float(self.text[pos].split()[2]))
if m == 'CASSCF':
self.find_string("Enter MCSCF program")
self.find_next_string("USED ACCURACY IN CHECKING CONVEGERGENCE")
pos = self._pos
self._conv_threshs.append(float(self.text[pos].split('=')[1]))
if self._conv_threshs == []:
self._conv_threshs = None
return self._conv_threshs
def get_ee_pot_energies(self):
if self._ee_pot_energies is None:
self._ee_pot_energies = []
for i,e in enumerate(self.kin_energies):
self._ee_pot_energies.append(self.energies[i]\
-self.nuclear_energy\
-self.kin_energies[i]\
-self.Ne_pot_energies[i])
return self._ee_pot_energies
def get_Ne_pot_energies(self):
if self._Ne_pot_energies is None:
self.find_string("N-N")
pos = self._pos
self._Ne_pot_energies = [float(self.text[pos].replace('=','= ').split()[3])]
return self._Ne_pot_energies
def get_point_group(self):
if self._point_group is None:
self._point_group = self.options[4][-1].split()[0].split('=')[1]
return self._point_group
def get_geometry(self):
if self._geometry is None:
self._geometry = []
self._pos = 0
pos=0
try:
while True:
pos = self._pos
self.find_next_string("Number Number Type")
self._pos += 1
except IndexError:
pass
pos +=1
self._pos=pos
self.find_next_string("-----")
end = self._pos
while pos<end:
temp = atom()
buffer = self.text[pos].split()
temp.charge = float(buffer[1])
temp.coord = (float(buffer[3]), float(buffer[4]), float(buffer[5]))
self._geometry.append(temp)
pos += 1
for i,line in enumerate(self.options[3][1:]):
buffer = line.split(',')
self._geometry[i].name = buffer[0]
# try:
# b = self.basis
# for f in b:
# for at in self._geometry:
# if f.center is at.coord:
# at.basis.append(f)
# except IndexError:
# pass
return self._geometry
def get_basis(self):
if self._basis is None:
gfprint=False
gfinput=False
buffer = self.options[1][0].split()
if 'GFPRINT' in buffer: gfprint=True
elif 'GFINPUT' in buffer: gfinput=True
if gfprint:
Polar=False
try:
self.find_string("Standard basis")
except:
self.find_string("General basis")
pos = self._pos
if "5D" in self.text[pos]: Polar=True
try:
self.find_next_string("AO basis set:")
pos = self._pos
except IndexError:
return None
self.find_string("Integral buffers")
end = self._pos
doLoop=True
while (doLoop):
try:
self.find_prev_string("There are")
end = self._pos
except:
doLoop = False
pos += 1
basis_read = []
line = self.text[pos].split()
iatom=0
atom = line[1]
while pos < end:
if line[0] == 'Atom':
index = int(line[3])
sym = line[4]
nfunc = int(line[5])
if atom != line[1]:
iatom += 1
atom = line[1]
bf = []
pos+=1
line = self.text[pos].split()
for k in range(nfunc):
expo = float(line[0].replace('D','E'))
coef = float(line[1].replace('D','E'))
if sym == "SP":
coef2 = float(line[2])
bf.append( [expo,coef,coef2] )
else:
bf.append( [expo,coef] )
pos += 1
line = self.text[pos].split()
if len(bf) > 0:
basis_read.append( [index,sym,bf,iatom] )
else:
print "GFPRINT should be present in the gaussian keywords."
return None
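# Not every atom has its basis printed explicitly (symmetry-equivalent atoms
# may be skipped); the back-filling loop below appears to copy the basis of
# the next atom that was read into the missing slots.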
Nmax = basis_read[len(basis_read)-1][0]
basis = [None for i in range(Nmax)]
for b in basis_read:
basis[b[0]-1] = [b[1],b[2],b[3]]
NotNone = 0
ReadNone = False
for i in range(len(basis)-1,-1,-1):
if basis[i] is None:
ReadNone = True
basis[i] = list(basis[i+NotNone])
else:
if ReadNone:
NotNone = 0
ReadNone = False
NotNone += 1
k=0
while k<len(basis):
if basis[k][0] == "S":
mylist = []
elif basis[k][0] == "P":
mylist = [ "X", "Y", "Z" ]
elif basis[k][0] == "SP":
mylist = [ "S", "X", "Y", "Z" ]
elif basis[k][0] == "D":
if not Polar:
mylist = [ "XX", "YY", "ZZ", "XY", "XZ", "YZ" ]
else:
mylist = [ "D0", "D1+", "D1-", "D2+", "D2-" ]
elif basis[k][0] == "F":
if not Polar:
mylist = [ "XXX", "YYY", "ZZZ", "YYX", "XXY", "XXZ", "ZZX", "ZZY",
"YYZ", "XYZ" ]
else:
mylist = [ "F0", "F1+", "F1-", "F2+", "F2-", "F3+", "F3-" ]
elif basis[k][0] == "G":
if not Polar:
mylist = [ "ZZZZ", "ZZZY", "YYZZ", "YYYZ", "YYYY", "ZZZX",
"ZZXY", "YYXZ","YYYX", "XXZZ", "XXYZ",
"XXYY", "XXXZ","XXXY", "XXXX" ]
else:
mylist = [ "G0", "G1+", "G1-", "G2+", "G2+", "G3+", "G3-",
"G4+", "G4-" ]
#elif basis[k][0] == "H":
# mylist = [ "XXXXX", "YYYYY", "ZZZZZ", "XXXXY", "XXXXZ", "YYYYX", \
# "YYYYZ", "ZZZZX", "ZZZZY", "XXXYY", "XXXZZ", \
# "YYYXX", "YYYZZ", "ZZZXX", "ZZZYY", "XXXYZ", \
# "YYYXZ", "ZZZXY", "XXYYZ", "XXZZY", "YYZZX" ]
#elif basis[k][0] == "I":
# mylist = [ "XXXXXX", "YYYYYY", "ZZZZZZ", "XXXXXY", "XXXXXZ", \
# "YYYYYX", "YYYYYZ", "ZZZZZX", "ZZZZZY", "XXXXYY", \
# "XXXXZZ", "YYYYXX", "YYYYZZ", "ZZZZXX", "ZZZZYY", \
# "XXXXYZ", "YYYYXZ", "ZZZZXY", "XXXYYY", "XXXZZZ", \
# "YYYZZZ", "XXXYYZ", "XXXZZY", "YYYXXZ", "YYYZZX", \
# "ZZZXXY", "ZZZYYX" ]
mylist = map(normalize_basis_name,mylist)
for i in mylist[:-1]:
basis[k][0] = i
basis.insert(k,list(basis[k]))
k+=1
for i in mylist[-1:]:
basis[k][0] = i
k+=1
self._basis = []
for buffer in basis:
contr = contraction()
for c in buffer[1]:
gauss = gaussian()
atom = self.geometry[int(buffer[2])]
gauss.center = atom.coord
gauss.expo = c[0]
gauss.sym = buffer[0]
if len(c) == 3:
if gauss.sym == "S":
contr.append(c[1],gauss)
else:
contr.append(c[2],gauss)
else:
contr.append(c[1],gauss)
self._basis.append(contr)
return self._basis
def get_mo_types(self):
if self._mo_types is None:
self.get_mo_sets()
return self._mo_types
def get_mulliken_mo(self):
if self._mulliken_mo is None:
pass
return self._mulliken_mo
def get_mulliken_ao(self):
if self._mulliken_ao is None:
pass
return self._mulliken_ao
def get_lowdin_ao(self):
if self._lowdin_ao is None:
pass
return self._lowdin_ao
def get_mulliken_atom(self):
if self._mulliken_atom is None:
try:
self.find_string("Mulliken atomic charges")
except IndexError:
return None
self._pos += 2
pos = self._pos
self.find_next_string("Sum of Mulliken")
end = self._pos
line = self.text[pos].split()
vm = orbital()
vm.set = "Mulliken_Atom"
vm.eigenvalue = 0.
while pos < end:
value = float(line[2])
vm.vector.append(value)
vm.eigenvalue += value
pos += 1
line = self.text[pos].split()
self._mulliken_atom = vm
return self._mulliken_atom
def get_lowdin_atom(self):
if self._lowdin_atom is None:
pass
return self._lowdin_atom
def get_two_e_int_ao(self):
if self._two_e_int_ao is None:
pass
return self._two_e_int_ao
def get_closed_mos(self):
if self._closed_mos is None:
result = []
maxmo = len(self.mo_sets[self.determinants_mo_type])
for orb in range(maxmo):
present = True
for det in self.determinants:
for spin_det in det:
if orb not in det[spin_det]: present = False
if present: result.append(orb)
self._closed_mos = result
return self._closed_mos
def get_virtual_mos(self):
if self._virtual_mos is None:
result = []
minmo = len(self.closed_mos)
maxmo = len(self.mo_sets[self.determinants_mo_type])
for orb in range(minmo,maxmo):
present = False
for det in self.determinants:
for spin_det in det:
if orb in det[spin_det]: present = True
if not present: result.append(orb)
self._virtual_mos = result
return self._virtual_mos
def get_active_mos(self):
if self._active_mos is None:
cl = self.closed_mos
vi = self.virtual_mos
maxmo = len(self.mo_sets[self.determinants_mo_type])
result = []
for i in range(maxmo):
present = i in cl or i in vi
if not present:
result.append(i)
self._active_mos = result
return self._active_mos
def get_occ_num(self):
if self._occ_num is None:
if self.mulliken_mo is not None:
occ = {}
for motype in self.mo_types:
occ[motype] = [ mo.eigenvalue for mo in self.mulliken_mo ]
if occ != {}:
self._occ_num = occ
return self._occ_num
def get_date(self):
if self._date is None:
self.find_string("Job started at")
pos = self._pos
self._date = self.text[pos][16:]
return self._date
def get_multiplicity(self):
if self._multiplicity is None:
self.find_string("nmul=")
pos = self._pos
self._multiplicity = int(self.text[pos].split('=')[-1])
return self._multiplicity
def get_num_elec(self):
if self._num_elec is None:
self.find_string("nelectron=")
pos = self._pos
self._num_elec= int(self.text[pos].split('=')[1].split()[0])
return self._num_elec
def get_nuclear_energy(self):
if self._nuclear_energy is None:
self.find_string("Nuclear Repulsion Energy:")
pos = self._pos
self._nuclear_energy = float(self.text[pos].split(':')[1])
return self._nuclear_energy
def get_title(self):
if self._title is None:
self.find_string("End of Input")
pos = self._pos+3
self._title = self.text[pos].strip()
return self._title
def get_cpu_time(self):
if self._cpu_time is None:
try:
self.find_string("Cpu for the Job:")
except IndexError:
return None
pos = self._pos
self._cpu_time = self.text[pos].split(':')[1].split()[0]+' s'
return self._cpu_time
def get_dipole(self):
if self._dipole is None:
self.find_string("Dipole moment")
pos = self._pos+2
line = self.text[pos].split()
self._dipole = []
self._dipole.append(float(line[1]))
self._dipole.append(float(line[3]))
self._dipole.append(float(line[5]))
return self._dipole
def get_energies(self):
if self._energies is None:
self._energies = []
self.find_string("Total Energy:")
pos = self._pos
self._energies.append(float(self.text[pos].split(':')[1]))
return self._energies
def get_pot_energies(self):
if self._pot_energies is None:
self._pot_energies = []
self.find_string("Potential energy:")
pos = self._pos
self._pot_energies.append(float(self.text[pos].split(':')[1]))
return self._pot_energies
def get_kin_energies(self):
if self._kin_energies is None:
self._kin_energies = []
self.find_string("Kinetic energy:")
pos = self._pos
self._kin_energies.append(float(self.text[pos].split(':')[1]))
return self._kin_energies
def get_mo_sets(self):
if self._mo_sets is None:
self._mo_types = ['VB']
posend = {}
self.find_string("norb=")
norb = int(self.text[self._pos].split("norb=")[1].split()[0])
self.find_string("OPTIMIZED ORBITALS")
pos = self._pos+3
iorb=0
vectors = []
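# Orbital coefficients are printed in blocks of columns; read one block per
# pass of the outer loop until all norb orbitals have been collected.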
while(iorb<norb):
lorb = len(self.text[pos].split())
pos += 1
begin = pos
for l in range(lorb):
iorb+=1
pos = begin
line = self.text[pos].split()
v = orbital()
v.set = self.mo_types[0]
v.basis = None
v.eigenvalue = float(iorb)
while len(line) > 0:
v.vector.append(float(line[l+1]))
pos += 1
line = self.text[pos].split()
vectors.append(v)
self._mo_sets = {}
self._mo_sets['VB'] = vectors
return self._mo_sets
def get_num_alpha(self):
if self._num_alpha is None:
self._num_alpha = self.num_elec/2 + (self.multiplicity-1)/2
return self._num_alpha
def get_num_beta(self):
if self._num_beta is None:
self._num_beta = self.num_elec/2 - (self.multiplicity-1)/2
return self._num_beta
def get_determinants_mo_type(self):
if self._determinants_mo_type is None:
self._determinants_mo_type = self.mo_types[-1]
return self._determinants_mo_type
def get_csf_mo_type(self):
if self._csf_mo_type is None:
self._csf_mo_type = self.determinants_mo_type
return self._csf_mo_type
def get_determinants(self):
if self._determinants is None:
determinants = []
if self.csf is not None:
for csf in self.csf:
for new_det in csf.determinants:
determinants.append(new_det)
else:
pass
if determinants != []:
self._determinants_mo_type = self.mo_types[-1]
self._determinants = determinants
return self._determinants
def get_csf(self):
if self._csf is None:
csf = []
csf_coefficients = []
self.find_string('COEFFICIENTS OF DETERMINANTS')
self.find_next_string('1')
pos = self._pos
buffer = self.text[pos]
while buffer.strip() != "":
tempcsf_a = []
tempcsf_b = []
buffer = self.text[pos]
coef = float(buffer.split()[1])
ii=0
while ii < self.num_elec:
buffer = buffer[24:].split()
for i in buffer:
ii+=1
if ii <= self.num_alpha:
tempcsf_a.append(int(i)-1)
else:
tempcsf_b.append(int(i)-1)
pos += 1
buffer = self.text[pos]
this_csf = CSF()
this_csf.append(1.,tempcsf_a,tempcsf_b)
csf.append(this_csf)
csf_coefficients.append([coef])
if csf != []:
self._csf = csf
self._csf_coefficients = csf_coefficients
return self._csf
def get_det_coefficients(self):
if self._det_coefficients is None:
if self.csf is not None:
self._det_coefficients = []
csf = self.csf
vector = []
for state_coef in self.csf_coefficients:
for i,c in enumerate(state_coef):
for d in csf[i].coefficients:
vector.append(c*d)
self._det_coefficients.append(vector)
return self._det_coefficients
def get_csf_coefficients(self):
if self._csf_coefficients is None:
self.get_csf()
return self._csf_coefficients
def get_num_states(self):
if self._num_states is None:
self._num_states=1
return self._num_states
to_remove = []
for i, j in local_vars:
if i in resultsFile_defined_vars:
to_remove.append( (i,j) )
for i in to_remove:
local_vars.remove(i)
for i, j in local_vars:
if i not in defined_vars:
exec build_get_funcs(i) in locals()
exec build_property(i,j) in locals()
del to_remove, i, j
fileTypes.insert(0,xmvbFile)
if __name__ == '__main__':
main(xmvbFile)
###### END #####
| scemama/resultsFile | resultsFile/Modules/xmvbFile.py | Python | gpl-2.0 | 23,503 | [
"Gaussian"
] | d6dccaf064ee6a3b36cea005b7b83344d5aef9842352efca8c322cf15953dfe9 |
#!/usr/bin/env python
import os
import sys
import re
from Bio.Blast import NCBIXML
result_handle = open(sys.argv[1])
blast_records = NCBIXML.parse(result_handle)
families = {}
i=0
for blast_record in blast_records:
# taking one record one hsp
try:
hsp = blast_record.alignments[0].hsps[0]
i += 1
except:
continue
# hsp.align_length
# ready query with gaps
# hsp.query
# reference with NNN
# hsp.sbjct
# UMI position
m = re.search("N{5,}", hsp.sbjct)
if not m:
continue
start = m.start()
end = m.end() # end + 1 in fact
# UMI is aligned with NNNNN
raw_umi = hsp.query[start:end]
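# get_umi() is not defined in this snippet; it presumably collapses the raw
# UMI into a family/consensus UMI (using the `families` dict) and is expected
# to be provided elsewhere.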
umi = get_umi(raw_umi, families)
#umi = raw_umi
#print(f"HSP: {hsp.sbjct}")
#print(f"UMI: {umi}")
seq = hsp.query[0:start]
ref = hsp.sbjct[0:start]
result_handle.close()
| naumenko-sa/bioscripts | blast/blastn_xml.parse_example.py | Python | mit | 860 | [
"BLAST"
] | 2b1e4c305dd63cd3a966c18ffd9e640e5c8994cc4d43fc59aa08ae0a5be1c32f |
# sybase/base.py
# Copyright (C) 2010-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import operator
import re
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL
RESERVED_WORDS = set([
"add", "all", "alter", "and",
"any", "as", "asc", "backup",
"begin", "between", "bigint", "binary",
"bit", "bottom", "break", "by",
"call", "capability", "cascade", "case",
"cast", "char", "char_convert", "character",
"check", "checkpoint", "close", "comment",
"commit", "connect", "constraint", "contains",
"continue", "convert", "create", "cross",
"cube", "current", "current_timestamp", "current_user",
"cursor", "date", "dbspace", "deallocate",
"dec", "decimal", "declare", "default",
"delete", "deleting", "desc", "distinct",
"do", "double", "drop", "dynamic",
"else", "elseif", "encrypted", "end",
"endif", "escape", "except", "exception",
"exec", "execute", "existing", "exists",
"externlogin", "fetch", "first", "float",
"for", "force", "foreign", "forward",
"from", "full", "goto", "grant",
"group", "having", "holdlock", "identified",
"if", "in", "index", "index_lparen",
"inner", "inout", "insensitive", "insert",
"inserting", "install", "instead", "int",
"integer", "integrated", "intersect", "into",
"iq", "is", "isolation", "join",
"key", "lateral", "left", "like",
"lock", "login", "long", "match",
"membership", "message", "mode", "modify",
"natural", "new", "no", "noholdlock",
"not", "notify", "null", "numeric",
"of", "off", "on", "open",
"option", "options", "or", "order",
"others", "out", "outer", "over",
"passthrough", "precision", "prepare", "primary",
"print", "privileges", "proc", "procedure",
"publication", "raiserror", "readtext", "real",
"reference", "references", "release", "remote",
"remove", "rename", "reorganize", "resource",
"restore", "restrict", "return", "revoke",
"right", "rollback", "rollup", "save",
"savepoint", "scroll", "select", "sensitive",
"session", "set", "setuser", "share",
"smallint", "some", "sqlcode", "sqlstate",
"start", "stop", "subtrans", "subtransaction",
"synchronize", "syntax_error", "table", "temporary",
"then", "time", "timestamp", "tinyint",
"to", "top", "tran", "trigger",
"truncate", "tsequal", "unbounded", "union",
"unique", "unknown", "unsigned", "update",
"updating", "user", "using", "validate",
"values", "varbinary", "varchar", "variable",
"varying", "view", "wait", "waitfor",
"when", "where", "while", "window",
"with", "with_cube", "with_lparen", "with_rollup",
"within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNICHAR'
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNIVARCHAR'
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = 'UNITEXT'
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_, **kw):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_, **kw):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_, **kw):
return "UNITEXT"
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
ischema_names = {
'bigint': BIGINT,
'int': INTEGER,
'integer': INTEGER,
'smallint': SMALLINT,
'tinyint': TINYINT,
'unsigned bigint': BIGINT, # TODO: unsigned flags
'unsigned int': INTEGER, # TODO: unsigned flags
'unsigned smallint': SMALLINT, # TODO: unsigned flags
'numeric': NUMERIC,
'decimal': DECIMAL,
'dec': DECIMAL,
'float': FLOAT,
'double': NUMERIC, # TODO
'double precision': NUMERIC, # TODO
'real': REAL,
'smallmoney': SMALLMONEY,
'money': MONEY,
'smalldatetime': DATETIME,
'datetime': DATETIME,
'date': DATE,
'time': TIME,
'char': CHAR,
'character': CHAR,
'varchar': VARCHAR,
'character varying': VARCHAR,
'char varying': VARCHAR,
'unichar': UNICHAR,
'unicode character': UNIVARCHAR,
'nchar': NCHAR,
'national char': NCHAR,
'national character': NCHAR,
'nvarchar': NVARCHAR,
'nchar varying': NVARCHAR,
'national char varying': NVARCHAR,
'national character varying': NVARCHAR,
'text': TEXT,
'unitext': UNITEXT,
'binary': BINARY,
'varbinary': VARBINARY,
'image': IMAGE,
'bit': BIT,
# not in documentation for ASE 15.7
'long varchar': TEXT, # TODO
'timestamp': TIMESTAMP,
'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
"""Return the table id from `table_name` and `schema`."""
return self.dialect.get_table_id(self.bind, table_name, schema,
info_cache=self.info_cache)
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
is True or False. when True, the connection should be configured
such that a DDL can take place subsequently. when False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
raise NotImplementedError()
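# A concrete DBAPI dialect would typically toggle the driver's autocommit
# behaviour here, for example (sketch only, the exact attribute depends on
# the driver): connection.autocommit = value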
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time.")
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
self.set_ddl_autocommit(
self.root_connection.connection.connection,
True)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond'
})
def get_select_precolumns(self, select, **kw):
s = select._distinct and "DISTINCT " or ""
# TODO: don't think Sybase supports
# bind params for FIRST / TOP
limit = select._limit
if limit:
# if select._limit == 1:
# s += "FIRST "
# else:
# s += "TOP %s " % (select._limit,)
s += "TOP %s " % (limit,)
offset = select._offset
if offset:
raise NotImplementedError("Sybase ASE does not support OFFSET")
return s
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in sybase is after the select keyword
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (
field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "GETDATE()"
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
kw['literal_binds'] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
def delete_table_clause(self, delete_stmt, from_table,
extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(self, delete_stmt, from_table,
extra_froms, from_hints, **kw):
"""Render the DELETE .. FROM clause specific to Sybase."""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in [from_table] + extra_froms)
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(
column.type, type_expression=column)
if column.table is None:
raise exc.CompileError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL")
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) \
and column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element,
include_schema=False)
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = 'sybase'
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
inspector = SybaseInspector
construct_arguments = []
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name",
typemap={'user_name': Unicode})
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if self.server_version_info is not None and\
self.server_version_info < (15, ):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
def get_table_id(self, connection, table_name, schema=None, **kw):
"""Fetch the id for schema.table_name.
Several reflection methods require the table id. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text("""
SELECT o.id AS id
FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
WHERE u.name = :schema_name
AND o.name = :table_name
AND o.type in ('U', 'V')
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
if isinstance(table_name, unicode):
table_name = table_name.encode("ascii")
result = connection.execute(TABLEID_SQL,
schema_name=schema,
table_name=table_name)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
COLUMN_SQL = text("""
SELECT col.name AS name,
t.name AS type,
(col.status & 8) AS nullable,
(col.status & 128) AS autoincrement,
com.text AS 'default',
col.prec AS precision,
col.scale AS scale,
col.length AS length
FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
col.cdefault = com.id
WHERE col.usertype = t.usertype
AND col.id = :table_id
ORDER BY col.colid
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (name, type_, nullable, autoincrement, default, precision, scale,
length) in results:
col_info = self._get_column_info(name, type_, bool(nullable),
bool(autoincrement),
default, precision, scale,
length)
columns.append(col_info)
return columns
def _get_column_info(self, name, type_, nullable, autoincrement, default,
precision, scale, length):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
# is this necessary
# if is_array:
# coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
coltype = sqltypes.NULLTYPE
if default:
default = default.replace("DEFAULT", "").strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text("""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text("""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
""")
referential_constraints = connection.execute(
REFCONSTRAINT_SQL, table_id=table_id).fetchall()
REFTABLE_SQL = text("""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
""")
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (schema is not None or
reftable["schema"] != self.default_schema_name):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"]
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
INDEX_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
(i.status & 0x2) AS 'unique',
index_col(object_name(i.id), i.indid, 1) AS col_1,
index_col(object_name(i.id), i.indid, 2) AS col_2,
index_col(object_name(i.id), i.indid, 3) AS col_3,
index_col(object_name(i.id), i.indid, 4) AS col_4,
index_col(object_name(i.id), i.indid, 5) AS col_5,
index_col(object_name(i.id), i.indid, 6) AS col_6,
index_col(object_name(i.id), i.indid, 7) AS col_7,
index_col(object_name(i.id), i.indid, 8) AS col_8,
index_col(object_name(i.id), i.indid, 9) AS col_9,
index_col(object_name(i.id), i.indid, 10) AS col_10,
index_col(object_name(i.id), i.indid, 11) AS col_11,
index_col(object_name(i.id), i.indid, 12) AS col_12,
index_col(object_name(i.id), i.indid, 13) AS col_13,
index_col(object_name(i.id), i.indid, 14) AS col_14,
index_col(object_name(i.id), i.indid, 15) AS col_15,
index_col(object_name(i.id), i.indid, 16) AS col_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 0
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
column_names = []
for i in range(1, r["count"]):
column_names.append(r["col_%i" % (i,)])
index_info = {"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
PK_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
index_col(object_name(i.id), i.indid, 1) AS pk_1,
index_col(object_name(i.id), i.indid, 2) AS pk_2,
index_col(object_name(i.id), i.indid, 3) AS pk_3,
index_col(object_name(i.id), i.indid, 4) AS pk_4,
index_col(object_name(i.id), i.indid, 5) AS pk_5,
index_col(object_name(i.id), i.indid, 6) AS pk_6,
index_col(object_name(i.id), i.indid, 7) AS pk_7,
index_col(object_name(i.id), i.indid, 8) AS pk_8,
index_col(object_name(i.id), i.indid, 9) AS pk_9,
index_col(object_name(i.id), i.indid, 10) AS pk_10,
index_col(object_name(i.id), i.indid, 11) AS pk_11,
index_col(object_name(i.id), i.indid, 12) AS pk_12,
index_col(object_name(i.id), i.indid, 13) AS pk_13,
index_col(object_name(i.id), i.indid, 14) AS pk_14,
index_col(object_name(i.id), i.indid, 15) AS pk_15,
index_col(object_name(i.id), i.indid, 16) AS pk_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 2048
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
constrained_columns = []
if pks:
for i in range(1, pks["count"] + 1):
constrained_columns.append(pks["pk_%i" % (i,)])
return {"constrained_columns": constrained_columns,
"name": pks["name"]}
else:
return {"constrained_columns": [], "name": None}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'U'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text("""
SELECT c.text
FROM syscomments c JOIN sysobjects o ON c.id = o.id
WHERE o.name = :view_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(view_name, unicode):
view_name = view_name.encode("ascii")
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
| Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/sqlalchemy/dialects/sybase/base.py | Python | gpl-2.0 | 29,456 | [
"ASE"
] | 767c075b33659a03ac890c515945d906b4059bcdb5f4a66ce0c49ce5083deab6 |
#!/usr/bin/env python
from __future__ import with_statement
# Library path
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# Standard libraries
import atexit
import random
import re
import socket
import threading
import time
import traceback
import weakref
import xmlrpclib
import Queue
import SimpleXMLRPCServer
# Import third-party libraries
import turbogears
# Custom libraries
from buzzbot import *
# TODO why isn't the above import sufficient?!
from buzzbot import model
from buzzbot import searcher
from buzzbot import visitor
myBotRoutines = bot.BotRoutines()
# Logging
import logging
logger = logging.getLogger("buzzbot.crawler")
hdlr = logging.FileHandler('crawler.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def has_fork():
"""
Does this OS have the `fork` system call?
"""
return "fork" in os.__dict__
class CrawlerServerFunctions(object):
"""
Set of functions exposed by the CrawlerServer to the CrawlerClient.
"""
def enqueue(self, item):
"""
Enqueue the item into the Crawler.
"""
# NOTE: The XML-RPC proxy can't accept a bare **kwargs, so it's passed in as a `dict`.
global server
server.crawler.enqueue(**item)
def stop(self):
"""
Stop the Crawler.
"""
global server
server.keep_running = False
def prepare_results(self):
"""
Prepare scored results.
"""
global server
server.crawler.prepare_results()
def results_for(self, search_id):
"""
Return list of scored results for the `search_id`.
"""
global server
return server.crawler.results_for(search_id)
def evaluate(self, code):
"""
Return the result of eval'ing the string of `code`.
"""
global server
if self.allow_dangerous_operations():
return eval(code)
else:
raise SecurityError("Dangerous operations not allowed on server")
def execute(self, code):
"""
Pass `code` to Python's `exec`.
"""
global server
if self.allow_dangerous_operations():
exec code
else:
raise SecurityError("Dangerous operations not allowed on server")
def allow_dangerous_operations(self):
"""
Does this server allow dangerous operations? Returns true if the
`DANGEROUS` environmental variable has a value.
"""
global server
if os.environ.has_key('DANGEROUS'):
return True
else:
return False
def ping(self):
"""
Respond with true, to indicate server is alive.
"""
return server.crawler.ping()
class SecurityError(StandardError):
pass
class CrawlerBase(object):
"""
Methods provided to CrawlerClient and CrawlerServer.
"""
def host(self):
"""
Return the connection host.
"""
return turbogears.config.get("crawler.socket_host", "localhost")
def port(self):
"""
Return the connection port.
"""
return int(turbogears.config.get("crawler.socket_port", 8052))
class CrawlerServer(CrawlerBase):
def __init__(self, items_completed=True, concurrency_library=None):
"""
Instantiate a server that hosts a Crawler.
"""
self.service = SimpleXMLRPCServer.SimpleXMLRPCServer(addr=(self.host(), self.port()), logRequests=False, allow_none=True)
self.service.register_instance(CrawlerServerFunctions())
self.keep_running = True
self.crawler = Crawler(items_completed=items_completed, concurrency_library=concurrency_library)
def start(self):
"""
Start the server.
"""
self.crawler.start()
while self.keep_running:
self.service.handle_request()
self.stop()
def stop(self):
"""
Stop the server.
"""
self.keep_running = False
self.crawler.stop()
class ResilentXmlRpcServerProxy(object):
"""
Provides a wrapper around the XmlRpc proxy that retries the connection.
"""
def __init__(self, proxy, timeout_seconds=15.0, pause_seconds=0.25):
self.proxy = proxy
self.timeout_seconds = timeout_seconds
self.pause_seconds = pause_seconds
def __getattr__(self, name):
DEBUG = False
if DEBUG: print "GA: %s" % name
proxy = self.proxy
def wrapper(*args):
if DEBUG: print "WR: %s%s" % (name, args)
deadline = time.time() + self.timeout_seconds
while deadline > time.time():
try:
return proxy.__getattr__(name)(*args)
except socket.error, e:
if DEBUG: print "WA: retry"
logger.debug("Waiting for XML-RPC server to respond...")
time.sleep(self.pause_seconds)
raise TimeoutError("Couldn't connect to XML-RPC server, no response after %0.2f seconds" % self.timeout_seconds)
return wrapper
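# Minimal usage sketch (illustrative only; the real endpoint comes from
# CrawlerBase.host()/port() via the TurboGears config):
#
#   raw = xmlrpclib.ServerProxy("http://localhost:8052", allow_none=True)
#   service = ResilentXmlRpcServerProxy(proxy=raw, timeout_seconds=5.0)
#   service.ping()  # retried on socket.error, TimeoutError after 5 seconds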
class CrawlerClient(CrawlerBase):
"""
Client that connects to the CrawlerServer.
"""
def __init__(self):
"""
Instantiate a client.
"""
self.raw_service = xmlrpclib.ServerProxy(uri="http://%s:%s" % (self.host(), self.port()), allow_none=True)
self.service = ResilentXmlRpcServerProxy(proxy=self.raw_service)
def enqueue(self, **item):
"""
Enqueue an item for crawling. The `item` is a dict with the same
parameters accepted by `Crawler.enqueue`.
"""
# NOTE: The XML-RPC proxy can't accept a bare **kwargs, so pass them as a `dict`.
return self.service.enqueue(item)
def stop(self):
"""
Stop the CrawlerServer.
"""
return self.raw_service.stop()
def evaluate(self, code):
"""
Return the result of evaluating the code on the server.
"""
return self.service.evaluate(code)
def execute(self, code):
"""
Pass `code` to Python's `exec`.
"""
self.service.execute(code)
def prepare_results(self):
"""
Prepare scored results.
"""
self.service.prepare_results()
def results_for(self, search_id):
"""
Return list of scored results for the `search_id`.
"""
return self.service.results_for(search_id)
def ping(self):
"""
Is the server responding?
"""
try:
return self.raw_service.ping()
except:
return False
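# Rough end-to-end client sketch, assuming a CrawlerServer is already listening
# on the configured host/port (search_id 1234 is a made-up record id):
#
#   client = CrawlerClient()
#   if client.ping():
#       client.enqueue(search_id=1234, max_results=8)
#       client.prepare_results()
#       print client.results_for(1234)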
class Crawler(object):
"""
Crawler
=======
The Crawler is an all-in-one multi-process crawler that searches keywords
to get URLs, visits these to get their content, and then scores these.
Usage
-----
# Load libraries
import crawler
# Instantiate objects and start service
    crawler = crawler.Crawler()
crawler.start()
# Run a job
crawler.enqueue(search_id=1234)
# Shutdown
crawler.stop()
# Print out the result
while True:
try:
print crawler.items_completed.get_nowait()
except:
print "DONE!"
break
"""
STOP = 'STOP'
"""Sentinel token used to stop workers."""
def __init__(self, items_completed=True, searchers_count=None, visitors_count=None, scorers_count=None, concurrency_library=None):
"""
Initialize a new crawler.
Keyword arguments:
* items_completed: Store completed items? Defaults to True.
        * searchers_count: Number of searcher processes to run. Defaults to a sensible value.
        * visitors_count: Number of visitor processes to run. Defaults to a sensible value.
        * scorers_count: Number of scorer processes to run. Defaults to a sensible value.
"""
# TODO replace attributes with uniform dicts, e.g. self.count['searcher'], self.pool['searcher'], etc
# Concurrency
self._concurrency_library = concurrency_library
### self._concurrency_library = "threading" # Override for development
if not self._concurrency_library:
# If a library wasn't specified as a parameter, try to pick the one
# specified in the configuration files, else default to the most
# reasonable one for this operating system.
self._concurrency_library = turbogears.config.get("crawler.concurrency_library", has_fork() and "processing" or "threading")
exec "import %s" % self._concurrency_library
self._concurrency_library_module = eval("%s" % self._concurrency_library)
logger.info("Using the `%s` concurrency library" % self._concurrency_library)
# Worker counts
self.cpu_core_count = cpu_core_counter.cpu_core_count()
self.searchers_count = searchers_count or max(2, self.cpu_core_count)
self.visitors_count = visitors_count or min(40, self.cpu_core_count * 15)
self.scorers_count = scorers_count or self.cpu_core_count
# Workers pools
self.searchers = []
self.visitors = []
self.scorers = []
# Queues
if self._concurrency_library == "processing":
self.manager = self._concurrency_library_module.Manager()
self.lock = self.manager.Lock()
self.items_to_search = self.manager.Queue()
self.items_to_visit = self.manager.Queue()
self.items_to_score = self.manager.Queue()
self.items_to_finalize = self.manager.Queue()
self.items_completed = None
if items_completed:
logger.info("server will save completed items")
self.items_completed = self.manager.dict()
elif self._concurrency_library == "threading":
self.manager = None
self.lock = self._concurrency_library_module.Lock()
self.items_to_search = Queue.Queue()
self.items_to_visit = Queue.Queue()
self.items_to_score = Queue.Queue()
self.items_to_finalize = Queue.Queue()
self.items_completed = None
if items_completed:
logger.info("server will save completed items")
self.items_completed = dict()
else:
raise NotImplementedError("Unknown concurrency_library: %s" % self._concurrency_library)
# Queue maps
self.queue = {}
self.queue['searcher'] = {}
self.queue['searcher']['input'] = self.items_to_search
self.queue['searcher']['output'] = self.items_to_visit
self.queue['visitor'] = {}
self.queue['visitor']['input'] = self.items_to_visit
self.queue['visitor']['output'] = self.items_to_score
self.queue['scorer'] = {}
self.queue['scorer']['input'] = self.items_to_score
self.queue['scorer']['output'] = self.items_to_finalize
def __del__(self):
try:
self.stop()
except Exception, e:
print "Crawler: failed to stop: %s" % e
#logger.debug("failed to stop: %s" % e)
pass
finally:
#logger.debug("destroyed")
pass
def start(self, kind=None):
"""
Start the crawler. It will begin processing any entries in the queues
immediately.
Keyword arguments:
* kind: Start a kind of subprocess, e.g., "searcher". Default is to
start all available kinds.
"""
if kind:
count = self.__dict__['%ss_count' % kind]
workers = self.__dict__['%ss' % kind]
for i in range(count):
worker = None
if self._concurrency_library == "processing":
worker = self._concurrency_library_module.Process(target=self._processes_wrapper, args=[kind])
elif self._concurrency_library == "threading":
worker = self._concurrency_library_module.Thread(target=self._processes_wrapper, args=[kind])
else:
raise NotImplementedError("Unknown concurrency_library: %s" % self._concurrency_library)
workers.append(worker)
worker.start()
logger.info("started %i %s processes" % (count, kind))
else:
self.start("searcher")
self.start("visitor")
self.start("scorer")
def searcher_process(self, item, output=None):
"""
Search a single item.
"""
# TODO wrap in try/except
for result in searcher.SearchRunner(
delete_existing = item['delete_existing'],
search_id = item['search_id'],
max_results = item['max_results']):
logger.debug("searcher_process found: %s" % result.urltext)
subitem = dict(
delete_existing = item['delete_existing'],
search_id = item['search_id'],
max_results = item['max_results'],
url_id = result.id
)
output.put(subitem)
return None
def visitor_process(self, item, output=None):
"""
Visit a single item.
"""
# TODO wrap in try/except
search = model.Search.get(item['search_id'])
url_record = model.URLS.get(item['url_id'])
for content in visitor.Visitor(search, url_record):
## print content
subitem = dict(
delete_existing = item['delete_existing'],
search_id = item['search_id'],
max_results = item['max_results'],
content_id = content.id
)
output.put(subitem)
return None
def scorer_process(self, item, output=None):
"""
Score a single item.
"""
search_id = item['search_id']
content_id = item['content_id']
content = model.Content.get(content_id)
try:
myBotRoutines.addScoreToContent(content)
except Exception, e:
logger.error(traceback.format_exc(e))
return item
def _processes_wrapper(self, kind):
"""
Run a persistent group of worker processes that pop items from a queue
and process them.
Keyword arguments:
        * kind: Kind of subprocess to run, e.g., "searcher".
"""
tagline = "%s %s:" % (kind, self._worker_name())
target = self.__getattribute__('%s_process' % kind)
input = self.queue[kind]['input']
output = self.queue[kind]['output']
## logger.debug("%s waiting" % tagline)
try:
            # drain the queue until the STOP sentinel is received
for item in iter(input.get, self.STOP):
logger.info("%s processing Search#%s" % (tagline, item['search_id']))
### print "%s_process(%s)" % (kind, repr(item)) # TODO is there no better way?
result = target(item=item, output=output)
if result and output != None:
output.put(result)
except KeyboardInterrupt, e:
pass
## logger.debug("%s done" % tagline)
def _worker_name(self):
"""
Returns string name uniquely identifying this worker. Actual name will
depend on the underlying concurrency library.
"""
if self._concurrency_library == "processing":
return self._concurrency_library_module.currentProcess().getName()
elif self._concurrency_library == "threading":
return self._concurrency_library_module.currentThread().getName()
else:
raise NotImplementedError("Unknown concurrency_library: %s" % self._concurrency_library)
def stop(self, kind=None):
"""
Stop crawler and its related processes.
Keyword arguments:
* kind: Stop only a particular kind of subprocess, e.g., "searcher".
Default is to stop all available kinds.
"""
if kind:
count = self.__dict__['%ss_count' % kind]
queue = self.queue[kind]['input']
stopped = False
for i in range(count):
try:
queue.put(self.STOP)
except Exception, e:
# Ignore if the queue is already stopped?
pass
workers = self.__dict__['%ss' % kind]
for worker in workers:
try:
worker.join()
except Exception, e:
# Ignore if worker is already dead?
pass
while len(workers) != 0:
workers.pop()
stopped = True
if stopped:
try:
import logging
global logger
logger.info("stopped %i %s processes" % (count, kind))
except:
# Logging and logger aren't available otherwise if stop() is called from destructor.
print ("Crawler: stopped %i %s processes" % (count, kind))
pass
else:
self.stop("searcher")
self.stop("visitor")
self.stop("scorer")
def enqueue(self, search_id, max_results=8, delete_existing=False, queue_name="items_to_search", **kwargs):
"""
Add a job to the crawler.
Keyword arguments:
* search_id: Crawl this search record.
        * max_results: Return approximately this many results. Defaults to 8.
* delete_existing: Delete existing records for this search record?
Defaults to False.
* queue_name: Name of queue to use. Defaults to "items_to_search".
"""
queue = self.__getattribute__(queue_name)
item = kwargs
item['search_id'] = search_id
item['max_results'] = max_results
item['delete_existing'] = delete_existing
logger.info("enqueued into `%s`: %s" % (queue_name, item))
queue.put(item)
def prepare_results(self):
while True:
item = None
try:
item = self.items_to_finalize.get_nowait()
except Queue.Empty:
pass # Handle below
if not item:
## logger.debug("results_for: no items")
break
leaf = None
self.lock.acquire()
if self.items_completed.has_key(item['search_id']):
leaf = self.items_completed[item['search_id']]
else:
logger.debug("results_for: creating array for Search#%s" % item['search_id'])
leaf = []
logger.debug("results_for: appending Search#%s/Content#%s" % (item['search_id'], item['content_id']))
leaf.append(item['content_id'])
self.items_completed[item['search_id']] = leaf
self.lock.release()
def results_for(self, search_id):
self.prepare_results()
if self.items_completed.has_key(search_id):
results = self.items_completed[search_id]
del self.items_completed[search_id]
logger.debug("results_for: returning results for Search#%s: %s" % (search_id, results))
return results
else:
## logger.debug("results_for: no results for Search#%s" % (search_id))
return []
def ping(self):
"""
Is the server alive? Yes, always because this is a local object.
"""
return True
class CrawlerRunner(object):
_instance = None
_instance_lock = threading.Lock()
# TODO collapse container_location and concurrency_library to single value
def __init__(self, concurrency_library=None, container_location=None, manager=True, **kwargs):
## print "%s.__init__" % self
self._concurrency_library = self._get_concurrency_library(concurrency_library)
self._container_location = self._get_container_location(container_location)
self._manager = self._container_location == "local" or manager
self._lock = threading.Lock()
### OVERRIDE FOR DEVELOPMENT
#self._concurrency_library = "threading"
#self._concurrency_library = "processing"
#self._container_location = "local"
#self._container_location = "remote"
crawler_kwargs = dict(
concurrency_library=self._concurrency_library
)
crawler_kwargs.update(kwargs)
if self._container_location == "local":
self.crawler = Crawler(**crawler_kwargs)
elif self._container_location == "remote":
self.crawler = CrawlerClient()
else:
raise NotImplementedError("Unknown container_location: %s" % self._container_location)
def __del__(self):
#print "%s.__del__" % self
self._crawler = None
self._lock = None
def start(self):
#print "%s.start" % self
if self._manager:
atexit.register(self.stop)
if self._container_location == "remote":
killing_crawler = False
try:
pause_seconds = 0.5
while True:
self.crawler.stop() # Will throw exception when down to end loop
logger.info("CrawlerRunner.start: killing stale remote crawler...")
killing_crawler = True
time.sleep(pause_seconds)
except Exception, e:
if killing_crawler:
logger.info("CrawlerRunner.start: killed stale remote crawler")
pass # Ignore because service may not be running already
logger.info("CrawlerRunner.start: launching remote crawler")
filename = re.sub("\.pyc$", ".py", __file__, 1)
# TODO safely quote paths
cmd = "'%s' --server --config '%s'" % (filename, commands.configuration)
logger.info(cmd)
os.system("%s &" % cmd)
elif self._container_location == "local":
logger.info("CrawlerRunner.start: launching local crawler")
return self.crawler.start()
else:
raise NotImplementedError("Unknown container_location: %s" % self._container_location)
def stop(self):
#print "%s.stop" % self
if self._manager:
with self._lock:
if self.crawler:
try:
return self.crawler.stop()
except Exception, e:
print "CrawlerRunner.stop failed: %s" % e
def enqueue(self, **item):
return self.crawler.enqueue(**item)
def results_for(self, search_id):
return self.crawler.results_for(search_id)
def ping(self):
return self.crawler.ping()
@classmethod
def _get_concurrency_library(self, kind=None):
if kind:
return kind
else:
return turbogears.config.get("crawler.concurrency_library", has_fork() and "processing" or "threading")
@classmethod
def _get_container_location(self, kind=None):
if kind:
return kind
else:
return turbogears.config.get("crawler.container_location", has_fork() and "remote" or "local")
@classmethod
def get_instance(self):
with self._instance_lock:
if not self._instance:
self._instance = self()
self._instance.start()
return self._instance
class TimeoutError(StandardError):
"""
Raised when a timeout is reached.
"""
pass
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--config", dest="configfile", help="Optional configuration file", metavar="FILE")
parser.add_option("-c", "--client", action="store_true", dest="client", help="Start client")
parser.add_option("-s", "--server", action="store_true", dest="server", help="Start server")
parser.add_option("-k", "--concurrency", dest="concurrency_library", help="threading OR processing", metavar="LIBRARY")
(options, args) = parser.parse_args()
if options.configfile:
commands.boot(options.configfile)
else:
commands.boot()
if options.client:
logger.info("Starting client...")
client = CrawlerClient()
try:
from ipdb import set_trace
except:
from pdb import set_trace
set_trace()
# TODO figure out how to make session exit without exceptions
else:
logger.info("Starting server...")
global server
server = CrawlerServer(concurrency_library=options.concurrency_library)
try:
server.start()
except KeyboardInterrupt:
logger.info("Shutting down server...")
server.stop()
logger.info("Stopped server")
| pbarton666/buzz_bot | bot_project/buzzbot/crawler.py | Python | mit | 26,283 | [
"VisIt"
] | 7ed11b0288c77605867bf0ed71df9b12e579a34b1c665cd0459cec551128f4bc |
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Michael Rabbitt.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Warning: URL formats of "raw" gists are undocumented and subject to change.
# See also: https://developer.github.com/v3/gists/
#
# Inspired by "[Python] reStructuredText GitHub Gist directive"
# (https://gist.github.com/brianhsu/1407759), public domain by Brian Hsu
"""
Extension to Python Markdown for Embedded Gists (gist.github.com).
Basic Example:
Text of the gist:
[:gist: 4747847]
Example with filename:
Text of the gist:
[:gist: 4747847 zen.py]
Basic Example with hexadecimal id:
Text of the gist:
[:gist: c4a43d6fdce612284ac0]
Example with hexadecimal id and filename:
Text of the gist:
[:gist: c4a43d6fdce612284ac0 cow.txt]
Example using reStructuredText syntax:
Text of the gist:
.. gist:: 4747847 zen.py
Example using hexadecimal ID with reStructuredText syntax:
Text of the gist:
.. gist:: c4a43d6fdce612284ac0
Example using hexadecimal ID and filename with reStructuredText syntax:
Text of the gist:
.. gist:: c4a43d6fdce612284ac0 cow.txt
Error Case: non-existent Gist ID:
Text of the gist:
[:gist: 0]
Error Case: non-existent file:
Text of the gist:
[:gist: 4747847 doesntexist.py]
"""
try:
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import AtomicString
from markdown.util import etree
except ImportError:
# No need to catch this, if you try to use this without Markdown,
# the markdown compiler will fail first
Extension = Pattern = object
from nikola.plugin_categories import MarkdownExtension
from nikola.utils import get_logger
import requests
LOGGER = get_logger('compile_markdown.mdx_gist')
GIST_JS_URL = "https://gist.github.com/{0}.js"
GIST_FILE_JS_URL = "https://gist.github.com/{0}.js?file={1}"
GIST_RAW_URL = "https://gist.githubusercontent.com/raw/{0}"
GIST_FILE_RAW_URL = "https://gist.githubusercontent.com/raw/{0}/{1}"
GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\S+)(?:\s*(?P<filename>.+?))?\s*\]'
GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>[^\]\s]+)(?:\s*(?P<filename>.+?))?\s*$'
class GistFetchException(Exception):
"""Raised when attempt to fetch content of a Gist from github.com fails."""
def __init__(self, url, status_code):
"""Initialize the exception."""
Exception.__init__(self)
self.message = 'Received a {0} response from Gist URL: {1}'.format(
status_code, url)
class GistPattern(Pattern):
"""InlinePattern for footnote markers in a document's body text."""
def __init__(self, pattern, configs):
"""Initialize the pattern."""
Pattern.__init__(self, pattern)
def get_raw_gist_with_filename(self, gist_id, filename):
"""Get raw gist text for a filename."""
url = GIST_FILE_RAW_URL.format(gist_id, filename)
resp = requests.get(url)
if not resp.ok:
raise GistFetchException(url, resp.status_code)
return resp.text
def get_raw_gist(self, gist_id):
"""Get raw gist text."""
url = GIST_RAW_URL.format(gist_id)
resp = requests.get(url)
if not resp.ok:
raise GistFetchException(url, resp.status_code)
return resp.text
def handleMatch(self, m):
"""Handle pattern match."""
gist_id = m.group('gist_id')
gist_file = m.group('filename')
gist_elem = etree.Element('div')
gist_elem.set('class', 'gist')
script_elem = etree.SubElement(gist_elem, 'script')
noscript_elem = etree.SubElement(gist_elem, 'noscript')
try:
if gist_file:
script_elem.set('src', GIST_FILE_JS_URL.format(
gist_id, gist_file))
raw_gist = (self.get_raw_gist_with_filename(
gist_id, gist_file))
else:
script_elem.set('src', GIST_JS_URL.format(gist_id))
raw_gist = (self.get_raw_gist(gist_id))
# Insert source as <pre/> within <noscript>
pre_elem = etree.SubElement(noscript_elem, 'pre')
pre_elem.text = AtomicString(raw_gist)
except GistFetchException as e:
LOGGER.warn(e.message)
warning_comment = etree.Comment(' WARNING: {0} '.format(e.message))
noscript_elem.append(warning_comment)
return gist_elem
class GistExtension(MarkdownExtension, Extension):
"""Gist extension for Markdown."""
def __init__(self, configs={}):
"""Initialize the extension."""
# set extension defaults
self.config = {}
# Override defaults with user settings
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
"""Extend Markdown."""
gist_md_pattern = GistPattern(GIST_MD_RE, self.getConfigs())
gist_md_pattern.md = md
md.inlinePatterns.add('gist', gist_md_pattern, "<not_strong")
gist_rst_pattern = GistPattern(GIST_RST_RE, self.getConfigs())
gist_rst_pattern.md = md
md.inlinePatterns.add('gist-rst', gist_rst_pattern, ">gist")
md.registerExtension(self)
def makeExtension(configs=None): # pragma: no cover
"""Make Markdown extension."""
return GistExtension(configs)
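# Standalone sketch against the pre-3.0 Python-Markdown API this module targets
# (inside Nikola the extension is wired up through the plugin system instead):
#
#   import markdown
#   md = markdown.Markdown(extensions=[GistExtension()])
#   html = md.convert("Text of the gist:\n\n[:gist: 4747847 zen.py]")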
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
doctest.REPORT_NDIFF))
| gwax/nikola | nikola/plugins/compile/markdown/mdx_gist.py | Python | mit | 6,599 | [
"Brian"
] | c0bea5ca5d8895826e8d3b61799215955cef38717e62abe2c959731f71de4043 |
import sys, traceback
import time
import os
from git import Repo
from ConfigParser import SafeConfigParser
import bioblend
from bioblend.galaxy import GalaxyInstance
from bioblend.galaxy.workflows import WorkflowClient
import subprocess
dist_dname = '/galaxy-central/config'
wf_dname = dist_dname + '/workflow_file'
repo_name = 'workflow_rnaseq_on_docker_galaxy'
GALAXY_URL = 'http://localhost:8080/'
conf = SafeConfigParser()
conf.read(dist_dname + '/galaxy.ini')
API_KEY="admin"
def get_all_ga(directory):
ret = []
for root, dirs, files in os.walk(directory):
for file in files:
name, ext = os.path.splitext(file)
if not '.git' in root and ext == '.ga':
ret.append(os.path.join(root, file))
return ret
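# For a hypothetical checkout, get_all_ga(wf_dname) would return paths such as
# '/galaxy-central/config/workflow_file/<repo>/some_workflow.ga' (only *.ga
# files outside .git directories are collected).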
def makeDir(dname):
if os.path.exists(dname) is False:
os.mkdir(dname)
print '%s (dir) created.' % dname
else:
print '%s (dir) is already exists.' % dname
def main():
try:
gInstance = GalaxyInstance(url = GALAXY_URL, key=API_KEY)
wClient = WorkflowClient(gInstance)
print ':::::::::::::::::::::::::::::::::::::::::::'
print '>>>>>>>>>>>>>>>>> get current workflowlist...'
gInstance = GalaxyInstance(url = GALAXY_URL, key=API_KEY)
wClient = WorkflowClient(gInstance)
dataset = wClient.get_workflows()
wf_namelist = [x['name'] for x in dataset if x['deleted'] == False]
print wf_namelist
print ':::::::::::::::::::::::::::::::::::::::::::'
print '>>>>>>>>>>>>>>>>> clone BiT Workflows from github...'
if not os.path.exists(wf_dname + '/' + repo_name):
makeDir(wf_dname)
os.chdir(wf_dname)
git_url = 'https://github.com/myoshimura080822/' + repo_name + '.git'
Repo.clone_from(git_url, repo_name)
else:
        print repo_name + ' already cloned. To update, please delete, move, or rename the directory before running this script again.'
return 0
print ':::::::::::::::::::::::::::::::::::::::::::'
print '>>>>>>>>>>>>>>>>> delete and inport workflow files...'
mytoolsdir = wf_dname + '/' + repo_name + '/'
clone_wf_list = [file.replace(mytoolsdir, "") for file in get_all_ga(mytoolsdir)]
print clone_wf_list
delete_itm =[]
[[ delete_itm.append(y) for y in wf_namelist if y.find(x.replace('.ga','')) > -1] for x in clone_wf_list]
print delete_itm
id_list = []
[[id_list.append(x['id']) for x in dataset if x['name'].find(y) > -1] for y in delete_itm]
print id_list
[wClient.delete_workflow(x) for x in id_list]
print wClient.get_workflows()
wf_file_path = get_all_ga(mytoolsdir)
[wClient.import_workflow_from_local_path(file) for file in wf_file_path]
print wClient.get_workflows()
print ':::::::::::::::::::::::::::::::::::::::::::'
print '>>>>>>>>>>>>>>>>> script ended :)'
return 0
except:
info = sys.exc_info()
tbinfo = traceback.format_tb( info[2] )
print 'Error Info...'.ljust( 80, '=' )
for tbi in tbinfo:
print tbi
print ' %s' % str( info[1] )
print '\n'.rjust( 85, '=' )
sys.exit(1)
if __name__ == '__main__':
sys.exit(main())
| myoshimura080822/galaxy_in_docker_custom_bit_wf | setup_scripts/bit-workflow_install_docker.py | Python | mit | 3,334 | [
"Galaxy"
] | c2c1da4d163d65af7c24bc5291772da360c9344268cb3b4d0a1c16a1ca886587 |
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from openmm import OpenMMEngine
| thempel/adaptivemd | adaptivemd/engine/openmm/__init__.py | Python | lgpl-2.1 | 1,050 | [
"MDTraj",
"OpenMM"
] | 3b710968cdf67bfb762dfa38127b52b617bdd5cae94addc58dc561559d6a2938 |
#!/usr/bin/env python
"""
Define parameters
author: Xiaowei Huang
"""
from network_configuration import *
from usual_configuration import *
#######################################################
#
# To find counterexample or do safety checking
#
#######################################################
task = "safety_check"
#######################################################
#
# The following are parameters to indicate how to work
# with a problem
#
#######################################################
# which dataset to work with
#dataset = "twoDcurve"
#dataset = "mnist"
dataset = "cifar10"
#dataset = "imageNet"
# decide whether to take an experimental configuration
# for specific dataset
experimental_config = True
#experimental_config = False
# the network is trained from scratch
# or read from the saved files
whichMode = "read"
#whichMode = "train"
# work with a single image or a batch of images
#dataProcessing = "single"
dataProcessing = "batch"
dataProcessingBatchNum = 3
#######################################################
#
# 1. parameters related to the networks
#
#######################################################
span = 255/float(255) # s_p in the paper
numSpan = 1.0 # m_p in the paper
featureDims = 5 # dims_{k,f} in the paper
# error bounds, defaulted to be 1.0
# \varepsilon in the paper
errorBounds = {}
errorBounds[-1] = 1.0
#######################################################
# get parameters from network_configuration
#######################################################
(featureDims,span,numSpan,errorBounds,boundOfPixelValue,NN,dataBasics,directory_model_string,directory_statistics_string,directory_pic_string,filterSize) = network_parameters(dataset)
#######################################################
#
# 2. The following are parameters for safety checking
# only useful only when experimental_config = False
#
#######################################################
# which image to start with or work with
# from the database
startIndexOfImage = 197
# the maximal layer to work until
startLayer = 0
# the maximal layer to work until
maxLayer = 3
## number of features of each layer
# in the paper, dims_L = numOfFeatures * featureDims
numOfFeatures = 40
# use linear restrictions or conv filter restriction
inverseFunction = "point"
#inverseFunction = "area"
# point-based or line-based, or only work with a specific point
enumerationMethod = "convex"
#enumerationMethod = "line"
#enumerationMethod = "point"
# heuristics for deciding a region
heuristics = "Activation"
#heuristics = "Derivative"
# do we need to repeatedly select an updated input neuron
#repeatedManipulation = "allowed"
repeatedManipulation = "disallowed"
#checkingMode = "specificLayer"
checkingMode = "stepwise"
# exit whenever an adversarial example is found
#exitWhen = "foundAll"
exitWhen = "foundFirst"
# compute the derivatives up to a specific layer
derivativelayerUpTo = 3
# do we need to generate temp_.png files
#tempFile = "enabled"
tempFile = "disabled"
#######################################################
# get parameters for the case when experimental_config = True
#######################################################
if experimental_config == True:
(startIndexOfImage,startLayer, maxLayer,numOfFeatures,inverseFunction,enumerationMethod,heuristics,repeatedManipulation,checkingMode,exitWhen,derivativelayerUpTo,tempFile) = usual_configuration(dataset)
############################################################
#
# 3. other parameters that are believed to be shared among all cases
# FIXME: check to see if they are really needed/used
#
################################################################
## reset percentage
# applies when manipulated elements do not increase
reset = "onEqualManipulationSet"
#reset = "Never"
## how many branches to expand
numOfPointsAfterEachFeature = 1
# impose bounds on the input or not
boundRestriction = True
# timeout for z3 to handle a run
timeout = 600
############################################################
#
# some miscellaneous parameters
# which need to confirm whether they are useful
# FIXME: check to see if they are really needed/used
#
################################################################
# how many pixels per feature will be changed
num = 3 #csize - 3
# for conv_solve_prep
# the size of the region to be modified
#if imageSize(originalImage) < 50:
size = 4
maxsize = 32
step = 0
# the error bound for manipulation refinement
# between layers
epsilon = 0.1
############################################################
#
# a parameter to decide whether
# FIXME: check to see if they are really needed/used
#
################################################################
# 1) the stretch is to decide a dimension of the next layer on
# the entire region of the current layer
# 2) the condense is to decide several (depends on refinementRate) dimensions
# of the next layer on a manipulation of a single dimension of the current layer
#regionSynthMethod = "stretch"
regionSynthMethod = "condense"
############################################################
#
# a function to decide how many features to be manipulated in each layer
# this function is put here for its related to the setting of an execution
# FIXME: check to see if they are really needed/used
#
################################################################
# this parameter tells how many elements will be used to
# implement a manipulation from a single element of the previous layer
refinementRate = 1
def getManipulatedFeatureNumber(model,numDimsToMani,layer2Consider):
config = NN.getConfig(model)
# get the type of the current layer
layerType = [ lt for (l,lt) in config if layer2Consider == l ]
if len(layerType) > 0: layerType = layerType[0]
else: print "cannot find the layerType"
if layerType == "Convolution2D":
return numDimsToMani # + 1
elif layerType == "Dense":
return numDimsToMani * refinementRate
else: return numDimsToMani
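# Illustration with the current (hypothetical) settings: for a "Dense" layer,
# refinementRate = 1 keeps a manipulation of 40 dimensions at 40 features,
# raising it to 2 would double that, while convolutional and other layers
# simply pass numDimsToMani through unchanged.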
#######################################################
#
# show detailedInformation or not
# FIXME: check to see if they are really needed/used
#
#######################################################
detailedInformation = False
def nprint(str):
if detailedInformation == True:
print(str)
| xiaoweih/DLV | configuration.py | Python | gpl-3.0 | 6,469 | [
"NEURON"
] | 1d715a837626ecf5d230102d406bfd23ecf9e8cf053215b14cefa915b40756ac |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf import df
mol = gto.Mole()
mol.build(
verbose = 0,
atom = '''O 0 0. 0.
1 0 -0.757 0.587
1 0 0.757 0.587''',
basis = 'cc-pvdz',
)
auxmol = df.addons.make_auxmol(mol, 'weigend')
atm, bas, env = gto.conc_env(mol._atm, mol._bas, mol._env,
auxmol._atm, auxmol._bas, auxmol._env)
def tearDownModule():
global mol, auxmol, atm, bas, env
del mol, auxmol, atm, bas, env
class KnownValues(unittest.TestCase):
def test_aux_e2(self):
nao = mol.nao_nr()
naoaux = auxmol.nao_nr()
eri0 = numpy.empty((nao,nao,naoaux))
pi = 0
for i in range(mol.nbas):
pj = 0
for j in range(mol.nbas):
pk = 0
for k in range(mol.nbas, mol.nbas+auxmol.nbas):
shls = (i, j, k)
buf = gto.moleintor.getints_by_shell('int3c2e_sph',
shls, atm, bas, env)
di, dj, dk = buf.shape
eri0[pi:pi+di,pj:pj+dj,pk:pk+dk] = buf
pk += dk
pj += dj
pi += di
j3c = df.incore.aux_e2(mol, auxmol, intor='int3c2e_sph', aosym='s1')
self.assertTrue(numpy.allclose(eri0, j3c.reshape(nao,nao,naoaux)))
self.assertAlmostEqual(lib.finger(j3c), 45.27912877994409, 9)
idx = numpy.tril_indices(nao)
j3c = df.incore.aux_e2(mol, auxmol, intor='int3c2e_sph', aosym='s2ij')
self.assertTrue(numpy.allclose(eri0[idx], j3c))
self.assertAlmostEqual(lib.finger(j3c), 12.407403711205063, 9)
def test_aux_e1(self):
j3c1 = df.incore.aux_e1(mol, auxmol, intor='int3c2e', aosym='s2ij')
j3c2 = df.incore.aux_e2(mol, auxmol, intor='int3c2e', aosym='s2ij')
self.assertAlmostEqual(abs(j3c1.T-j3c2).max(), 0, 12)
j3c1 = df.incore.aux_e1(mol, auxmol, intor='int3c2e', aosym='s1')
j3c2 = df.incore.aux_e2(mol, auxmol, intor='int3c2e', aosym='s1')
self.assertAlmostEqual(abs(j3c1.transpose(2,0,1)-j3c2).max(), 0, 12)
def test_aux_e2_diff_bra_ket(self):
mol1 = mol.copy()
mol1.basis = 'sto3g'
mol1.build(0, 0, verbose=0)
atm1, bas1, env1 = gto.conc_env(atm, bas, env,
mol1._atm, mol1._bas, mol1._env)
ao_loc = gto.moleintor.make_loc(bas1, 'int3c2e_sph')
shls_slice = (0, mol.nbas,
mol.nbas+auxmol.nbas, mol.nbas+auxmol.nbas+mol1.nbas,
mol.nbas, mol.nbas+auxmol.nbas)
j3c = gto.moleintor.getints3c('int3c2e_sph', atm1, bas1, env1, comp=1,
shls_slice=shls_slice, aosym='s1', ao_loc=ao_loc)
nao = mol.nao_nr()
naoj = mol1.nao_nr()
naoaux = auxmol.nao_nr()
eri0 = numpy.empty((nao,naoj,naoaux))
pi = 0
for i in range(mol.nbas):
pj = 0
for j in range(mol.nbas+auxmol.nbas, len(bas1)):
pk = 0
for k in range(mol.nbas, mol.nbas+auxmol.nbas):
shls = (i, j, k)
buf = gto.moleintor.getints_by_shell('int3c2e_sph',
shls, atm1, bas1, env1)
di, dj, dk = buf.shape
eri0[pi:pi+di,pj:pj+dj,pk:pk+dk] = buf
pk += dk
pj += dj
pi += di
self.assertTrue(numpy.allclose(eri0, j3c))
def test_cholesky_eri(self):
j2c = df.incore.fill_2c2e(mol, auxmol)
eri0 = numpy.empty_like(j2c)
pi = 0
for i in range(mol.nbas, len(bas)):
pj = 0
for j in range(mol.nbas, len(bas)):
shls = (i, j)
buf = gto.moleintor.getints_by_shell('int2c2e_sph',
shls, atm, bas, env)
di, dj = buf.shape
eri0[pi:pi+di,pj:pj+dj] = buf
pj += dj
pi += di
self.assertTrue(numpy.allclose(eri0, j2c))
j3c = df.incore.aux_e2(mol, auxmol, intor='int3c2e_sph', aosym='s2ij')
cderi = df.incore.cholesky_eri(mol)
eri0 = numpy.einsum('pi,pk->ik', cderi, cderi)
eri1 = numpy.einsum('ik,kl->il', j3c, numpy.linalg.inv(j2c))
eri1 = numpy.einsum('ip,kp->ik', eri1, j3c)
self.assertTrue(numpy.allclose(eri1, eri0))
cderi1 = df.incore.cholesky_eri_debug(mol)
self.assertAlmostEqual(abs(cderi-cderi1).max(), 0, 9)
def test_r_incore(self):
j3c = df.r_incore.aux_e2(mol, auxmol, intor='int3c2e_spinor', aosym='s1')
nao = mol.nao_2c()
naoaux = auxmol.nao_nr()
j3c = j3c.reshape(nao,nao,naoaux)
eri0 = numpy.empty((nao,nao,naoaux), dtype=numpy.complex128)
pi = 0
for i in range(mol.nbas):
pj = 0
for j in range(mol.nbas):
pk = 0
for k in range(mol.nbas, mol.nbas+auxmol.nbas):
shls = (i, j, k)
buf = gto.moleintor.getints_by_shell('int3c2e_spinor',
shls, atm, bas, env)
di, dj, dk = buf.shape
eri0[pi:pi+di,pj:pj+dj,pk:pk+dk] = buf
pk += dk
pj += dj
pi += di
self.assertTrue(numpy.allclose(eri0, j3c))
eri1 = df.r_incore.aux_e2(mol, auxmol, intor='int3c2e_spinor',
aosym='s2ij')
for i in range(naoaux):
j3c[:,:,i] = lib.unpack_tril(eri1[:,i])
self.assertTrue(numpy.allclose(eri0, j3c))
def test_lindep(self):
cderi0 = df.incore.cholesky_eri(mol, auxmol=auxmol)
auxmol1 = auxmol.copy()
auxmol1.basis = {'O': 'weigend', 'H': ('weigend', 'weigend')}
auxmol1.build(0, 0)
cderi1 = df.incore.cholesky_eri(mol, auxmol=auxmol1)
eri0 = numpy.dot(cderi0.T, cderi0)
eri1 = numpy.dot(cderi1.T, cderi1)
self.assertAlmostEqual(abs(eri0-eri1).max(), 0, 9)
if __name__ == "__main__":
print("Full Tests for df.incore")
unittest.main()
| sunqm/pyscf | pyscf/df/test/test_incore.py | Python | apache-2.0 | 7,048 | [
"PySCF"
] | b4af90b16b6a3aac7e29ba076501eefc139e0721d6b07a0624eb75696e318aa7 |
#!/usr/bin/env python
#
import webapp2
import re
from google.appengine.ext import db
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
import xml.etree.ElementTree as ET
import logging
headers = '''<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>%s - schema.org</title>
<meta name="description" content="Schema.org is a set of extensible schemas that enables webmasters to embed
structured data on their web pages for use by search engines and other applications." />
<link rel="stylesheet" type="text/css" href="/docs/schemaorg.css" />
<link href="/docs/prettify.css" type="text/css" rel="stylesheet" />
<script type="text/javascript" src="/docs/prettify.js"></script>
<script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/1.5.1/jquery.min.js"></script>
<script type="text/javascript">
$(document).ready(function(){
prettyPrint();
setTimeout(function(){
$(".atn:contains(itemscope), .atn:contains(itemtype), .atn:contains(itemprop), .atn:contains(itemid), .atn:contains(time), .atn:contains(datetime), .atn:contains(datetime), .tag:contains(time) ").addClass(\'new\');
$('.new + .pun + .atv\').addClass(\'curl\');
}, 500);
setTimeout(function(){
$(".atn:contains(property), .atn:contains(typeof) ").addClass(\'new\');
$('.new + .pun + .atv\').addClass(\'curl\');
}, 500);
setTimeout(function() {
$('.ds-selector-tabs .selectors a').click(function() {
var $this = $(this);
var $p = $this.parents('.ds-selector-tabs');
$('.selected', $p).removeClass('selected');
$this.addClass('selected');
$('pre.' + $this.data('selects'), $p).addClass('selected');
});
}, 0);
});
</script>
<style>
.pln { color: #444; } /* plain text */
.tag { color: #515484; } /* div, span, a, etc */
.atn,
.atv { color: #314B17; } /* href, datetime */
.new { color: #660003; } /* itemscope, itemtype, etc,. */
.curl { color: #080; } /* new url */
table.definition-table {
border-spacing: 3px;
border-collapse: separate;
}
</style>
</head>
<body class="%s">
<div id="container">
<div id="intro">
<div id="pageHeader">
<div class="wrapper">
<h1>%s</h1>
<div id="cse-search-form" style="width: 400px;"></div>
<script type="text/javascript" src="//www.google.com/jsapi"></script>
<script type="text/javascript">
google.load(\'search\', \'1\', {language : \'en\', style : google.loader.themes.ESPRESSO});
google.setOnLoadCallback(function() {
var customSearchControl = new google.search.CustomSearchControl(\'013516846811604855281:nj5laplixaa\');
customSearchControl.setResultSetSize(google.search.Search.FILTERED_CSE_RESULTSET);
var options = new google.search.DrawOptions();
options.enableSearchboxOnly("/docs/search_results.html", null, false, \'#\');
customSearchControl.draw(\'cse-search-form\', options);
}, true);
</script>
</div>
</div>
</div>
</div>
<div id="selectionbar">
<div class="wrapper">
<ul>
<li >
<a href="docs/documents.html">Documentation</a></li>
<li class="activelink">
<a href="docs/schemas.html">Schemas</a></li>
<li >
<a href=".">Home</a></li>
</ul>
</div>
</div>
<div style="padding: 14px; float: right;" id="languagebox"></div>
<div id="mainContent" vocab="http://schema.org/" typeof="%s" resource="http://schema.org/%s">
%s
'''
def OutputSchemaorgHeaders(webapp, entry='', is_class=False, ext_mappings='', sitemode="default", sitename="schema.org"):
"""
Generates the headers for class and property pages
* entry = name of the class or property
"""
rdfs_type = 'rdfs:Property'
if is_class:
rdfs_type = 'rdfs:Class'
out = headers % (str(entry), sitemode, sitename, rdfs_type, str(entry), ext_mappings)
webapp.response.write(out)
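# Hypothetical webapp2 handler showing the intended call pattern:
#
#   class TermPage(webapp2.RequestHandler):
#       def get(self):
#           OutputSchemaorgHeaders(self, entry='Book', is_class=True)
#           self.response.write('...term body and footer...')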
| Dataliberate/bibschemaorg | headers.py | Python | apache-2.0 | 4,412 | [
"ESPResSo"
] | ee3040954b8d4e8a32a5b7a26167fcd8028fedc743ced3c4dc72298af77ac982 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# networks.py
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Network and SpatialNetwork classes for neuroscience integration """
import numpy as np
import nngt
from nngt.lib import (InvalidArgument, nonstring_container, default_neuron,
                      default_synapse, is_integer)
from .graph import Graph
from .spatial_graph import SpatialGraph
# ------- #
# Network #
# ------- #
class Network(Graph):
"""
The detailed class that inherits from :class:`~nngt.Graph` and implements
additional properties to describe various biological functions
and interact with the NEST simulator.
"""
#-------------------------------------------------------------------------#
# Class attributes and methods
__num_networks = 0
__max_id = 0
@classmethod
def num_networks(cls):
''' Returns the number of alive instances. '''
return cls.__num_networks
@classmethod
def from_gids(cls, gids, get_connections=True, get_params=False,
neuron_model=default_neuron, neuron_param=None,
syn_model=default_synapse, syn_param=None, **kwargs):
'''
Generate a network from gids.
Warning
-------
Unless `get_connections` and `get_params` is True, or if your
population is homogeneous and you provide the required information, the
information contained by the network and its `population` attribute
will be erroneous!
To prevent conflicts the :func:`~nngt.Network.to_nest` function is not
available. If you know what you are doing, you should be able to find a
workaround...
Parameters
----------
gids : array-like
Ids of the neurons in NEST or simply user specified ids.
get_params : bool, optional (default: True)
Whether the parameters should be obtained from NEST (can be very
slow).
neuron_model : string, optional (default: None)
Name of the NEST neural model to use when simulating the activity.
neuron_param : dict, optional (default: {})
Dictionary containing the neural parameters; the default value will
make NEST use the default parameters of the model.
syn_model : string, optional (default: 'static_synapse')
NEST synaptic model to use when simulating the activity.
syn_param : dict, optional (default: {})
Dictionary containing the synaptic parameters; the default value
will make NEST use the default parameters of the model.
Returns
-------
net : :class:`~nngt.Network` or subclass
Uniform network of disconnected neurons.
'''
from nngt.lib.errors import not_implemented
if neuron_param is None:
neuron_param = {}
if syn_param is None:
syn_param = {}
# create the population
size = len(gids)
nodes = [i for i in range(size)]
group = nngt.NeuralGroup(
nodes, neuron_type=1, neuron_model=neuron_model,
neuron_param=neuron_param)
pop = nngt.NeuralPop.from_groups([group])
# create the network
net = cls(population=pop, **kwargs)
net.nest_gids = np.array(gids)
net._id_from_nest_gid = {gid: i for i, gid in enumerate(gids)}
net.to_nest = not_implemented
if get_connections:
from nngt.simulation import get_nest_adjacency
converter = {gid: i for i, gid in enumerate(gids)}
mat = get_nest_adjacency(converter)
edges = np.array(mat.nonzero()).T
w = mat.data
net.new_edges(edges, {'weight': w}, check_duplicates=False,
check_self_loops=False, check_existing=False)
if get_params:
raise NotImplementedError('`get_params` not implemented yet.')
return net
@classmethod
def uniform(cls, size, neuron_model=default_neuron,
neuron_param=None, syn_model=default_synapse,
syn_param=None, **kwargs):
'''
Generate a network containing only one type of neurons.
Parameters
----------
size : int
Number of neurons in the network.
neuron_model : string, optional (default: 'aief_cond_alpha')
Name of the NEST neural model to use when simulating the activity.
neuron_param : dict, optional (default: {})
Dictionary containing the neural parameters; the default value will
make NEST use the default parameters of the model.
syn_model : string, optional (default: 'static_synapse')
NEST synaptic model to use when simulating the activity.
syn_param : dict, optional (default: {})
Dictionary containing the synaptic parameters; the default value
will make NEST use the default parameters of the model.
Returns
-------
net : :class:`~nngt.Network` or subclass
Uniform network of disconnected neurons.
'''
if neuron_param is None:
neuron_param = {}
if syn_param is None:
syn_param = {}
pop = nngt.NeuralPop.uniform(
size, neuron_model=neuron_model, neuron_param=neuron_param,
syn_model=syn_model, syn_param=syn_param, parent=None)
net = cls(population=pop, **kwargs)
return net
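    # Example sketch (parameter values are illustrative only):
    #   net = Network.uniform(1000, neuron_model="aeif_cond_alpha",
    #                         neuron_param={"V_th": -55.})
    #   gids = net.to_nest()  # requires a working NEST installation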
@classmethod
def exc_and_inhib(cls, size, iratio=0.2, en_model=default_neuron,
en_param=None, in_model=default_neuron, in_param=None,
syn_spec=None, **kwargs):
'''
Generate a network containing a population of two neural groups:
inhibitory and excitatory neurons.
Parameters
----------
size : int
Number of neurons in the network.
        iratio : double, optional (default: 0.2)
Ratio of inhibitory neurons: :math:`\\frac{N_i}{N_e+N_i}`.
en_model : string, optional (default: 'aeif_cond_alpha')
Nest model for the excitatory neuron.
en_param : dict, optional (default: {})
Dictionary of parameters for the the excitatory neuron.
in_model : string, optional (default: 'aeif_cond_alpha')
Nest model for the inhibitory neuron.
in_param : dict, optional (default: {})
Dictionary of parameters for the the inhibitory neuron.
syn_spec : dict, optional (default: static synapse)
Dictionary containg a directed edge between groups as key and the
associated synaptic parameters for the post-synaptic neurons (i.e.
those of the second group) as value. If provided, all connections
between groups will be set according to the values contained in
`syn_spec`. Valid keys are:
- `('excitatory', 'excitatory')`
- `('excitatory', 'inhibitory')`
- `('inhibitory', 'excitatory')`
- `('inhibitory', 'inhibitory')`
Returns
-------
net : :class:`~nngt.Network` or subclass
Network of disconnected excitatory and inhibitory neurons.
See also
--------
:func:`~nngt.NeuralPop.exc_and_inhib`
'''
pop = nngt.NeuralPop.exc_and_inhib(
size, iratio, en_model, en_param, in_model, in_param,
syn_spec=syn_spec)
net = cls(population=pop, **kwargs)
return net
#-------------------------------------------------------------------------#
# Constructor, destructor and attributes
def __init__(self, name="Network", weighted=True, directed=True,
from_graph=None, population=None, inh_weight_factor=1.,
**kwargs):
'''
Initializes :class:`~nngt.Network` instance.
Parameters
----------
nodes : int, optional (default: 0)
Number of nodes in the graph.
name : string, optional (default: "Graph")
The name of this :class:`Graph` instance.
weighted : bool, optional (default: True)
Whether the graph edges have weight properties.
directed : bool, optional (default: True)
Whether the graph is directed or undirected.
from_graph : :class:`~nngt.core.GraphObject`, optional (default: None)
An optional :class:`~nngt.core.GraphObject` to serve as base.
population : :class:`nngt.NeuralPop`, (default: None)
An object containing the neural groups and their properties:
model(s) to use in NEST to simulate the neurons as well as their
parameters.
inh_weight_factor : float, optional (default: 1.)
Factor to apply to inhibitory synapses, to compensate for example
the strength difference due to timescales between excitatory and
inhibitory synapses.
Returns
-------
self : :class:`~nggt.Network`
'''
self.__id = self.__class__.__max_id
self.__class__.__num_networks += 1
self.__class__.__max_id += 1
assert directed, "Network class cannot be undirected."
if population is None:
raise InvalidArgument("Network needs a NeuralPop to be created")
nodes = population.size
if "nodes" in kwargs.keys():
assert kwargs["nodes"] == nodes, "Incompatible values for " +\
"`nodes` = {} with a `population` of size {}.".format(
kwargs["nodes"], nodes)
del kwargs["nodes"]
if "delays" not in kwargs: # set default delay to 1.
kwargs["delays"] = 1.
super().__init__(nodes=nodes, name=name, weighted=weighted,
directed=directed, from_graph=from_graph,
inh_weight_factor=inh_weight_factor, **kwargs)
self._init_bioproperties(population)
if "shape" in kwargs or "positions" in kwargs:
self.make_spatial(self, shape=kwargs.get("shape", None),
positions=kwargs.get("positions", None))
def __del__(self):
super().__del__()
self.__class__.__num_networks -= 1
@property
def population(self):
'''
:class:`~nngt.NeuralPop` that divides the neurons into groups with
specific properties.
'''
return self._population
@population.setter
def population(self, population):
if issubclass(population.__class__, nngt.NeuralPop):
if self.node_nb() == population.size:
if population.is_valid:
self._population = population
else:
raise AttributeError("NeuralPop is not valid (not all "
"neurons are associated to a group).")
else:
raise AttributeError("Network and NeuralPop must have same "
"number of neurons.")
else:
raise AttributeError("Expecting NeuralPop but received "
"'{}'".format(population.__class__.__name__))
@property
def nest_gids(self):
return self._nest_gids
@nest_gids.setter
def nest_gids(self, gids):
self._nest_gids = gids
for group in self.population.values():
group._nest_gids = gids[group.ids]
def get_edge_types(self):
inhib_neurons = {}
types = np.ones(self.edge_nb())
for g in self._population.values():
if g.neuron_type == -1:
for n in g.ids:
inhib_neurons[n] = None
for i, e in enumerate(self.edges_array):
if e[0] in inhib_neurons:
types[i] = -1
return types
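    # Hypothetical usage sketch (not part of the original source): edge types
    # come back as a +1/-1 array aligned with `edges_array`.
    #
    #     etypes = net.get_edge_types()
    #     n_inhib_edges = int((etypes == -1).sum())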
def id_from_nest_gid(self, gids):
'''
Return the ids of the nodes in the :class:`nngt.Network` instance from
the corresponding NEST gids.
Parameters
----------
gids : int or tuple
NEST gids.
Returns
-------
ids : int or tuple
Ids in the network. Same type as the requested `gids` type.
'''
if nonstring_container(gids):
return np.array([self._id_from_nest_gid[gid] for gid in gids],
dtype=int)
else:
return self._id_from_nest_gid[gids]
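    # Hypothetical usage sketch (not part of the original source): once
    # `nest_gids` has been set (e.g. after `to_nest`), NEST gids can be mapped
    # back to network ids; the gid values are illustrative.
    #
    #     ids = net.id_from_nest_gid([2, 5, 7])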
def to_nest(self, send_only=None, weights=True):
'''
Send the network to NEST.
.. seealso::
:func:`~nngt.simulation.make_nest_network` for parameters
'''
from nngt.simulation import make_nest_network
if nngt._config['with_nest']:
return make_nest_network(
self, send_only=send_only, weights=weights)
else:
raise RuntimeError("NEST is not present.")
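    # Hypothetical usage sketch (not part of the original source): sending the
    # network to NEST, assuming NEST is installed and nngt was configured with
    # NEST support.
    #
    #     gids = net.to_nest(weights=True)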
#-------------------------------------------------------------------------#
# Init tool
def _init_bioproperties(self, population):
''' Set the population attribute and link each neuron to its group. '''
self._population = None
self._nest_gids = None
self._id_from_nest_gid = None
if not hasattr(self, '_iwf'):
self._iwf = 1.
if issubclass(population.__class__, nngt.NeuralPop):
if population.is_valid or not self.node_nb():
self._population = population
nodes = population.size
# create the delay attribute if necessary
if "delay" not in self.edge_attributes:
self.set_delays()
else:
raise AttributeError("NeuralPop is not valid (not all neurons "
"are associated to a group).")
else:
            raise AttributeError("Expected NeuralPop but received "
                                 "{}".format(population.__class__.__name__))
#-------------------------------------------------------------------------#
# Setter
def set_types(self, edge_type, nodes=None, fraction=None):
raise NotImplementedError("Cannot be used on :class:`~nngt.Network`.")
def get_neuron_type(self, neuron_ids):
'''
Return the type of the neurons (+1 for excitatory, -1 for inhibitory).
Parameters
----------
        neuron_ids : int or tuple
            Ids of the neurons in the network.
        Returns
        -------
        types : int or tuple
            Neuron type(s), +1 or -1. Same type as the requested `neuron_ids`.
'''
if is_integer(neuron_ids):
group_name = self._population._neuron_group[neuron_ids]
neuron_type = self._population[group_name].neuron_type
return neuron_type
else:
groups = (self._population._neuron_group[i] for i in neuron_ids)
types = tuple(self._population[gn].neuron_type for gn in groups)
return types
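    # Hypothetical usage sketch (not part of the original source): querying
    # neuron types for a single id or for a list of ids.
    #
    #     net.get_neuron_type(0)          # -> 1 or -1
    #     net.get_neuron_type([0, 1, 2])  # -> e.g. (1, 1, -1)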
#-------------------------------------------------------------------------#
# Getter
def neuron_properties(self, idx_neuron):
'''
Properties of a neuron in the graph.
Parameters
----------
idx_neuron : int
Index of a neuron in the graph.
Returns
-------
dict of the neuron's properties.
'''
group_name = self._population._neuron_group[idx_neuron]
return self._population[group_name].properties()
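    # Hypothetical usage sketch (not part of the original source): inspecting
    # the biological parameters attached to a single neuron.
    #
    #     props = net.neuron_properties(0)
    #     print(props)   # dict of group properties (model, parameters, ...)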
# -------------- #
# SpatialNetwork #
# -------------- #
class SpatialNetwork(Network, SpatialGraph):
"""
Class that inherits from :class:`~nngt.Network` and
:class:`~nngt.SpatialGraph` to provide a detailed description of a real
neural network in space, i.e. with positions and biological properties to
interact with NEST.
"""
#-------------------------------------------------------------------------#
# Class attributes
__num_networks = 0
__max_id = 0
#-------------------------------------------------------------------------#
# Constructor, destructor, and attributes
def __init__(self, population, name="SpatialNetwork", weighted=True,
directed=True, shape=None, from_graph=None, positions=None,
**kwargs):
'''
Initialize SpatialNetwork instance
Parameters
----------
name : string, optional (default: "Graph")
The name of this :class:`Graph` instance.
weighted : bool, optional (default: True)
Whether the graph edges have weight properties.
directed : bool, optional (default: True)
Whether the graph is directed or undirected.
shape : :class:`~nngt.geometry.Shape`, optional (default: None)
Shape of the neurons' environment (None leads to a square of side
1 cm)
positions : :class:`numpy.array`, optional (default: None)
Positions of the neurons; if not specified and `nodes` != 0, then
            neurons will be distributed at random inside the
:class:`~nngt.geometry.Shape` object of the instance.
        population : :class:`~nngt.NeuralPop`
Population from which the network will be built.
Returns
-------
self : :class:`~nngt.SpatialNetwork`
'''
self.__id = self.__class__.__max_id
self.__class__.__num_networks += 1
self.__class__.__max_id += 1
if population is None:
raise InvalidArgument("Network needs a NeuralPop to be created")
nodes = population.size
super().__init__(
nodes=nodes, name=name, weighted=weighted, directed=directed,
shape=shape, positions=positions, population=population,
from_graph=from_graph, **kwargs)
    def __del__(self):
super().__del__()
self.__class__.__num_networks -= 1
#-------------------------------------------------------------------------#
# Setter
def set_types(self, syn_type, nodes=None, fraction=None):
raise NotImplementedError("Cannot be used on "
":class:`~nngt.SpatialNetwork`.")
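# Hypothetical usage sketch (not part of the original module): building a
# spatial network from an excitatory/inhibitory population with random
# positions. All numbers are illustrative; the block only runs when the file
# is executed directly and assumes nngt and numpy are importable.
if __name__ == "__main__":
    import numpy as _np
    import nngt as _nngt
    _pop = _nngt.NeuralPop.exc_and_inhib(100, iratio=0.2)
    _pos = _np.random.uniform(-100., 100., size=(100, 2))
    _snet = _nngt.SpatialNetwork(_pop, positions=_pos)
    print(_snet.node_nb(), "neurons placed in space")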
| Silmathoron/NNGT | nngt/core/networks.py | Python | gpl-3.0 | 19,108 | [
"NEURON"
] | 9868792b3fc60a33f1e7abcb97d734e097c7cada6867ba1898d1955697007ee6 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Tuple
from kivy.uix.label import Label
from kivy.graphics import Color
from kivy.graphics import Rectangle
from kivy.properties import ListProperty
from ORCA.widgets.core.ButtonBehaviour import cOrcaButtonBehaviour
from ORCA.utils.RemoveNoClassArgs import RemoveNoClassArgs
__all__ = ['cLabel']
# noinspection PyUnusedLocal
class cLabel(cOrcaButtonBehaviour,Label):
""" base class for a label """
# noinspection PyArgumentList
background_color = ListProperty([0, 0, 0, 0])
def __init__(self, **kwargs):
Label.__init__(self,**RemoveNoClassArgs(dInArgs=kwargs,oObject=Label))
cOrcaButtonBehaviour.__init__(self,**kwargs)
if 'background_color' in kwargs:
self.background_color=kwargs['background_color']
        if self.background_color != [0, 0, 0, 0]:
with self.canvas.before:
Color(self.background_color[0],self.background_color[1], self.background_color[2],self.background_color[3])
self.rect_bg = Rectangle(size=self.size,pos=self.pos)
self.bind(pos=self.update_graphics_pos,size=self.update_graphics_size)
def update_graphics_pos(self, instance, value:Tuple) -> None:
""" Update the label after position change """
self.rect_bg.pos = value
def update_graphics_size(self, instance, value:Tuple) -> None:
""" Update the label after size change """
self.rect_bg.size = value
def on_touch_up(self, touch) -> bool:
""" handles the touch event """
if cOrcaButtonBehaviour.on_touch_up(self,touch):
Label.on_touch_up(self,touch)
return True
else:
return Label.on_touch_up(self,touch)
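# Hypothetical usage sketch (not part of the original module): creating a
# label with a colored background. The kwargs shown are illustrative; in
# practice the widget is instantiated by the ORCA screen factory.
#
#     lbl = cLabel(text="Volume", background_color=[0.2, 0.2, 0.2, 1.0])
#     layout.add_widget(lbl)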
| thica/ORCA-Remote | src/ORCA/widgets/core/Label.py | Python | gpl-3.0 | 2,739 | [
"ORCA"
] | d5dde4c25f6eab9512ec6a263bc7ee3cf41d710dd86c2bf152a5e48956b64e4a |